Merge tag 'hwmon-for-v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/groeck...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 31 Aug 2021 21:37:41 +0000 (14:37 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 31 Aug 2021 21:37:41 +0000 (14:37 -0700)
Pull hwmon updates from Guenter Roeck:
 "New drivers for:

   - Aquacomputer D5 Next

   - SB-RMI power module

  Added chip support to existing drivers:

   - Support for various Zen2 and Zen3 APUs and for Yellow Carp (SMU
     v13) added to k10temp driver

   - Support for Silicom n5010 PAC added to intel-m10-bmc driver

   - Support for BPD-RS600 added to pmbus/bpa-rs600 driver

  Other notable changes:

   - In k10temp, do not display Tdie on Zen CPUs if there is no
     difference between Tdie and Tctl

   - Converted adt7470 and dell-smm drivers to use
     devm_hwmon_device_register_with_info API

   - Support for temperature/pwm tables added to axi-fan-control driver

   - Enabled fan control for Dell Precision 7510 in dell-smm driver

  Various other minor improvements and fixes in several drivers"
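
A minimal, illustrative sketch of the devm_hwmon_device_register_with_info()
registration model that the adt7470 and dell-smm conversions above move to.
This is not code from either driver; the foo_* names and the single read-only
temperature channel are assumptions made purely for illustration.

/*
 * Hypothetical "foo" hwmon driver fragment: one read-only temperature
 * channel registered through the device-managed info API.
 */
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/platform_device.h>

struct foo_data {
        long temp_mc;                   /* last reading, millidegrees Celsius */
};

static umode_t foo_is_visible(const void *data, enum hwmon_sensor_types type,
                              u32 attr, int channel)
{
        /* Expose only temp1_input, read-only */
        return (type == hwmon_temp && attr == hwmon_temp_input) ? 0444 : 0;
}

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
                    u32 attr, int channel, long *val)
{
        struct foo_data *data = dev_get_drvdata(dev);

        if (type != hwmon_temp || attr != hwmon_temp_input)
                return -EOPNOTSUPP;
        *val = data->temp_mc;           /* a real driver reads the chip here */
        return 0;
}

static const struct hwmon_channel_info *foo_info[] = {
        HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
        NULL
};

static const struct hwmon_ops foo_hwmon_ops = {
        .is_visible = foo_is_visible,
        .read       = foo_read,
};

static const struct hwmon_chip_info foo_chip_info = {
        .ops  = &foo_hwmon_ops,
        .info = foo_info,
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_data *data;
        struct device *hwmon_dev;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        /* Device-managed: no explicit hwmon unregister needed on remove */
        hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "foo",
                                                          data, &foo_chip_info,
                                                          NULL);
        return PTR_ERR_OR_ZERO(hwmon_dev);
}

Because registration is device-managed, the hwmon device is torn down
automatically when the owning device is unbound, which is the main appeal of
converting older drivers such as adt7470 and dell-smm to this API.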

* tag 'hwmon-for-v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging: (41 commits)
  hwmon: add driver for Aquacomputer D5 Next
  hwmon: (adt7470) Convert to devm_hwmon_device_register_with_info API
  hwmon: (adt7470) Convert to use regmap
  hwmon: (adt7470) Fix some style issues
  hwmon: (k10temp) Add support for yellow carp
  hwmon: (k10temp) Rework the temperature offset calculation
  hwmon: (k10temp) Don't show Tdie for all Zen/Zen2/Zen3 CPU/APU
  hwmon: (k10temp) Add additional missing Zen2 and Zen3 APUs
  hwmon: remove amd_energy driver in Makefile
  hwmon: (dell-smm) Rework SMM function debugging
  hwmon: (dell-smm) Mark i8k_get_fan_nominal_speed as __init
  hwmon: (dell-smm) Mark tables as __initconst
  hwmon: (pmbus/bpa-rs600) Add workaround for incorrect Pin max
  hwmon: (pmbus/bpa-rs600) Don't use rated limits as warn limits
  hwmon: (axi-fan-control) Support temperature vs pwm points
  hwmon: (axi-fan-control) Handle irqs in natural order
  hwmon: (axi-fan-control) Make sure the clock is enabled
  hwmon: (pmbus/ibm-cffps) Fix write bits for LED control
  hwmon: (w83781d) Match on device tree compatibles
  dt-bindings: hwmon: Add bindings for Winbond W83781D
  ...

2126 files changed:
Documentation/ABI/testing/sysfs-block
Documentation/ABI/testing/sysfs-block-device
Documentation/ABI/testing/sysfs-bus-event_source-devices-uncore [new file with mode: 0644]
Documentation/ABI/testing/sysfs-bus-platform
Documentation/ABI/testing/sysfs-platform-dptf
Documentation/ABI/testing/sysfs-platform_profile
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
Documentation/RCU/Design/Requirements/Requirements.rst
Documentation/RCU/checklist.rst
Documentation/RCU/rcu_dereference.rst
Documentation/RCU/stallwarn.rst
Documentation/admin-guide/hw-vuln/index.rst
Documentation/admin-guide/hw-vuln/l1d_flush.rst [new file with mode: 0644]
Documentation/admin-guide/kernel-parameters.txt
Documentation/atomic_t.txt
Documentation/bpf/libbpf/libbpf_naming_convention.rst
Documentation/core-api/cpu_hotplug.rst
Documentation/core-api/irq/irq-domain.rst
Documentation/devicetree/bindings/fsi/ibm,fsi2spi.yaml
Documentation/devicetree/bindings/gpio/rockchip,gpio-bank.yaml
Documentation/devicetree/bindings/iio/st,st-sensors.yaml
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
Documentation/devicetree/bindings/mmc/mmc-pwrseq-sd8787.yaml
Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
Documentation/devicetree/bindings/mmc/sdhci-msm.txt
Documentation/devicetree/bindings/power/supply/battery.yaml
Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml
Documentation/devicetree/bindings/power/supply/mt6360_charger.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml
Documentation/devicetree/bindings/power/supply/x-powers,axp20x-ac-power-supply.yaml
Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
Documentation/devicetree/bindings/regulator/richtek,rtq2134-regulator.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/socionext,uniphier-regulator.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/uniphier-regulator.txt [deleted file]
Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
Documentation/devicetree/bindings/spi/omap-spi.txt [deleted file]
Documentation/devicetree/bindings/spi/omap-spi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/spi/rockchip-sfc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/spi/spi-mt65xx.txt
Documentation/devicetree/bindings/spi/spi-sprd-adi.txt [deleted file]
Documentation/devicetree/bindings/spi/sprd,spi-adi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt [deleted file]
Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml [new file with mode: 0644]
Documentation/driver-api/index.rst
Documentation/driver-api/lightnvm-pblk.rst [deleted file]
Documentation/fault-injection/fault-injection.rst
Documentation/filesystems/cifs/index.rst [new file with mode: 0644]
Documentation/filesystems/cifs/ksmbd.rst [new file with mode: 0644]
Documentation/filesystems/fscrypt.rst
Documentation/filesystems/idmappings.rst [new file with mode: 0644]
Documentation/filesystems/index.rst
Documentation/filesystems/locking.rst
Documentation/filesystems/mandatory-locking.rst [deleted file]
Documentation/gpu/rfc/i915_gem_lmem.rst
Documentation/i2c/index.rst
Documentation/networking/netdev-FAQ.rst
Documentation/networking/nf_conntrack-sysctl.rst
Documentation/networking/operstates.rst
Documentation/trace/ftrace.rst
Documentation/userspace-api/ioctl/ioctl-number.rst
Documentation/userspace-api/seccomp_filter.rst
Documentation/userspace-api/spec_ctrl.rst
Documentation/virt/kvm/locking.rst
Documentation/x86/x86_64/boot-options.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/arc/Kconfig
arch/arc/include/asm/checksum.h
arch/arc/include/asm/perf_event.h
arch/arc/kernel/fpu.c
arch/arc/kernel/mcip.c
arch/arc/kernel/unwind.c
arch/arc/kernel/vmlinux.lds.S
arch/arm/boot/dts/am437x-l4.dtsi
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/imx53-m53menlo.dts
arch/arm/boot/dts/imx6qdl-sr-som.dtsi
arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
arch/arm/common/sa1111.c
arch/arm/configs/nhk8815_defconfig
arch/arm/crypto/curve25519-glue.c
arch/arm/include/asm/memory.h
arch/arm/kernel/head.S
arch/arm/mach-imx/common.h
arch/arm/mach-imx/mmdc.c
arch/arm/mach-ixp4xx/Kconfig
arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-pxa/pxa_cplds_irqs.c
arch/arm/mach-s3c/irq-s3c24xx.c
arch/arm/mm/mmu.c
arch/arm/mm/pv-fixup-asm.S
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
arch/arm64/boot/dts/nvidia/tegra194.dtsi
arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts
arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
arch/arm64/boot/dts/qcom/sc7180.dtsi
arch/arm64/boot/dts/qcom/sc7280.dtsi
arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
arch/arm64/crypto/Kconfig
arch/arm64/crypto/sm4-ce-glue.c
arch/arm64/include/asm/acpi.h
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/page.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/include/asm/syscall.h
arch/arm64/kernel/acpi.c
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/syscall.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/mm/init.c
arch/m68k/Kconfig.cpu
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/stmark2_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/emu/nfeth.c
arch/m68k/include/asm/atomic.h
arch/mips/Makefile
arch/mips/ath25/ar2315.c
arch/mips/ath25/ar5312.c
arch/mips/include/asm/mach-rc32434/rb.h
arch/mips/include/asm/pgalloc.h
arch/mips/lantiq/irq.c
arch/mips/mti-malta/malta-platform.c
arch/mips/pci/pci-ar2315.c
arch/mips/pci/pci-rt3883.c
arch/mips/ralink/irq.c
arch/mips/sgi-ip27/ip27-irq.c
arch/mips/sgi-ip30/ip30-irq.c
arch/nios2/kernel/irq.c
arch/parisc/include/asm/string.h
arch/parisc/kernel/parisc_ksyms.c
arch/parisc/lib/Makefile
arch/parisc/lib/memset.c [new file with mode: 0644]
arch/parisc/lib/string.S [deleted file]
arch/powerpc/include/asm/book3s/32/kup.h
arch/powerpc/include/asm/interrupt.h
arch/powerpc/include/asm/irq.h
arch/powerpc/include/asm/ptrace.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_book3s_32.S
arch/powerpc/kernel/head_booke.h
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/time.c
arch/powerpc/kernel/traps.c
arch/powerpc/mm/pageattr.c
arch/powerpc/platforms/4xx/uic.c
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
arch/powerpc/platforms/52xx/media5200.c
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
arch/powerpc/platforms/Kconfig.cputype
arch/powerpc/platforms/cell/interrupt.c
arch/powerpc/platforms/cell/spider-pic.c
arch/powerpc/platforms/embedded6xx/hlwd-pic.c
arch/powerpc/platforms/powernv/opal-irqchip.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/fsl_mpic_err.c
arch/powerpc/sysdev/fsl_msi.c
arch/powerpc/sysdev/xive/common.c
arch/riscv/Kconfig
arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
arch/riscv/configs/defconfig
arch/riscv/configs/rv32_defconfig
arch/riscv/include/asm/page.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/stacktrace.c
arch/riscv/mm/init.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/boot/Makefile
arch/s390/boot/boot.h
arch/s390/boot/compressed/Makefile
arch/s390/boot/compressed/clz_ctz.c [new file with mode: 0644]
arch/s390/boot/compressed/decompressor.c
arch/s390/boot/compressed/decompressor.h
arch/s390/boot/compressed/vmlinux.lds.S
arch/s390/boot/head.S
arch/s390/boot/ipl_report.c
arch/s390/boot/kaslr.c
arch/s390/boot/mem_detect.c
arch/s390/boot/pgm_check_info.c
arch/s390/boot/sclp_early_core.c
arch/s390/boot/startup.c
arch/s390/boot/text_dma.S [deleted file]
arch/s390/boot/uv.c
arch/s390/boot/uv.h [new file with mode: 0644]
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/hypfs/hypfs_diag0c.c
arch/s390/include/asm/cio.h
arch/s390/include/asm/cpacf.h
arch/s390/include/asm/cpufeature.h
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/debug.h
arch/s390/include/asm/diag.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/extable.h
arch/s390/include/asm/ftrace.h
arch/s390/include/asm/ftrace.lds.h [new file with mode: 0644]
arch/s390/include/asm/ipl.h
arch/s390/include/asm/kfence.h [new file with mode: 0644]
arch/s390/include/asm/kvm_para.h
arch/s390/include/asm/linkage.h
arch/s390/include/asm/lowcore.h
arch/s390/include/asm/module.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pci.h
arch/s390/include/asm/pci_dma.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/qdio.h
arch/s390/include/asm/sclp.h
arch/s390/include/asm/sections.h
arch/s390/include/asm/set_memory.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/syscall.h
arch/s390/include/asm/uv.h
arch/s390/include/asm/vdso/gettimeofday.h
arch/s390/kernel/Makefile
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/crash_dump.c
arch/s390/kernel/debug.c
arch/s390/kernel/diag.c
arch/s390/kernel/dis.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/ftrace.h [new file with mode: 0644]
arch/s390/kernel/head64.S
arch/s390/kernel/ipl.c
arch/s390/kernel/ipl_vmparm.c
arch/s390/kernel/irq.c
arch/s390/kernel/jump_label.c
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/module.c
arch/s390/kernel/os_info.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/processor.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/smp.c
arch/s390/kernel/text_amode31.S [new file with mode: 0644]
arch/s390/kernel/topology.c
arch/s390/kernel/traps.c
arch/s390/kernel/uv.c
arch/s390/kernel/vdso32/Makefile
arch/s390/kernel/vdso32/vdso32.lds.S
arch/s390/kernel/vdso64/Makefile
arch/s390/kernel/vdso64/vdso64.lds.S
arch/s390/kernel/vmlinux.lds.S
arch/s390/lib/delay.c
arch/s390/mm/dump_pagetables.c
arch/s390/mm/fault.c
arch/s390/mm/init.c
arch/s390/mm/kasan_init.c
arch/s390/mm/maccess.c
arch/s390/mm/page-states.c
arch/s390/mm/pageattr.c
arch/s390/mm/vmem.c
arch/s390/pci/pci.c
arch/s390/pci/pci_bus.c
arch/s390/pci/pci_bus.h
arch/s390/pci/pci_clp.c
arch/s390/pci/pci_dma.c
arch/s390/pci/pci_event.c
arch/s390/pci/pci_irq.c
arch/s390/pci/pci_sysfs.c
arch/s390/purgatory/Makefile
arch/s390/tools/opcodes.txt
arch/sh/boards/mach-se/7343/irq.c
arch/sh/boards/mach-se/7722/irq.c
arch/sh/boards/mach-x3proto/gpio.c
arch/um/drivers/ubd_kern.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/Makefile
arch/x86/boot/compressed/efi_thunk_64.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/compressed/kaslr.c
arch/x86/crypto/Makefile
arch/x86/crypto/aesni-intel_glue.c
arch/x86/crypto/sm4-aesni-avx-asm_64.S [new file with mode: 0644]
arch/x86/crypto/sm4-aesni-avx2-asm_64.S [new file with mode: 0644]
arch/x86/crypto/sm4-avx.h [new file with mode: 0644]
arch/x86/crypto/sm4_aesni_avx2_glue.c [new file with mode: 0644]
arch/x86/crypto/sm4_aesni_avx_glue.c [new file with mode: 0644]
arch/x86/events/Kconfig
arch/x86/events/amd/Makefile
arch/x86/events/amd/ibs.c
arch/x86/events/amd/power.c
arch/x86/events/amd/uncore.c
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_discovery.c
arch/x86/events/intel/uncore_discovery.h
arch/x86/events/intel/uncore_snbep.c
arch/x86/events/perf_event.h
arch/x86/include/asm/amd-ibs.h [new file with mode: 0644]
arch/x86/include/asm/i8259.h
arch/x86/include/asm/kfence.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/pc-conf-reg.h [new file with mode: 0644]
arch/x86/include/asm/processor-cyrix.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/svm.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/msi.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/mce/inject.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/mtrr/mtrr.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/cpu/resctrl/ctrlmondata.c
arch/x86/kernel/cpu/resctrl/internal.h
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/hpet.c
arch/x86/kernel/i8259.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/i8259.c
arch/x86/kvm/irq.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/lib/Makefile
arch/x86/lib/pc-conf-reg.c [new file with mode: 0644]
arch/x86/mm/mmio-mod.c
arch/x86/mm/tlb.c
arch/x86/pci/irq.c
arch/x86/power/cpu.c
arch/x86/tools/chkobjdump.awk
arch/x86/tools/relocs.c
arch/x86/tools/relocs.h
arch/xtensa/kernel/irq.c
block/Kconfig
block/Kconfig.iosched
block/Makefile
block/bfq-iosched.c
block/bfq-iosched.h
block/bfq-wf2q.c
block/bio-integrity.c
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-crypto.c
block/blk-flush.c
block/blk-integrity.c
block/blk-iocost.c
block/blk-iolatency.c
block/blk-map.c
block/blk-merge.c
block/blk-mq-sysfs.c
block/blk-mq.c
block/blk-settings.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk-wbt.c
block/blk-zoned.c
block/blk.h
block/bounce.c
block/cmdline-parser.c [deleted file]
block/disk-events.c
block/elevator.c
block/genhd.c
block/holder.c [new file with mode: 0644]
block/ioctl.c
block/ioprio.c
block/kyber-iosched.c
block/mq-deadline-cgroup.c [deleted file]
block/mq-deadline-cgroup.h [deleted file]
block/mq-deadline-main.c [deleted file]
block/mq-deadline.c [new file with mode: 0644]
block/partitions/Kconfig
block/partitions/acorn.c
block/partitions/aix.c
block/partitions/amiga.c
block/partitions/atari.c
block/partitions/check.h
block/partitions/cmdline.c
block/partitions/core.c
block/partitions/efi.c
block/partitions/ibm.c
block/partitions/ldm.c
block/partitions/mac.c
block/partitions/msdos.c
block/partitions/sgi.c
block/partitions/sun.c
block/t10-pi.c
certs/Kconfig
certs/Makefile
crypto/Kconfig
crypto/Makefile
crypto/asymmetric_keys/pkcs7_parser.c
crypto/ecc.h
crypto/sha512_generic.c
crypto/skcipher.c
crypto/sm4_generic.c
crypto/tcrypt.c
crypto/testmgr.c
crypto/testmgr.h
crypto/wp512.c
drivers/Kconfig
drivers/Makefile
drivers/acpi/Kconfig
drivers/acpi/acpi_configfs.c
drivers/acpi/acpi_fpdt.c
drivers/acpi/acpi_pad.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpica/dswexec.c
drivers/acpi/acpica/nsrepair2.c
drivers/acpi/bus.c
drivers/acpi/button.c
drivers/acpi/dptf/dptf_pch_fivr.c
drivers/acpi/glue.c
drivers/acpi/nfit/core.c
drivers/acpi/numa/Kconfig
drivers/acpi/numa/srat.c
drivers/acpi/osl.c
drivers/acpi/platform_profile.c
drivers/acpi/pmic/intel_pmic_xpower.c
drivers/acpi/power.c
drivers/acpi/prmt.c
drivers/acpi/processor_idle.c
drivers/acpi/spcr.c
drivers/acpi/x86/s2idle.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-sata.c
drivers/ata/libata-scsi.c
drivers/ata/sata_dwc_460ex.c
drivers/base/base.h
drivers/base/core.c
drivers/base/dd.c
drivers/base/firmware_loader/fallback.c
drivers/base/firmware_loader/firmware.h
drivers/base/firmware_loader/main.c
drivers/base/platform-msi.c
drivers/base/power/domain.c
drivers/base/property.c
drivers/base/regmap/internal.h
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap-mmio.c
drivers/base/regmap/regmap.c
drivers/base/swnode.c
drivers/block/Kconfig
drivers/block/brd.c
drivers/block/cryptoloop.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_req.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/n64cart.c
drivers/block/nbd.c
drivers/block/null_blk/main.c
drivers/block/paride/pd.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/ps3vram.c
drivers/block/rbd.c
drivers/block/rnbd/rnbd-clt-sysfs.c
drivers/block/rnbd/rnbd-clt.c
drivers/block/rnbd/rnbd-srv-sysfs.c
drivers/block/sx8.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/bus/ti-sysc.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/Makefile
drivers/char/hw_random/amd-rng.c
drivers/char/hw_random/arm_smccc_trng.c [new file with mode: 0644]
drivers/char/hw_random/geode-rng.c
drivers/char/hw_random/intel-rng.c
drivers/char/hw_random/via-rng.c
drivers/char/tpm/Kconfig
drivers/char/tpm/tpm_ftpm_tee.c
drivers/char/tpm/tpm_ibmvtpm.c
drivers/char/tpm/tpm_ibmvtpm.h
drivers/char/tpm/tpm_tis_i2c_cr50.c
drivers/clk/clk-devres.c
drivers/clk/clk-stm32f4.c
drivers/clk/hisilicon/Kconfig
drivers/clk/imx/clk-imx6q.c
drivers/clk/qcom/clk-smd-rpm.c
drivers/clk/qcom/gdsc.c
drivers/clk/renesas/rcar-usb2-clock-sel.c
drivers/clk/tegra/clk-sdmmc-mux.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/ingenic-sysost.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/timer-fttmr010.c
drivers/clocksource/timer-mediatek.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/armada-37xx-cpufreq.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpufreq/scmi-cpufreq.c
drivers/cpuidle/governors/teo.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
drivers/crypto/atmel-aes.c
drivers/crypto/atmel-tdes.c
drivers/crypto/ccp/sev-dev.c
drivers/crypto/ccp/sp-pci.c
drivers/crypto/hisilicon/hpre/hpre_main.c
drivers/crypto/hisilicon/qm.c
drivers/crypto/hisilicon/qm.h
drivers/crypto/hisilicon/sec2/sec.h
drivers/crypto/hisilicon/sec2/sec_main.c
drivers/crypto/hisilicon/zip/zip_main.c
drivers/crypto/mxs-dcp.c
drivers/crypto/omap-aes.c
drivers/crypto/omap-crypto.c
drivers/crypto/omap-des.c
drivers/crypto/omap-sham.c
drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
drivers/crypto/qat/qat_4xxx/adf_drv.c
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
drivers/crypto/qat/qat_c3xxx/adf_drv.c
drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
drivers/crypto/qat/qat_c62x/adf_drv.c
drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
drivers/crypto/qat/qat_c62xvf/adf_drv.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/adf_aer.c
drivers/crypto/qat/qat_common/adf_common_drv.h
drivers/crypto/qat/qat_common/adf_init.c
drivers/crypto/qat/qat_common/adf_isr.c
drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
drivers/crypto/qat/qat_common/adf_sriov.c
drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
drivers/crypto/qat/qat_common/adf_vf_isr.c
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
drivers/crypto/virtio/virtio_crypto_core.c
drivers/dax/super.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/init.c
drivers/dma/idxd/irq.c
drivers/dma/idxd/submit.c
drivers/dma/idxd/sysfs.c
drivers/dma/imx-dma.c
drivers/dma/of-dma.c
drivers/dma/sh/usb-dmac.c
drivers/dma/stm32-dma.c
drivers/dma/stm32-dmamux.c
drivers/dma/uniphier-xdmac.c
drivers/dma/xilinx/xilinx_dma.c
drivers/edac/altera_edac.c
drivers/edac/amd64_edac.c
drivers/edac/edac_mc.c
drivers/edac/i10nm_base.c
drivers/edac/mce_amd.c
drivers/edac/skx_base.c
drivers/edac/skx_common.c
drivers/edac/skx_common.h
drivers/firmware/broadcom/tee_bnxt_fw.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/libstub/arm64-stub.c
drivers/firmware/efi/libstub/randomalloc.c
drivers/firmware/smccc/smccc.c
drivers/fpga/dfl-fme-perf.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/gpio-104-dio-48e.c
drivers/gpio/gpio-104-idi-48.c
drivers/gpio/gpio-104-idio-16.c
drivers/gpio/gpio-altera.c
drivers/gpio/gpio-aspeed-sgpio.c
drivers/gpio/gpio-aspeed.c
drivers/gpio/gpio-ath79.c
drivers/gpio/gpio-bcm-kona.c
drivers/gpio/gpio-brcmstb.c
drivers/gpio/gpio-cadence.c
drivers/gpio/gpio-davinci.c
drivers/gpio/gpio-dln2.c
drivers/gpio/gpio-em.c
drivers/gpio/gpio-ep93xx.c
drivers/gpio/gpio-ftgpio010.c
drivers/gpio/gpio-hisi.c
drivers/gpio/gpio-hlwd.c
drivers/gpio/gpio-merrifield.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-mt7621.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpio-mxs.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-pci-idio-16.c
drivers/gpio/gpio-pcie-idio-24.c
drivers/gpio/gpio-pl061.c
drivers/gpio/gpio-pxa.c
drivers/gpio/gpio-rcar.c
drivers/gpio/gpio-rda.c
drivers/gpio/gpio-realtek-otto.c
drivers/gpio/gpio-rockchip.c [new file with mode: 0644]
drivers/gpio/gpio-sch.c
drivers/gpio/gpio-sodaville.c
drivers/gpio/gpio-sprd.c
drivers/gpio/gpio-tb10x.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpio-tegra186.c
drivers/gpio/gpio-tqmx86.c
drivers/gpio/gpio-vf610.c
drivers/gpio/gpio-ws16c48.c
drivers/gpio/gpio-xgs-iproc.c
drivers/gpio/gpio-xilinx.c
drivers/gpio/gpio-xlp.c
drivers/gpio/gpio-zynq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/i915_globals.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/kmb/kmb_drv.c
drivers/gpu/drm/kmb/kmb_drv.h
drivers/gpu/drm/kmb/kmb_plane.c
drivers/gpu/drm/mediatek/mtk_disp_color.c
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
drivers/gpu/drm/mediatek/mtk_drm_plane.c
drivers/gpu/drm/meson/meson_registers.h
drivers/gpu/drm/meson/meson_viu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/head.c
drivers/gpu/drm/nouveau/dispnv50/head.h
drivers/gpu/drm/nouveau/include/nvif/cl0080.h
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvif/client.h
drivers/gpu/drm/nouveau/include/nvif/driver.h
drivers/gpu/drm/nouveau/include/nvkm/core/client.h
drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_nvif.c
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/gpu/drm/nouveau/nouveau_usif.c
drivers/gpu/drm/nouveau/nvif/client.c
drivers/gpu/drm/nouveau/nvif/object.c
drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-cpmem.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-at91-core.c
drivers/i2c/busses/i2c-at91-master.c
drivers/i2c/busses/i2c-bcm-iproc.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-designware-common.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-highlander.c
drivers/i2c/busses/i2c-hix5hd2.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-iop3xx.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-parport.c
drivers/i2c/busses/i2c-pmcmsp.c [deleted file]
drivers/i2c/busses/i2c-qup.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/busses/i2c-sun6i-p2wi.c
drivers/i2c/busses/i2c-synquacer.c
drivers/i2c/busses/i2c-virtio.c [new file with mode: 0644]
drivers/i2c/busses/i2c-xlp9xx.c
drivers/i2c/i2c-dev.c
drivers/iio/accel/Kconfig
drivers/iio/accel/fxls8962af-core.c
drivers/iio/adc/palmas_gpadc.c
drivers/iio/adc/rn5t618-adc.c
drivers/iio/adc/ti-ads7950.c
drivers/iio/humidity/hdc100x.c
drivers/iio/imu/adis.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/uverbs_std_types_mr.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/efa/efa_main.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hns/hns_roce_cmd.c
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/irdma/Kconfig
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/sw/rxe/rxe_mcast.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_queue.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/interconnect/core.c
drivers/interconnect/qcom/icc-rpmh.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel/pasid.c
drivers/iommu/intel/pasid.h
drivers/iommu/intel/svm.c
drivers/iommu/iommu.c
drivers/iommu/s390-iommu.c
drivers/ipack/carriers/tpci200.c
drivers/irqchip/irq-alpine-msi.c
drivers/irqchip/irq-apple-aic.c
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3-mbi.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-loongson-pch-pic.c
drivers/irqchip/irq-ls-scfg-msi.c
drivers/irqchip/irq-mtk-sysirq.c
drivers/irqchip/irq-mvebu-gicp.c
drivers/irqchip/irq-mvebu-odmi.c
drivers/irqchip/irq-partition-percpu.c
drivers/irqchip/qcom-pdc.c
drivers/lightnvm/Kconfig [deleted file]
drivers/lightnvm/Makefile [deleted file]
drivers/lightnvm/core.c [deleted file]
drivers/lightnvm/pblk-cache.c [deleted file]
drivers/lightnvm/pblk-core.c [deleted file]
drivers/lightnvm/pblk-gc.c [deleted file]
drivers/lightnvm/pblk-init.c [deleted file]
drivers/lightnvm/pblk-map.c [deleted file]
drivers/lightnvm/pblk-rb.c [deleted file]
drivers/lightnvm/pblk-read.c [deleted file]
drivers/lightnvm/pblk-recovery.c [deleted file]
drivers/lightnvm/pblk-rl.c [deleted file]
drivers/lightnvm/pblk-sysfs.c [deleted file]
drivers/lightnvm/pblk-trace.h [deleted file]
drivers/lightnvm/pblk-write.c [deleted file]
drivers/lightnvm/pblk.h [deleted file]
drivers/md/Kconfig
drivers/md/bcache/Kconfig
drivers/md/bcache/btree.c
drivers/md/bcache/super.c
drivers/md/bcache/util.h
drivers/md/dm-ebs-target.c
drivers/md/dm-integrity.c
drivers/md/dm-ioctl.c
drivers/md/dm-rq.c
drivers/md/dm-table.c
drivers/md/dm-writecache.c
drivers/md/dm.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/common/videobuf2/videobuf2-core.c
drivers/media/pci/intel/ipu3/cio2-bridge.c
drivers/media/platform/atmel/Kconfig
drivers/media/platform/atmel/Makefile
drivers/media/platform/atmel/atmel-isc-base.c
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/memstick/core/ms_block.c
drivers/memstick/host/r592.c
drivers/memstick/host/tifm_ms.c
drivers/mfd/db8500-prcmu.c
drivers/mfd/fsl-imx25-tsadc.c
drivers/mfd/ioc3.c
drivers/mfd/qcom-pm8xxx.c
drivers/mmc/core/Kconfig
drivers/mmc/core/block.c
drivers/mmc/core/core.c
drivers/mmc/core/core.h
drivers/mmc/core/crypto.c
drivers/mmc/core/host.c
drivers/mmc/core/host.h
drivers/mmc/core/mmc.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/core/mmc_ops.h
drivers/mmc/core/pwrseq_sd8787.c
drivers/mmc/core/queue.c
drivers/mmc/core/sdio_cis.c
drivers/mmc/host/cqhci-crypto.h
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/dw_mmc.h
drivers/mmc/host/mmc_spi.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mmci_stm32_sdmmc.c
drivers/mmc/host/moxart-mmc.c
drivers/mmc/host/renesas_sdhi.h
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/renesas_sdhi_internal_dmac.c
drivers/mmc/host/renesas_sdhi_sys_dmac.c
drivers/mmc/host/rtsx_pci_sdmmc.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-iproc.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-of-arasan.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/tifm_sd.c
drivers/mmc/host/usdhi6rol0.c
drivers/mmc/host/via-sdmmc.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/devices/mchp48l640.c
drivers/mtd/mtd_blkdevs.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/raw/nand_base.c
drivers/net/bareudp.c
drivers/net/can/m_can/m_can.c
drivers/net/can/usb/esd_usb2.c
drivers/net/dsa/hirschmann/hellcreek.c
drivers/net/dsa/lan9303-core.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz8795_reg.h
drivers/net/dsa/microchip/ksz_common.h
drivers/net/dsa/mt7530.c
drivers/net/dsa/mv88e6xxx/serdes.c
drivers/net/dsa/qca/ar9331.c
drivers/net/dsa/sja1105/sja1105_dynamic_config.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_mdio.c
drivers/net/ethernet/apm/xgene-v2/main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
drivers/net/ethernet/cadence/macb_ptp.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_devlink.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
drivers/net/ethernet/marvell/prestera/prestera_devlink.c
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_io.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/rocker/rocker_main.c
drivers/net/ethernet/rocker/rocker_ofdpa.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/ti/am65-cpsw-switchdev.c
drivers/net/ethernet/ti/cpsw_new.c
drivers/net/ethernet/ti/cpsw_priv.h
drivers/net/ethernet/ti/cpsw_switchdev.c
drivers/net/hamradio/6pack.c
drivers/net/ieee802154/mac802154_hwsim.c
drivers/net/mdio/mdio-mux.c
drivers/net/pcs/pcs-xpcs.c
drivers/net/phy/mediatek-ge.c
drivers/net/phy/micrel.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/asix.h
drivers/net/usb/asix_common.c
drivers/net/usb/asix_devices.c
drivers/net/usb/lan78xx.c
drivers/net/usb/pegasus.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
drivers/net/wwan/iosm/iosm_ipc_mmio.h
drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
drivers/net/wwan/iosm/iosm_ipc_wwan.c
drivers/net/wwan/mhi_wwan_ctrl.c
drivers/net/wwan/wwan_core.c
drivers/nvdimm/namespace_devs.c
drivers/nvme/host/Kconfig
drivers/nvme/host/Makefile
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/lightnvm.c [deleted file]
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/host/trace.c
drivers/nvme/target/Kconfig
drivers/nvme/target/core.c
drivers/nvme/target/fabrics-cmd.c
drivers/nvme/target/loop.c
drivers/nvme/target/trace.c
drivers/nvme/target/zns.c
drivers/opp/core.c
drivers/opp/of.c
drivers/pci/controller/pci-ixp4xx.c
drivers/pci/msi.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.c
drivers/pci/quirks.c
drivers/pcmcia/i82092.c
drivers/pinctrl/actions/pinctrl-owl.c
drivers/pinctrl/bcm/pinctrl-bcm2835.c
drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-lynxpoint.c
drivers/pinctrl/intel/pinctrl-tigerlake.c
drivers/pinctrl/mediatek/mtk-eint.c
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
drivers/pinctrl/nomadik/pinctrl-nomadik.c
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-at91.c
drivers/pinctrl/pinctrl-equilibrium.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/pinctrl-k210.c
drivers/pinctrl/pinctrl-microchip-sgpio.c
drivers/pinctrl/pinctrl-ocelot.c
drivers/pinctrl/pinctrl-oxnas.c
drivers/pinctrl/pinctrl-pic32.c
drivers/pinctrl/pinctrl-pistachio.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/pinctrl-rockchip.h [new file with mode: 0644]
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-st.c
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/samsung/pinctrl-exynos.c
drivers/pinctrl/samsung/pinctrl-s3c24xx.c
drivers/pinctrl/samsung/pinctrl-s3c64xx.c
drivers/pinctrl/spear/pinctrl-plgpio.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/platform/x86/Kconfig
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/dual_accel_detect.h [new file with mode: 0644]
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/pcengines-apuv2.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/reset/Kconfig
drivers/power/reset/Makefile
drivers/power/reset/linkstation-poweroff.c
drivers/power/reset/tps65086-restart.c [new file with mode: 0644]
drivers/power/supply/Kconfig
drivers/power/supply/Makefile
drivers/power/supply/ab8500-bm.h
drivers/power/supply/ab8500_bmdata.c
drivers/power/supply/ab8500_btemp.c
drivers/power/supply/ab8500_chargalg.c [new file with mode: 0644]
drivers/power/supply/ab8500_charger.c
drivers/power/supply/ab8500_fg.c
drivers/power/supply/abx500_chargalg.c [deleted file]
drivers/power/supply/axp288_charger.c
drivers/power/supply/axp288_fuel_gauge.c
drivers/power/supply/bq24735-charger.c
drivers/power/supply/cros_peripheral_charger.c [new file with mode: 0644]
drivers/power/supply/cw2015_battery.c
drivers/power/supply/max17042_battery.c
drivers/power/supply/mt6360_charger.c [new file with mode: 0644]
drivers/power/supply/power_supply_core.c
drivers/power/supply/qcom_smbb.c
drivers/power/supply/rn5t618_power.c
drivers/power/supply/sbs-battery.c
drivers/power/supply/sc27xx_fuel_gauge.c
drivers/power/supply/smb347-charger.c
drivers/powercap/intel_rapl_common.c
drivers/powercap/intel_rapl_msr.c
drivers/ptp/Kconfig
drivers/ptp/ptp_sysfs.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/bd718x7-regulator.c
drivers/regulator/da9063-regulator.c
drivers/regulator/dbx500-prcmu.c
drivers/regulator/devres.c
drivers/regulator/fixed.c
drivers/regulator/hi6421v600-regulator.c
drivers/regulator/irq_helpers.c
drivers/regulator/mt6358-regulator.c
drivers/regulator/mt6359-regulator.c
drivers/regulator/mt6397-regulator.c
drivers/regulator/rt5033-regulator.c
drivers/regulator/rt6245-regulator.c
drivers/regulator/rtq2134-regulator.c [new file with mode: 0644]
drivers/regulator/rtq6752-regulator.c [new file with mode: 0644]
drivers/regulator/sy7636a-regulator.c
drivers/regulator/sy8824x.c
drivers/regulator/sy8827n.c
drivers/regulator/tps65910-regulator.c
drivers/regulator/vctrl-regulator.c
drivers/reset/Kconfig
drivers/reset/reset-zynqmp.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_genhd.c
drivers/s390/block/dasd_ioctl.c
drivers/s390/block/dcssblk.c
drivers/s390/char/sclp.c
drivers/s390/char/sclp.h
drivers/s390/char/sclp_cmd.c
drivers/s390/char/sclp_config.c
drivers/s390/char/sclp_early_core.c
drivers/s390/char/zcore.c
drivers/s390/cio/css.c
drivers/s390/cio/qdio.h
drivers/s390/cio/qdio_debug.c
drivers/s390/cio/qdio_main.c
drivers/s390/cio/qdio_setup.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/vfio_ap_ops.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_ccamisc.c
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex2c.c
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/crypto/zcrypt_queue.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/scsi/zfcp_qdio.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/megaraid/megaraid_mm.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/sr.c
drivers/scsi/st.c
drivers/scsi/st.h
drivers/scsi/storvsc_drv.c
drivers/slimbus/messaging.c
drivers/slimbus/qcom-ngd-ctrl.c
drivers/soc/Makefile
drivers/soc/fsl/qe/qe_ic.c
drivers/soc/imx/soc-imx8m.c
drivers/soc/ixp4xx/ixp4xx-npe.c
drivers/soc/ixp4xx/ixp4xx-qmgr.c
drivers/soc/tegra/Kconfig
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-bcm2835aux.c
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-coldfire-qspi.c
drivers/spi/spi-davinci.c
drivers/spi/spi-ep93xx.c
drivers/spi/spi-fsi.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-geni-qcom.c
drivers/spi/spi-imx.c
drivers/spi/spi-meson-spicc.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-mux.c
drivers/spi/spi-mxic.c
drivers/spi/spi-orion.c
drivers/spi/spi-pic32.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-rockchip-sfc.c [new file with mode: 0644]
drivers/spi/spi-sprd-adi.c
drivers/spi/spi-stm32.c
drivers/spi/spi-tegra114.c
drivers/spi/spi-tegra20-slink.c
drivers/spi/spi-zynq-qspi.c
drivers/spi/spi.c
drivers/staging/media/atomisp/pci/atomisp_ioctl.c
drivers/staging/media/av7110/audio.h [deleted file]
drivers/staging/media/av7110/av7110.h
drivers/staging/media/av7110/osd.h [deleted file]
drivers/staging/media/av7110/video.h [deleted file]
drivers/staging/mt7621-pci/pci-mt7621.c
drivers/staging/rtl8712/hal_init.c
drivers/staging/rtl8712/rtl8712_led.c
drivers/staging/rtl8712/rtl871x_led.h
drivers/staging/rtl8712/rtl871x_pwrctrl.c
drivers/staging/rtl8712/rtl871x_pwrctrl.h
drivers/staging/rtl8712/usb_intf.c
drivers/staging/rtl8723bs/Kconfig
drivers/staging/rtl8723bs/hal/sdio_ops.c
drivers/tee/optee/call.c
drivers/tee/optee/core.c
drivers/tee/optee/optee_private.h
drivers/tee/optee/rpc.c
drivers/tee/optee/shm_pool.c
drivers/tee/tee_shm.c
drivers/thermal/intel/therm_throt.c
drivers/thermal/intel/thermal_interrupt.h
drivers/thunderbolt/switch.c
drivers/tty/serial/8250/8250_aspeed_vuart.c
drivers/tty/serial/8250/8250_fsl.c
drivers/tty/serial/8250/8250_mtk.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/max310x.c
drivers/tty/serial/serial-tegra.c
drivers/tty/vt/vt.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/cdns3/cdns3-ep0.c
drivers/usb/cdns3/cdnsp-gadget.c
drivers/usb/cdns3/cdnsp-gadget.h
drivers/usb/cdns3/cdnsp-ring.c
drivers/usb/class/usbtmc.c
drivers/usb/common/usb-otg-fsm.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/u_audio.c
drivers/usb/gadget/udc/max3420_udc.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/xhci-pci-renesas.c
drivers/usb/musb/omap2430.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/typec/tcpm/tcpm.c
drivers/vdpa/ifcvf/ifcvf_main.c
drivers/vdpa/mlx5/core/mr.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vdpa/virtio_pci/vp_vdpa.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/vhost/vringh.c
drivers/virt/acrn/vm.c
drivers/virtio/virtio.c
drivers/virtio/virtio_mem.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_ring.c
drivers/virtio/virtio_vdpa.c
drivers/xen/events/events_base.c
fs/9p/vfs_file.c
fs/Kconfig
fs/Makefile
fs/afs/flock.c
fs/aio.c
fs/block_dev.c
fs/btrfs/Makefile
fs/btrfs/acl.c
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/block-group.c
fs/btrfs/btrfs_inode.h
fs/btrfs/check-integrity.c
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/dir-item.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/lzo.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/qgroup.c
fs/btrfs/raid56.c
fs/btrfs/ref-verify.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/space-info.c
fs/btrfs/struct-funcs.c
fs/btrfs/subpage.c
fs/btrfs/subpage.h
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/tests/qgroup-tests.c
fs/btrfs/tree-checker.c
fs/btrfs/tree-log.c
fs/btrfs/verity.c [new file with mode: 0644]
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/btrfs/zlib.c
fs/btrfs/zoned.c
fs/btrfs/zoned.h
fs/btrfs/zstd.c
fs/buffer.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/file.c
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/mdsmap.c
fs/ceph/snap.c
fs/ceph/super.h
fs/cifs/Kconfig
fs/cifs/cifs_debug.c
fs/cifs/cifs_swn.c
fs/cifs/cifs_unicode.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/fs_context.c
fs/cifs/fs_context.h
fs/cifs/fscache.c
fs/cifs/fscache.h
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smb2maperror.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smbencrypt.c
fs/cifs_common/Makefile [new file with mode: 0644]
fs/cifs_common/arc4.h [new file with mode: 0644]
fs/cifs_common/cifs_arc4.c [new file with mode: 0644]
fs/cifs_common/cifs_md4.c [new file with mode: 0644]
fs/cifs_common/md4.h [new file with mode: 0644]
fs/configfs/file.c
fs/crypto/fname.c
fs/crypto/hooks.c
fs/dax.c
fs/eventfd.c
fs/exec.c
fs/ext2/Kconfig
fs/ext2/ext2.h
fs/ext2/file.c
fs/ext2/inode.c
fs/ext2/super.c
fs/ext4/ext4.h
fs/ext4/ext4_jbd2.c
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mmp.c
fs/ext4/namei.c
fs/ext4/super.c
fs/ext4/symlink.c
fs/ext4/truncate.h
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/namei.c
fs/f2fs/super.c
fs/f2fs/sysfs.c
fs/fat/fatent.c
fs/fcntl.c
fs/file.c
fs/fs-writeback.c
fs/fuse/dax.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/aops.c
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/lock_dlm.c
fs/gfs2/log.c
fs/gfs2/lops.c
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/gfs2/super.c
fs/gfs2/super.h
fs/gfs2/util.c
fs/gfs2/util.h
fs/hpfs/Kconfig
fs/hpfs/file.c
fs/inode.c
fs/internal.h
fs/io-wq.c
fs/io-wq.h
fs/io_uring.c
fs/ioctl.c
fs/iomap/Makefile
fs/iomap/apply.c [deleted file]
fs/iomap/buffered-io.c
fs/iomap/direct-io.c
fs/iomap/fiemap.c
fs/iomap/iter.c [new file with mode: 0644]
fs/iomap/seek.c
fs/iomap/swapfile.c
fs/iomap/trace.h
fs/isofs/inode.c
fs/isofs/isofs.h
fs/isofs/joliet.c
fs/ksmbd/Kconfig [new file with mode: 0644]
fs/ksmbd/Makefile [new file with mode: 0644]
fs/ksmbd/asn1.c [new file with mode: 0644]
fs/ksmbd/asn1.h [new file with mode: 0644]
fs/ksmbd/auth.c [new file with mode: 0644]
fs/ksmbd/auth.h [new file with mode: 0644]
fs/ksmbd/connection.c [new file with mode: 0644]
fs/ksmbd/connection.h [new file with mode: 0644]
fs/ksmbd/crypto_ctx.c [new file with mode: 0644]
fs/ksmbd/crypto_ctx.h [new file with mode: 0644]
fs/ksmbd/glob.h [new file with mode: 0644]
fs/ksmbd/ksmbd_netlink.h [new file with mode: 0644]
fs/ksmbd/ksmbd_spnego_negtokeninit.asn1 [new file with mode: 0644]
fs/ksmbd/ksmbd_spnego_negtokentarg.asn1 [new file with mode: 0644]
fs/ksmbd/ksmbd_work.c [new file with mode: 0644]
fs/ksmbd/ksmbd_work.h [new file with mode: 0644]
fs/ksmbd/mgmt/ksmbd_ida.c [new file with mode: 0644]
fs/ksmbd/mgmt/ksmbd_ida.h [new file with mode: 0644]
fs/ksmbd/mgmt/share_config.c [new file with mode: 0644]
fs/ksmbd/mgmt/share_config.h [new file with mode: 0644]
fs/ksmbd/mgmt/tree_connect.c [new file with mode: 0644]
fs/ksmbd/mgmt/tree_connect.h [new file with mode: 0644]
fs/ksmbd/mgmt/user_config.c [new file with mode: 0644]
fs/ksmbd/mgmt/user_config.h [new file with mode: 0644]
fs/ksmbd/mgmt/user_session.c [new file with mode: 0644]
fs/ksmbd/mgmt/user_session.h [new file with mode: 0644]
fs/ksmbd/misc.c [new file with mode: 0644]
fs/ksmbd/misc.h [new file with mode: 0644]
fs/ksmbd/ndr.c [new file with mode: 0644]
fs/ksmbd/ndr.h [new file with mode: 0644]
fs/ksmbd/nterr.h [new file with mode: 0644]
fs/ksmbd/ntlmssp.h [new file with mode: 0644]
fs/ksmbd/oplock.c [new file with mode: 0644]
fs/ksmbd/oplock.h [new file with mode: 0644]
fs/ksmbd/server.c [new file with mode: 0644]
fs/ksmbd/server.h [new file with mode: 0644]
fs/ksmbd/smb2misc.c [new file with mode: 0644]
fs/ksmbd/smb2ops.c [new file with mode: 0644]
fs/ksmbd/smb2pdu.c [new file with mode: 0644]
fs/ksmbd/smb2pdu.h [new file with mode: 0644]
fs/ksmbd/smb_common.c [new file with mode: 0644]
fs/ksmbd/smb_common.h [new file with mode: 0644]
fs/ksmbd/smbacl.c [new file with mode: 0644]
fs/ksmbd/smbacl.h [new file with mode: 0644]
fs/ksmbd/smbfsctl.h [new file with mode: 0644]
fs/ksmbd/smbstatus.h [new file with mode: 0644]
fs/ksmbd/transport_ipc.c [new file with mode: 0644]
fs/ksmbd/transport_ipc.h [new file with mode: 0644]
fs/ksmbd/transport_rdma.c [new file with mode: 0644]
fs/ksmbd/transport_rdma.h [new file with mode: 0644]
fs/ksmbd/transport_tcp.c [new file with mode: 0644]
fs/ksmbd/transport_tcp.h [new file with mode: 0644]
fs/ksmbd/unicode.c [new file with mode: 0644]
fs/ksmbd/unicode.h [new file with mode: 0644]
fs/ksmbd/uniupr.h [new file with mode: 0644]
fs/ksmbd/vfs.c [new file with mode: 0644]
fs/ksmbd/vfs.h [new file with mode: 0644]
fs/ksmbd/vfs_cache.c [new file with mode: 0644]
fs/ksmbd/vfs_cache.h [new file with mode: 0644]
fs/ksmbd/xattr.h [new file with mode: 0644]
fs/lockd/svc.c
fs/lockd/svc4proc.c
fs/lockd/svclock.c
fs/lockd/svcproc.c
fs/lockd/svcsubs.c
fs/locks.c
fs/namei.c
fs/namespace.c
fs/nfs/export.c
fs/nfs/file.c
fs/nfsd/lockd.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsproc.c
fs/nfsd/trace.h
fs/nfsd/vfs.c
fs/nilfs2/super.c
fs/notify/fanotify/fanotify_user.c
fs/notify/fsnotify.c
fs/notify/fsnotify.h
fs/notify/inotify/inotify_user.c
fs/notify/mark.c
fs/ocfs2/locks.c
fs/open.c
fs/overlayfs/export.c
fs/overlayfs/file.c
fs/overlayfs/readdir.c
fs/pipe.c
fs/read_write.c
fs/remap_range.c
fs/squashfs/block.c
fs/squashfs/lz4_wrapper.c
fs/squashfs/lzo_wrapper.c
fs/squashfs/xz_wrapper.c
fs/squashfs/zlib_wrapper.c
fs/squashfs/zstd_wrapper.c
fs/super.c
fs/timerfd.c
fs/ubifs/file.c
fs/udf/dir.c
fs/udf/ecma_167.h
fs/udf/inode.c
fs/udf/misc.c
fs/udf/namei.c
fs/udf/osta_udf.h
fs/udf/super.c
fs/udf/udf_sb.h
fs/udf/udfdecl.h
fs/udf/unicode.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_file.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_super.c
fs/zonefs/super.c
fs/zonefs/zonefs.h
include/acpi/acnames.h
include/acpi/acoutput.h
include/acpi/acpi_io.h
include/acpi/acpixf.h
include/acpi/actbl1.h
include/acpi/actbl2.h
include/acpi/actbl3.h
include/asm-generic/atomic-instrumented.h [deleted file]
include/asm-generic/atomic-long.h [deleted file]
include/asm-generic/bitops/atomic.h
include/asm-generic/bitops/lock.h
include/asm-generic/bitops/non-atomic.h
include/asm-generic/vmlinux.lds.h
include/crypto/public_key.h
include/crypto/sm4.h
include/dt-bindings/clock/ingenic,sysost.h
include/dt-bindings/power/summit,smb347-charger.h
include/linux/acpi.h
include/linux/atomic-arch-fallback.h [deleted file]
include/linux/atomic.h
include/linux/atomic/atomic-arch-fallback.h [new file with mode: 0644]
include/linux/atomic/atomic-instrumented.h [new file with mode: 0644]
include/linux/atomic/atomic-long.h [new file with mode: 0644]
include/linux/backing-dev.h
include/linux/bio.h
include/linux/blk-cgroup.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/bpf-cgroup.h
include/linux/bvec.h
include/linux/cmdline-parser.h [deleted file]
include/linux/cpuhotplug.h
include/linux/cpuset.h
include/linux/debug_locks.h
include/linux/device-mapper.h
include/linux/device.h
include/linux/edac.h
include/linux/energy_model.h
include/linux/errno.h
include/linux/eventfd.h
include/linux/exportfs.h
include/linux/fanotify.h
include/linux/fiemap.h
include/linux/fs.h
include/linux/fscrypt.h
include/linux/fsnotify.h
include/linux/ftrace.h
include/linux/genhd.h
include/linux/hrtimer.h
include/linux/inetdevice.h
include/linux/interrupt.h
include/linux/io_uring.h
include/linux/iomap.h
include/linux/ioprio.h
include/linux/irq.h
include/linux/kfence.h
include/linux/libata.h
include/linux/lightnvm.h [deleted file]
include/linux/linear_range.h
include/linux/local_lock_internal.h
include/linux/lockd/bind.h
include/linux/lockd/lockd.h
include/linux/memcontrol.h
include/linux/mfd/rt5033-private.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc_vdpa.h
include/linux/mmc/card.h
include/linux/mmc/core.h
include/linux/mmc/host.h
include/linux/mmu_context.h
include/linux/moduleparam.h
include/linux/msi.h
include/linux/mutex.h
include/linux/namei.h
include/linux/netfilter/ipset/ip_set.h
include/linux/notifier.h
include/linux/oid_registry.h
include/linux/once.h
include/linux/padata.h
include/linux/pci_ids.h
include/linux/pid.h
include/linux/pipe_fs_i.h
include/linux/platform_data/cros_ec_commands.h
include/linux/platform_data/mmc-esdhc-imx.h [deleted file]
include/linux/platform_data/spi-mt65xx.h
include/linux/pm_domain.h
include/linux/posix-timers.h
include/linux/power/max17042_battery.h
include/linux/power_supply.h
include/linux/preempt.h
include/linux/property.h
include/linux/rbtree.h
include/linux/rbtree_types.h [new file with mode: 0644]
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/regmap.h
include/linux/regulator/consumer.h
include/linux/regulator/driver.h
include/linux/regulator/machine.h
include/linux/resctrl.h
include/linux/rtmutex.h
include/linux/rwbase_rt.h [new file with mode: 0644]
include/linux/rwlock_rt.h [new file with mode: 0644]
include/linux/rwlock_types.h
include/linux/rwsem.h
include/linux/sched.h
include/linux/sched/signal.h
include/linux/sched/sysctl.h
include/linux/sched/wake_q.h
include/linux/security.h
include/linux/serial_core.h
include/linux/socket.h
include/linux/spi/spi.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h
include/linux/spinlock_rt.h [new file with mode: 0644]
include/linux/spinlock_types.h
include/linux/spinlock_types_raw.h [new file with mode: 0644]
include/linux/srcutiny.h
include/linux/static_call.h
include/linux/sunrpc/msg_prot.h
include/linux/sunrpc/svc.h
include/linux/sunrpc/svc_rdma.h
include/linux/sunrpc/xdr.h
include/linux/sunrpc/xprt.h
include/linux/sysctl.h
include/linux/tee_drv.h
include/linux/units.h
include/linux/usb/otg-fsm.h
include/linux/vdpa.h
include/linux/virtio.h
include/linux/vringh.h
include/linux/wait.h
include/linux/writeback.h
include/linux/ww_mutex.h
include/net/bluetooth/hci_core.h
include/net/flow_offload.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/netns/conntrack.h
include/net/netns/xfrm.h
include/net/pkt_cls.h
include/net/psample.h
include/trace/events/btrfs.h
include/trace/events/kyber.h
include/trace/events/mmflags.h
include/trace/events/sunrpc.h
include/trace/trace_events.h
include/uapi/linux/btrfs.h
include/uapi/linux/btrfs_tree.h
include/uapi/linux/dvb/audio.h [new file with mode: 0644]
include/uapi/linux/dvb/osd.h [new file with mode: 0644]
include/uapi/linux/dvb/video.h [new file with mode: 0644]
include/uapi/linux/fanotify.h
include/uapi/linux/fs.h
include/uapi/linux/io_uring.h
include/uapi/linux/ioprio.h [new file with mode: 0644]
include/uapi/linux/lightnvm.h [deleted file]
include/uapi/linux/mount.h
include/uapi/linux/neighbour.h
include/uapi/linux/netfilter/nfnetlink_hook.h
include/uapi/linux/nfsd/nfsfh.h
include/uapi/linux/prctl.h
include/uapi/linux/virtio_i2c.h [new file with mode: 0644]
include/uapi/linux/virtio_ids.h
init/do_mounts.c
init/init_task.c
init/main.c
kernel/Kconfig.locks
kernel/audit.h
kernel/audit_tree.c
kernel/bpf/core.c
kernel/bpf/hashtab.c
kernel/bpf/helpers.c
kernel/bpf/verifier.c
kernel/cfi.c
kernel/cgroup/cpuset.c
kernel/cgroup/rstat.c
kernel/cpu.c
kernel/cpu_pm.c
kernel/cred.c
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/irq/affinity.c
kernel/irq/chip.c
kernel/irq/cpuhotplug.c
kernel/irq/generic-chip.c
kernel/irq/ipi.c
kernel/irq/irqdesc.c
kernel/irq/irqdomain.c
kernel/irq/manage.c
kernel/irq/matrix.c
kernel/irq/msi.c
kernel/irq/pm.c
kernel/irq/proc.c
kernel/irq/timings.c
kernel/kcsan/debugfs.c
kernel/locking/Makefile
kernel/locking/locktorture.c
kernel/locking/mutex-debug.c
kernel/locking/mutex-debug.h [deleted file]
kernel/locking/mutex.c
kernel/locking/mutex.h
kernel/locking/rtmutex.c
kernel/locking/rtmutex_api.c [new file with mode: 0644]
kernel/locking/rtmutex_common.h
kernel/locking/rwbase_rt.c [new file with mode: 0644]
kernel/locking/rwsem.c
kernel/locking/semaphore.c
kernel/locking/spinlock.c
kernel/locking/spinlock_debug.c
kernel/locking/spinlock_rt.c [new file with mode: 0644]
kernel/locking/ww_mutex.h [new file with mode: 0644]
kernel/locking/ww_rt_mutex.c [new file with mode: 0644]
kernel/notifier.c
kernel/padata.c
kernel/params.c
kernel/pid.c
kernel/power/energy_model.c
kernel/power/main.c
kernel/power/suspend.c
kernel/power/suspend_test.c
kernel/rcu/rcuscale.c
kernel/rcu/rcutorture.c
kernel/rcu/refscale.c
kernel/rcu/srcutiny.c
kernel/rcu/tasks.h
kernel/rcu/tree.c
kernel/rcu/tree_nocb.h [new file with mode: 0644]
kernel/rcu/tree_plugin.h
kernel/rcu/tree_stall.h
kernel/scftorture.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/deadline.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/sched.h
kernel/sched/topology.c
kernel/seccomp.c
kernel/signal.c
kernel/smp.c
kernel/smpboot.c
kernel/softirq.c
kernel/sys.c
kernel/sysctl.c
kernel/time/clocksource-wdtest.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/jiffies.c
kernel/time/posix-cpu-timers.c
kernel/time/posix-timers.c
kernel/time/tick-common.c
kernel/time/tick-internal.h
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/torture.c
kernel/trace/Kconfig
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events_hist.c
kernel/trace/trace_hwlat.c
kernel/trace/trace_osnoise.c
kernel/tracepoint.c
kernel/ucount.c
lib/Kconfig.debug
lib/crypto/Kconfig
lib/crypto/Makefile
lib/crypto/blake2s.c
lib/crypto/chacha20poly1305.c
lib/crypto/curve25519.c
lib/crypto/sm4.c [new file with mode: 0644]
lib/debugobjects.c
lib/devmem_is_allowed.c
lib/linear_ranges.c
lib/mpi/mpiutil.c
lib/once.c
lib/string.c
lib/test_lockup.c
mm/backing-dev.c
mm/filemap.c
mm/gup.c
mm/hugetlb.c
mm/kfence/kfence_test.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/mmap.c
mm/nommu.c
mm/page-writeback.c
mm/page_alloc.c
mm/readahead.c
mm/rmap.c
mm/shmem.c
mm/slub.c
mm/swap_slots.c
mm/swap_state.c
mm/truncate.c
mm/vmscan.c
mm/vmstat.c
net/bluetooth/hci_core.c
net/bluetooth/hci_sock.c
net/bluetooth/hci_sysfs.c
net/bpf/test_run.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/netfilter/nf_conntrack_bridge.c
net/core/link_watch.c
net/core/page_pool.c
net/core/rtnetlink.c
net/dccp/dccp.h
net/dsa/slave.c
net/ieee802154/socket.c
net/ipv4/cipso_ipv4.c
net/ipv4/igmp.c
net/ipv4/ip_gre.c
net/ipv4/route.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_offload.c
net/ipv4/udp_offload.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/route.c
net/mac80211/main.c
net/mptcp/options.c
net/mptcp/pm_netlink.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipmark.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netnet.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nfnetlink_hook.c
net/openvswitch/flow.c
net/openvswitch/vport.c
net/qrtr/qrtr.c
net/rds/ib_frmr.c
net/sched/act_mirred.c
net/sched/sch_cake.c
net/sched/sch_ets.c
net/sched/sch_generic.c
net/sched/sch_taprio.c
net/sctp/auth.c
net/smc/af_smc.c
net/smc/smc_core.c
net/smc/smc_core.h
net/smc/smc_llc.c
net/smc/smc_tx.c
net/smc/smc_wr.c
net/socket.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/debugfs.c
net/sunrpc/fail.h [new file with mode: 0644]
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/svc_rdma_rw.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/sunrpc/xprtsock.c
net/tipc/link.c
net/tipc/socket.c
net/vmw_vsock/virtio_transport.c
net/vmw_vsock/virtio_transport_common.c
net/xfrm/xfrm_compat.c
net/xfrm/xfrm_ipcomp.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
samples/trace_events/trace-events-sample.h
scripts/atomic/check-atomics.sh
scripts/atomic/fallbacks/acquire
scripts/atomic/fallbacks/add_negative
scripts/atomic/fallbacks/add_unless
scripts/atomic/fallbacks/andnot
scripts/atomic/fallbacks/dec
scripts/atomic/fallbacks/dec_and_test
scripts/atomic/fallbacks/dec_if_positive
scripts/atomic/fallbacks/dec_unless_positive
scripts/atomic/fallbacks/fence
scripts/atomic/fallbacks/fetch_add_unless
scripts/atomic/fallbacks/inc
scripts/atomic/fallbacks/inc_and_test
scripts/atomic/fallbacks/inc_not_zero
scripts/atomic/fallbacks/inc_unless_negative
scripts/atomic/fallbacks/read_acquire
scripts/atomic/fallbacks/release
scripts/atomic/fallbacks/set_release
scripts/atomic/fallbacks/sub_and_test
scripts/atomic/fallbacks/try_cmpxchg
scripts/atomic/gen-atomic-fallback.sh
scripts/atomic/gen-atomic-instrumented.sh
scripts/atomic/gen-atomic-long.sh
scripts/atomic/gen-atomics.sh
scripts/checkversion.pl
scripts/recordmcount.pl
scripts/tracing/draw_functrace.py
security/integrity/platform_certs/efi_parser.c
security/security.c
security/selinux/hooks.c
security/selinux/ss/policydb.c
security/selinux/ss/services.c
security/smack/smack.h
security/smack/smack_access.c
security/smack/smack_lsm.c
sound/core/memalloc.c
sound/core/pcm_native.c
sound/core/seq/seq_ports.c
sound/firewire/oxfw/oxfw-stream.c
sound/firewire/oxfw/oxfw.c
sound/firewire/oxfw/oxfw.h
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/soc/Kconfig
sound/soc/amd/acp-da7219-max98357a.c
sound/soc/amd/acp-pcm-dma.c
sound/soc/amd/raven/acp3x-pcm-dma.c
sound/soc/amd/renoir/acp3x-pdm-dma.c
sound/soc/amd/renoir/rn-pci-acp3x.c
sound/soc/codecs/Kconfig
sound/soc/codecs/Makefile
sound/soc/codecs/cs42l42.c
sound/soc/codecs/cs42l42.h
sound/soc/codecs/nau8824.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/tlv320aic31xx.c
sound/soc/codecs/tlv320aic32x4.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/intel/boards/sof_da7219_max98373.c
sound/soc/kirkwood/kirkwood-dma.c
sound/soc/soc-component.c
sound/soc/sof/intel/Kconfig
sound/soc/sof/intel/hda-ipc.c
sound/soc/sof/intel/hda.c
sound/soc/uniphier/aio-dma.c
sound/soc/xilinx/xlnx_formatter_pcm.c
sound/usb/card.c
sound/usb/clock.c
sound/usb/mixer.c
sound/usb/mixer_scarlett_gen2.c
sound/usb/quirks.c
tools/include/nolibc/nolibc.h
tools/io_uring/io_uring-cp.c
tools/lib/bpf/btf.c
tools/lib/bpf/libbpf_probes.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/verifier/dead_code.c
tools/testing/selftests/kvm/include/x86_64/hyperv.h
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
tools/testing/selftests/kvm/x86_64/hyperv_features.c
tools/testing/selftests/move_mount_set_group/.gitignore [new file with mode: 0644]
tools/testing/selftests/move_mount_set_group/Makefile [new file with mode: 0644]
tools/testing/selftests/move_mount_set_group/config [new file with mode: 0644]
tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c [new file with mode: 0644]
tools/testing/selftests/net/ipsec.c
tools/testing/selftests/rcutorture/bin/jitter.sh
tools/testing/selftests/rcutorture/bin/kcsan-collapse.sh
tools/testing/selftests/rcutorture/bin/kvm-again.sh
tools/testing/selftests/rcutorture/bin/kvm-assign-cpus.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-get-cpus-script.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-scf.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm-remote-noreap.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-remote.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run-batch.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run-qemu.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/bin/torture.sh
tools/testing/selftests/rcutorture/configs/rcu/RUDE01
tools/testing/selftests/rcutorture/configs/rcu/TASKS01
tools/testing/selftests/rcutorture/configs/rcu/TASKS03
tools/testing/selftests/sgx/sigstruct.c
tools/virtio/Makefile
tools/virtio/linux/spinlock.h [new file with mode: 0644]
tools/virtio/linux/virtio.h
virt/kvm/kvm_main.c

index e34cdee..a0ed873 100644 (file)
@@ -28,6 +28,18 @@ Description:
                For more details refer Documentation/admin-guide/iostats.rst
 
 
+What:          /sys/block/<disk>/diskseq
+Date:          February 2021
+Contact:       Matteo Croce <mcroce@microsoft.com>
+Description:
+               The /sys/block/<disk>/diskseq file reports the disk
+               sequence number, which is a monotonically increasing
+               number assigned to every drive.
+               Some devices, like the loop device, refresh this number
+               every time the backing file is changed.
+               The value type is 64 bit unsigned.
+
+
 What:          /sys/block/<disk>/<part>/stat
 Date:          February 2008
 Contact:       Jerome Marchand <jmarchan@redhat.com>
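
For illustration, a minimal user-space sketch that reads the diskseq attribute described above; the device name "sda" is only an example:

    /* Minimal sketch: read a disk's sequence number from sysfs. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned long long seq;
            FILE *f = fopen("/sys/block/sda/diskseq", "r");  /* "sda" is an example */

            if (!f) {
                    perror("diskseq");
                    return EXIT_FAILURE;
            }
            if (fscanf(f, "%llu", &seq) != 1) {
                    fclose(f);
                    return EXIT_FAILURE;
            }
            fclose(f);
            printf("disk sequence number: %llu\n", seq);
            return 0;
    }
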
index aa0fb50..7ac7b19 100644 (file)
@@ -55,6 +55,43 @@ Date:                Oct, 2016
 KernelVersion: v4.10
 Contact:       linux-ide@vger.kernel.org
 Description:
-               (RW) Write to the file to turn on or off the SATA ncq (native
-               command queueing) support. By default this feature is turned
-               off.
+               (RW) Write to the file to turn on or off the SATA NCQ (native
+               command queueing) priority support. By default this feature is
+               turned off. If the device does not support the SATA NCQ
+               priority feature, writing "1" to this file results in an error
+               (see ncq_prio_supported).
+
+
+What:          /sys/block/*/device/sas_ncq_prio_enable
+Date:          Oct, 2016
+KernelVersion: v4.10
+Contact:       linux-ide@vger.kernel.org
+Description:
+               (RW) This is the equivalent of the ncq_prio_enable attribute
+               file for SATA devices connected to a SAS host-bus-adapter
+               (HBA) implementing support for the SATA NCQ priority feature.
+               This file does not exist if the HBA driver does not implement
+               support for the SATA NCQ priority feature, regardless of the
+               device support for this feature (see sas_ncq_prio_supported).
+
+
+What:          /sys/block/*/device/ncq_prio_supported
+Date:          Aug, 2021
+KernelVersion: v5.15
+Contact:       linux-ide@vger.kernel.org
+Description:
+               (RO) Indicates if the device supports the SATA NCQ (native
+               command queueing) priority feature.
+
+
+What:          /sys/block/*/device/sas_ncq_prio_supported
+Date:          Aug, 2021
+KernelVersion: v5.15
+Contact:       linux-ide@vger.kernel.org
+Description:
+               (RO) This is the equivalent of the ncq_prio_supported attribute
+               file for SATA devices connected to a SAS host-bus-adapter
+               (HBA) implementing support for the SATA NCQ priority feature.
+               This file does not exist if the HBA driver does not implement
+               support for the SATA NCQ priority feature, regardless of the
+               device support for this feature.
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-uncore b/Documentation/ABI/testing/sysfs-bus-event_source-devices-uncore
new file mode 100644 (file)
index 0000000..b56e8f0
--- /dev/null
@@ -0,0 +1,13 @@
+What:          /sys/bus/event_source/devices/uncore_*/alias
+Date:          June 2021
+KernelVersion: 5.15
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:   Read-only.  An attribute to describe the alias name of
+               the uncore PMU if an alias exists on some platforms.
+               The 'perf(1)' tool should treat both names the same.
+               They both can be used to access the uncore PMU.
+
+               Example:
+
+               $ cat /sys/devices/uncore_cha_2/alias
+               uncore_type_0_2
index 194ca70..ff30728 100644 (file)
@@ -28,3 +28,17 @@ Description:
                value comes from an ACPI _PXM method or a similar firmware
                source. Initial users for this file would be devices like
                arm smmu which are populated by arm64 acpi_iort.
+
+What:          /sys/bus/platform/devices/.../msi_irqs/
+Date:          August 2021
+Contact:       Barry Song <song.bao.hua@hisilicon.com>
+Description:
+               The /sys/devices/.../msi_irqs directory contains a variable set
+               of files, with each file being named after a corresponding msi
+               irq vector allocated to that device.
+
+What:          /sys/bus/platform/devices/.../msi_irqs/<N>
+Date:          August 2021
+Contact:       Barry Song <song.bao.hua@hisilicon.com>
+Description:
+               This attribute will show "msi" if <N> is a valid msi irq
index 1418343..53c6b10 100644 (file)
@@ -111,3 +111,43 @@ Contact:   linux-acpi@vger.kernel.org
 Description:
                (RW) The PCH FIVR (Fully Integrated Voltage Regulator) switching frequency in MHz,
                when FIVR clock is 38.4MHz.
+
+What:          /sys/bus/platform/devices/INTC1045:00/pch_fivr_switch_frequency/fivr_switching_freq_mhz
+Date:          September, 2021
+KernelVersion: v5.15
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) Get the FIVR switching control frequency in MHz.
+
+What:          /sys/bus/platform/devices/INTC1045:00/pch_fivr_switch_frequency/fivr_switching_fault_status
+Date:          September, 2021
+KernelVersion: v5.15
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) Read the FIVR switching frequency control fault status.
+
+What:          /sys/bus/platform/devices/INTC1045:00/pch_fivr_switch_frequency/ssc_clock_info
+Date:          September, 2021
+KernelVersion: v5.15
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) Presents SSC (spread spectrum clock) information for EMI
+               (Electromagnetic interference) control. This is a bit mask.
+               Bits    Description
+               [7:0]   Sets clock spectrum spread percentage:
+                       0x00=0.2% , 0x3F=10%
+                       1 LSB = 0.1% increase in spread (for
+                       settings 0x01 thru 0x1C)
+                       1 LSB = 0.2% increase in spread (for
+                       settings 0x1E thru 0x3F)
+               [8]     When set to 1, enables spread
+                       spectrum clock
+               [9]     0: Triangle mode. FFC frequency
+                       walks around the Fcenter in a linear
+                       fashion
+                       1: Random walk mode. FFC frequency
+                       changes randomly within the SSC
+                       (Spread spectrum clock) range
+               [10]    0: No white noise. 1: Add white noise
+                       to spread waveform
+               [11]    When 1, future writes are ignored.
index dae9c89..baf1d12 100644 (file)
@@ -26,3 +26,10 @@ Contact:     Hans de Goede <hdegoede@redhat.com>
 Description:   Reading this file gives the current selected profile for this
                device. Writing this file with one of the strings from
                platform_profile_choices changes the profile to the new value.
+
+               This file can be monitored for changes by polling for POLLPRI.
+               POLLPRI will be signalled on any change, whether it comes from
+               a userspace write or from another source, e.g. a hotkey-triggered
+               profile change handled either directly by the embedded controller
+               or entirely inside the kernel.
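
A minimal sketch of such monitoring from user space, assuming the attribute lives at /sys/firmware/acpi/platform_profile (the path is an assumption here) and with error handling abbreviated:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[64];
            ssize_t n;
            int fd = open("/sys/firmware/acpi/platform_profile", O_RDONLY);
            struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

            if (fd < 0)
                    return 1;
            read(fd, buf, sizeof(buf));             /* arm the sysfs notification */
            while (poll(&pfd, 1, -1) > 0) {
                    lseek(fd, 0, SEEK_SET);         /* re-read the new value */
                    n = read(fd, buf, sizeof(buf) - 1);
                    if (n > 0) {
                            buf[n] = '\0';
                            printf("profile changed: %s", buf);
                    }
            }
            return 0;
    }
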
index 11cdab0..eeb3512 100644 (file)
@@ -112,6 +112,35 @@ on PowerPC.
 The ``smp_mb__after_unlock_lock()`` invocations prevent this
 ``WARN_ON()`` from triggering.
 
++-----------------------------------------------------------------------+
+| **Quick Quiz**:                                                       |
++-----------------------------------------------------------------------+
+| But the chain of rcu_node-structure lock acquisitions guarantees      |
+| that new readers will see all of the updater's pre-grace-period       |
+| accesses and also guarantees that the updater's post-grace-period     |
+| accesses will see all of the old reader's accesses.  So why do we     |
+| need all of those calls to smp_mb__after_unlock_lock()?               |
++-----------------------------------------------------------------------+
+| **Answer**:                                                           |
++-----------------------------------------------------------------------+
+| Because we must provide ordering for RCU's polling grace-period       |
+| primitives, for example, get_state_synchronize_rcu() and              |
+| poll_state_synchronize_rcu().  Consider this code::                   |
+|                                                                       |
+|  CPU 0                                     CPU 1                      |
+|  ----                                      ----                       |
+|  WRITE_ONCE(X, 1)                          WRITE_ONCE(Y, 1)           |
+|  g = get_state_synchronize_rcu()           smp_mb()                   |
+|  while (!poll_state_synchronize_rcu(g))    r1 = READ_ONCE(X)          |
+|          continue;                                                    |
+|  r0 = READ_ONCE(Y)                                                    |
+|                                                                       |
+| RCU guarantees that the outcome r0 == 0 && r1 == 0 will not           |
+| happen, even if CPU 1 is in an RCU extended quiescent state           |
+| (idle or offline) and thus won't interact directly with the RCU       |
+| core processing at all.                                               |
++-----------------------------------------------------------------------+
+
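
As a sketch of how the polling primitives named in this quiz are typically used (illustrative only; "struct foo" and its list handling are assumptions, and the caller is assumed to hold the appropriate update-side lock):

    #include <linux/rcupdate.h>
    #include <linux/rculist.h>
    #include <linux/sched.h>
    #include <linux/slab.h>

    struct foo {
            struct list_head list;
            int data;
    };

    /* Unlink @p, then free it only after a full grace period has elapsed,
     * using the polling primitives instead of synchronize_rcu(). */
    static void foo_remove_and_free(struct foo *p)
    {
            unsigned long cookie;

            list_del_rcu(&p->list);                 /* hide @p from new readers */
            cookie = get_state_synchronize_rcu();   /* snapshot grace-period state */

            while (!poll_state_synchronize_rcu(cookie))
                    cond_resched();                 /* poll; other work could go here */

            kfree(p);                               /* no pre-existing readers remain */
    }
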
 This approach must be extended to include idle CPUs, which need
 RCU's grace-period memory ordering guarantee to extend to any
 RCU read-side critical sections preceding and following the current
index 38a3947..45278e2 100644 (file)
@@ -362,9 +362,8 @@ do_something_gp() uses rcu_dereference() to fetch from ``gp``:
       12 }
 
 The rcu_dereference() uses volatile casts and (for DEC Alpha) memory
-barriers in the Linux kernel. Should a `high-quality implementation of
-C11 ``memory_order_consume``
-[PDF] <http://www.rdrop.com/users/paulmck/RCU/consume.2015.07.13a.pdf>`__
+barriers in the Linux kernel. Should a |high-quality implementation of
+C11 memory_order_consume [PDF]|_
 ever appear, then rcu_dereference() could be implemented as a
 ``memory_order_consume`` load. Regardless of the exact implementation, a
 pointer fetched by rcu_dereference() may not be used outside of the
@@ -374,6 +373,9 @@ element has been passed from RCU to some other synchronization
 mechanism, most commonly locking or `reference
 counting <https://www.kernel.org/doc/Documentation/RCU/rcuref.txt>`__.
 
+.. |high-quality implementation of C11 memory_order_consume [PDF]| replace:: high-quality implementation of C11 ``memory_order_consume`` [PDF]
+.. _high-quality implementation of C11 memory_order_consume [PDF]: http://www.rdrop.com/users/paulmck/RCU/consume.2015.07.13a.pdf
+
 In short, updaters use rcu_assign_pointer() and readers use
 rcu_dereference(), and these two RCU API elements work together to
 ensure that readers have a consistent view of newly added data elements.
index 01cc21f..f4545b7 100644 (file)
@@ -37,7 +37,7 @@ over a rather long period of time, but improvements are always welcome!
 
 1.     Does the update code have proper mutual exclusion?
 
-       RCU does allow -readers- to run (almost) naked, but -writers- must
+       RCU does allow *readers* to run (almost) naked, but *writers* must
        still use some sort of mutual exclusion, such as:
 
        a.      locking,
@@ -73,7 +73,7 @@ over a rather long period of time, but improvements are always welcome!
        critical section is every bit as bad as letting them leak out
        from under a lock.  Unless, of course, you have arranged some
        other means of protection, such as a lock or a reference count
-       -before- letting them out of the RCU read-side critical section.
+       *before* letting them out of the RCU read-side critical section.
 
 3.     Does the update code tolerate concurrent accesses?
 
@@ -101,7 +101,7 @@ over a rather long period of time, but improvements are always welcome!
        c.      Make updates appear atomic to readers.  For example,
                pointer updates to properly aligned fields will
                appear atomic, as will individual atomic primitives.
-               Sequences of operations performed under a lock will -not-
+               Sequences of operations performed under a lock will *not*
                appear to be atomic to RCU readers, nor will sequences
                of multiple atomic primitives.
 
@@ -333,7 +333,7 @@ over a rather long period of time, but improvements are always welcome!
        for example) may be omitted.
 
 10.    Conversely, if you are in an RCU read-side critical section,
-       and you don't hold the appropriate update-side lock, you -must-
+       and you don't hold the appropriate update-side lock, you *must*
        use the "_rcu()" variants of the list macros.  Failing to do so
        will break Alpha, cause aggressive compilers to generate bad code,
        and confuse people trying to read your code.
@@ -359,12 +359,12 @@ over a rather long period of time, but improvements are always welcome!
        callback pending, then that RCU callback will execute on some
        surviving CPU.  (If this was not the case, a self-spawning RCU
        callback would prevent the victim CPU from ever going offline.)
-       Furthermore, CPUs designated by rcu_nocbs= might well -always-
+       Furthermore, CPUs designated by rcu_nocbs= might well *always*
        have their RCU callbacks executed on some other CPUs, in fact,
        for some  real-time workloads, this is the whole point of using
        the rcu_nocbs= kernel boot parameter.
 
-13.    Unlike other forms of RCU, it -is- permissible to block in an
+13.    Unlike other forms of RCU, it *is* permissible to block in an
        SRCU read-side critical section (demarked by srcu_read_lock()
        and srcu_read_unlock()), hence the "SRCU": "sleepable RCU".
        Please note that if you don't need to sleep in read-side critical
@@ -411,16 +411,16 @@ over a rather long period of time, but improvements are always welcome!
 14.    The whole point of call_rcu(), synchronize_rcu(), and friends
        is to wait until all pre-existing readers have finished before
        carrying out some otherwise-destructive operation.  It is
-       therefore critically important to -first- remove any path
+       therefore critically important to *first* remove any path
        that readers can follow that could be affected by the
-       destructive operation, and -only- -then- invoke call_rcu(),
+       destructive operation, and *only then* invoke call_rcu(),
        synchronize_rcu(), or friends.
 
        Because these primitives only wait for pre-existing readers, it
        is the caller's responsibility to guarantee that any subsequent
        readers will execute safely.
 
-15.    The various RCU read-side primitives do -not- necessarily contain
+15.    The various RCU read-side primitives do *not* necessarily contain
        memory barriers.  You should therefore plan for the CPU
        and the compiler to freely reorder code into and out of RCU
        read-side critical sections.  It is the responsibility of the
@@ -459,8 +459,8 @@ over a rather long period of time, but improvements are always welcome!
       pass in a function defined within a loadable module, then it is
        necessary to wait for all pending callbacks to be invoked after
        the last invocation and before unloading that module.  Note that
-       it is absolutely -not- sufficient to wait for a grace period!
-       The current (say) synchronize_rcu() implementation is -not-
+       it is absolutely *not* sufficient to wait for a grace period!
+       The current (say) synchronize_rcu() implementation is *not*
        guaranteed to wait for callbacks registered on other CPUs.
        Or even on the current CPU if that CPU recently went offline
        and came back online.
@@ -470,7 +470,7 @@ over a rather long period of time, but improvements are always welcome!
        -       call_rcu() -> rcu_barrier()
        -       call_srcu() -> srcu_barrier()
 
-       However, these barrier functions are absolutely -not- guaranteed
+       However, these barrier functions are absolutely *not* guaranteed
        to wait for a grace period.  In fact, if there are no call_rcu()
        callbacks waiting anywhere in the system, rcu_barrier() is within
        its rights to return immediately.
index f3e587a..0b418a5 100644 (file)
@@ -43,7 +43,7 @@ Follow these rules to keep your RCU code working properly:
        -       Set bits and clear bits down in the must-be-zero low-order
                bits of that pointer.  This clearly means that the pointer
                must have alignment constraints, for example, this does
-               -not- work in general for char* pointers.
+               *not* work in general for char* pointers.
 
        -       XOR bits to translate pointers, as is done in some
                classic buddy-allocator algorithms.
@@ -174,7 +174,7 @@ Follow these rules to keep your RCU code working properly:
                Please see the "CONTROL DEPENDENCIES" section of
                Documentation/memory-barriers.txt for more details.
 
-       -       The pointers are not equal -and- the compiler does
+       -       The pointers are not equal *and* the compiler does
                not have enough information to deduce the value of the
                pointer.  Note that the volatile cast in rcu_dereference()
                will normally prevent the compiler from knowing too much.
@@ -360,7 +360,7 @@ in turn destroying the ordering between this load and the loads of the
 return values.  This can result in "p->b" returning pre-initialization
 garbage values.
 
-In short, rcu_dereference() is -not- optional when you are going to
+In short, rcu_dereference() is *not* optional when you are going to
 dereference the resulting pointer.
 
 
index 7148e9b..5036df2 100644 (file)
@@ -32,7 +32,7 @@ warnings:
 
 -      Booting Linux using a console connection that is too slow to
        keep up with the boot-time console-message rate.  For example,
-       a 115Kbaud serial console can be -way- too slow to keep up
+       a 115Kbaud serial console can be *way* too slow to keep up
        with boot-time message rates, and will frequently result in
        RCU CPU stall warning messages.  Especially if you have added
        debug printk()s.
@@ -105,7 +105,7 @@ warnings:
        leading the realization that the CPU had failed.
 
 The RCU, RCU-sched, and RCU-tasks implementations have CPU stall warnings.
-Note that SRCU does -not- have CPU stall warnings.  Please note that
+Note that SRCU does *not* have CPU stall warnings.  Please note that
 RCU only detects CPU stalls when there is a grace period in progress.
 No grace period, no CPU stall warnings.
 
@@ -145,7 +145,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT
        this parameter is checked only at the beginning of a cycle.
        So if you are 10 seconds into a 40-second stall, setting this
        sysfs parameter to (say) five will shorten the timeout for the
-       -next- stall, or the following warning for the current stall
+       *next* stall, or the following warning for the current stall
        (assuming the stall lasts long enough).  It will not affect the
        timing of the next warning for the current stall.
 
@@ -189,8 +189,8 @@ rcupdate.rcu_task_stall_timeout
 Interpreting RCU's CPU Stall-Detector "Splats"
 ==============================================
 
-For non-RCU-tasks flavors of RCU, when a CPU detects that it is stalling,
-it will print a message similar to the following::
+For non-RCU-tasks flavors of RCU, when a CPU detects that some other
+CPU is stalling, it will print a message similar to the following::
 
        INFO: rcu_sched detected stalls on CPUs/tasks:
        2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0
@@ -202,8 +202,10 @@ causing stalls, and that the stall was affecting RCU-sched.  This message
 will normally be followed by stack dumps for each CPU.  Please note that
 PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, and that
 the tasks will be indicated by PID, for example, "P3421".  It is even
-possible for an rcu_state stall to be caused by both CPUs -and- tasks,
+possible for an rcu_state stall to be caused by both CPUs *and* tasks,
 in which case the offending CPUs and tasks will all be called out in the list.
+In some cases, CPUs will detect themselves stalling, which will result
+in a self-detected stall.
 
 CPU 2's "(3 GPs behind)" indicates that this CPU has not interacted with
 the RCU core for the past three grace periods.  In contrast, CPU 16's "(0
@@ -224,7 +226,7 @@ is the number that had executed since boot at the time that this CPU
 last noted the beginning of a grace period, which might be the current
 (stalled) grace period, or it might be some earlier grace period (for
 example, if the CPU might have been in dyntick-idle mode for an extended
-time period.  The number after the "/" is the number that have executed
+time period).  The number after the "/" is the number that have executed
 since boot until the current time.  If this latter number stays constant
 across repeated stall-warning messages, it is possible that RCU's softirq
 handlers are no longer able to execute on this CPU.  This can happen if
@@ -283,7 +285,8 @@ If the relevant grace-period kthread has been unable to run prior to
 the stall warning, as was the case in the "All QSes seen" line above,
 the following additional line is printed::
 
-       kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
+       rcu_sched kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
+       Unless rcu_sched kthread gets sufficient CPU time, OOM is now expected behavior.
 
 Starving the grace-period kthreads of CPU time can of course result
 in RCU CPU stall warnings even when all CPUs and tasks have passed
@@ -313,15 +316,21 @@ is the current ``TIMER_SOFTIRQ`` count on cpu 4.  If this value does not
 change on successive RCU CPU stall warnings, there is further reason to
 suspect a timer problem.
 
+These messages are usually followed by stack dumps of the CPUs and tasks
+involved in the stall.  These stack traces can help you locate the cause
+of the stall, keeping in mind that the CPU detecting the stall will have
+an interrupt frame that is mainly devoted to detecting the stall.
+
 
 Multiple Warnings From One Stall
 ================================
 
-If a stall lasts long enough, multiple stall-warning messages will be
-printed for it.  The second and subsequent messages are printed at
+If a stall lasts long enough, multiple stall-warning messages will
+be printed for it.  The second and subsequent messages are printed at
 longer intervals, so that the time between (say) the first and second
 message will be about three times the interval between the beginning
-of the stall and the first message.
+of the stall and the first message.  It can be helpful to compare the
+stack dumps for the different messages for the same stalled grace period.
 
 
 Stall Warnings for Expedited Grace Periods
index f12cda5..8cbc711 100644 (file)
@@ -16,3 +16,4 @@ are configurable at compile, boot or run time.
    multihit.rst
    special-register-buffer-data-sampling.rst
    core-scheduling.rst
+   l1d_flush.rst
diff --git a/Documentation/admin-guide/hw-vuln/l1d_flush.rst b/Documentation/admin-guide/hw-vuln/l1d_flush.rst
new file mode 100644 (file)
index 0000000..210020b
--- /dev/null
@@ -0,0 +1,69 @@
+L1D Flushing
+============
+
+With an increasing number of vulnerabilities being reported around data
+leaks from the Level 1 Data cache (L1D) the kernel provides an opt-in
+mechanism to flush the L1D cache on context switch.
+
+This mechanism can be used to address e.g. CVE-2020-0550. For applications
+that opt in, it provides protection against vulnerabilities related to
+leaks (snooping) of data from the L1D cache.
+
+
+Related CVEs
+------------
+The following CVEs can be addressed by this
+mechanism:
+
+    =============       ========================     ==================
+    CVE-2020-0550       Improper Data Forwarding     OS related aspects
+    =============       ========================     ==================
+
+Usage Guidelines
+----------------
+
+Please see document: :ref:`Documentation/userspace-api/spec_ctrl.rst
+<set_spec_ctrl>` for details.
+
+**NOTE**: The feature is disabled by default; applications need to
+specifically opt into the feature to enable it.
+
+Mitigation
+----------
+
+When PR_SET_L1D_FLUSH is enabled for a task, a flush of the L1D cache is
+performed when the task is scheduled out and the incoming task belongs to a
+different process and therefore to a different address space.
+
+If the underlying CPU supports L1D flushing in hardware, the hardware
+mechanism is used; a software fallback for the mitigation is not supported.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows controlling the L1D flush mitigations at boot
+time with the option "l1d_flush=". The valid arguments for this option are:
+
+  ============  =============================================================
+  on            Enables the prctl interface. Applications trying to use
+                the prctl() will fail with an error if l1d_flush is not
+                enabled.
+  ============  =============================================================
+
+By default the mechanism is disabled.
+
+Limitations
+-----------
+
+The mechanism does not mitigate L1D data leaks between tasks belonging to
+different processes which are concurrently executing on sibling threads of
+a physical CPU core when SMT is enabled on the system.
+
+This can be addressed by controlled placement of processes on physical CPU
+cores or by disabling SMT. See the relevant chapter in the L1TF mitigation
+document: :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <smt_control>`.
+
+**NOTE**: The opt-in of a task for L1D flushing works only when the task's
+affinity is limited to cores running in non-SMT mode. If a task which
+requested L1D flushing is scheduled on an SMT-enabled core, the kernel sends
+a SIGBUS to the task.
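
A rough user-space sketch of the opt-in; the exact constants are defined by the speculation-control prctl() documented in spec_ctrl.rst and include/uapi/linux/prctl.h, so the names used below (PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE) should be treated as assumptions to verify against those headers:

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
            /* Assumed interface: request L1D flushing on context switch for
             * this task.  Fails unless the kernel was booted with l1d_flush=on. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
                      PR_SPEC_ENABLE, 0, 0)) {
                    perror("PR_SPEC_L1D_FLUSH");
                    return 1;
            }
            /* ... sensitive processing; note the SMT caveat above ... */
            return 0;
    }
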
index bdb2200..56bd70e 100644 (file)
                        feature (tagged TLBs) on capable Intel chips.
                        Default is 1 (enabled)
 
+       l1d_flush=      [X86,INTEL]
+                       Control mitigation for L1D based snooping vulnerability.
+
+                       Certain CPUs are vulnerable to an exploit against CPU
+                       internal buffers which can forward information to a
+                       disclosure gadget under certain conditions.
+
+                       In vulnerable processors, the speculatively
+                       forwarded data can be used in a cache side channel
+                       attack, to access data to which the attacker does
+                       not have direct access.
+
+                       This parameter controls the mitigation. The
+                       options are:
+
+                       on         - enable the interface for the mitigation
+
        l1tf=           [X86] Control mitigation of the L1TF vulnerability on
                              affected CPUs
 
 
        reboot=         [KNL]
                        Format (x86 or x86_64):
-                               [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
+                               [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] | d[efault] \
                                [[,]s[mp]#### \
                                [[,]b[ios] | a[cpi] | k[bd] | t[riple] | e[fi] | p[ci]] \
                                [[,]f[orce]
index 0f1fded..0f1ffa0 100644 (file)
@@ -271,3 +271,97 @@ WRITE_ONCE.  Thus:
                        SC *y, t;
 
 is allowed.
+
+
+CMPXCHG vs TRY_CMPXCHG
+----------------------
+
+  int atomic_cmpxchg(atomic_t *ptr, int old, int new);
+  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new);
+
+Both provide the same functionality, but try_cmpxchg() can lead to more
+compact code. The functions relate like:
+
+  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new)
+  {
+    int ret, old = *oldp;
+    ret = atomic_cmpxchg(ptr, old, new);
+    if (ret != old)
+      *oldp = ret;
+    return ret == old;
+  }
+
+and:
+
+  int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+  {
+    (void)atomic_try_cmpxchg(ptr, &old, new);
+    return old;
+  }
+
+Usage:
+
+  old = atomic_read(&v);                       old = atomic_read(&v);
+  for (;;) {                                   do {
+    new = func(old);                             new = func(old);
+    tmp = atomic_cmpxchg(&v, old, new);                } while (!atomic_try_cmpxchg(&v, &old, new));
+    if (tmp == old)
+      break;
+    old = tmp;
+  }
+
+NB. try_cmpxchg() also generates better code on some platforms (notably x86)
+where the function more closely matches the hardware instruction.
+
+
+FORWARD PROGRESS
+----------------
+
+In general, strong forward progress is expected of all unconditional atomic
+operations -- those in the Arithmetic and Bitwise classes and xchg(). However,
+a fair amount of code also requires forward progress from the conditional
+atomic operations.
+
+Specifically, 'simple' cmpxchg() loops are expected to not starve one another
+indefinitely. However, this is not evident on LL/SC architectures, because
+while an LL/SC architecture 'can/should/must' provide forward progress
+guarantees between competing LL/SC sections, such a guarantee does not
+transfer to cmpxchg() implemented using LL/SC. Consider:
+
+  old = atomic_read(&v);
+  do {
+    new = func(old);
+  } while (!atomic_try_cmpxchg(&v, &old, new));
+
+which on LL/SC becomes something like:
+
+  old = atomic_read(&v);
+  do {
+    new = func(old);
+  } while (!({
+    asm volatile ("1: LL  %[oldval], %[v]\n"
+                  "   CMP %[oldval], %[old]\n"
+                  "   BNE 2f\n"
+                  "   SC  %[new], %[v]\n"
+                  "   BNE 1b\n"
+                  "2:\n"
+                  : [oldval] "=&r" (oldval), [v] "m" (v)
+                 : [old] "r" (old), [new] "r" (new)
+                  : "memory");
+    success = (oldval == old);
+    if (!success)
+      old = oldval;
+    success; }));
+
+However, even the forward branch from the failed compare can cause the LL/SC
+to fail on some architectures, let alone whatever the compiler makes of the C
+loop body. As a result, there is no guarantee whatsoever that the cacheline
+containing @v will stay on the local CPU and that progress is made.
+
+Even native CAS architectures can fail to provide forward progress for their
+primitive (See Sparc64 for an example).
+
+Such implementations are strongly encouraged to add exponential backoff loops
+to a failed CAS in order to ensure some progress. Affected architectures are
+also strongly encouraged to inspect/audit the atomic fallbacks, refcount_t and
+their locking primitives.
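
A small self-contained sketch of the try_cmpxchg() loop form discussed above, here applied to a saturating increment (illustrative kernel-style code, not taken from any particular caller):

    #include <linux/atomic.h>
    #include <linux/limits.h>

    /* Increment @v but saturate at INT_MAX.  On a failed exchange,
     * atomic_try_cmpxchg() updates @old with the current value, so the
     * loop needs no explicit reload. */
    static inline void example_atomic_inc_saturating(atomic_t *v)
    {
            int old = atomic_read(v);

            do {
                    if (old == INT_MAX)
                            return;         /* already saturated */
            } while (!atomic_try_cmpxchg(v, &old, old + 1));
    }
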
index 3de1d51..6bf9c5a 100644 (file)
@@ -108,7 +108,7 @@ This bump in ABI version is at most once per kernel development cycle.
 
 For example, if current state of ``libbpf.map`` is:
 
-.. code-block:: c
+.. code-block:: none
 
         LIBBPF_0.0.1 {
                global:
@@ -121,7 +121,7 @@ For example, if current state of ``libbpf.map`` is:
 , and a new symbol ``bpf_func_c`` is being introduced, then
 ``libbpf.map`` should be changed like this:
 
-.. code-block:: c
+.. code-block:: none
 
         LIBBPF_0.0.1 {
                global:
index a2c96be..1122cd3 100644 (file)
@@ -220,7 +220,7 @@ goes online (offline) and during initial setup (shutdown) of the driver. However
 each registration and removal function is also available with a ``_nocalls``
 suffix which does not invoke the provided callbacks if the invocation of the
 callbacks is not desired. During the manual setup (or teardown) the functions
-``get_online_cpus()`` and ``put_online_cpus()`` should be used to inhibit CPU
+``cpus_read_lock()`` and ``cpus_read_unlock()`` should be used to inhibit CPU
 hotplug operations.
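
For illustration, a sketch of the manual-setup pattern with the lock pair named above; example_setup_cpu() is an assumed driver-local helper:

    #include <linux/cpu.h>
    #include <linux/cpumask.h>

    static void example_setup_cpu(unsigned int cpu)
    {
            /* per-CPU driver initialization would go here */
    }

    /* Set up state for all currently online CPUs while holding
     * cpus_read_lock(), so no CPU can come or go in the meantime. */
    static void example_manual_setup(void)
    {
            unsigned int cpu;

            cpus_read_lock();
            for_each_online_cpu(cpu)
                    example_setup_cpu(cpu);
            cpus_read_unlock();
    }
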
 
 
index 53283b3..6979b4a 100644 (file)
@@ -55,8 +55,24 @@ exist then it will allocate a new Linux irq_desc, associate it with
 the hwirq, and call the .map() callback so the driver can perform any
 required hardware setup.
 
-When an interrupt is received, irq_find_mapping() function should
-be used to find the Linux IRQ number from the hwirq number.
+Once a mapping has been established, it can be retrieved or used via a
+variety of methods:
+
+- irq_resolve_mapping() returns a pointer to the irq_desc structure
+  for a given domain and hwirq number, and NULL if there was no
+  mapping.
+- irq_find_mapping() returns a Linux IRQ number for a given domain and
+  hwirq number, and 0 if there was no mapping
+- irq_linear_revmap() is now identical to irq_find_mapping(), and is
+  deprecated
+- generic_handle_domain_irq() handles an interrupt described by a
+  domain and a hwirq number
+- handle_domain_irq() does the same thing for root interrupt
+  controllers and deals with the set_irq_regs()/irq_enter() sequences
+  that most architectures require
+
+Note that irq domain lookups must happen in contexts that are
+compatible with an RCU read-side critical section.
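
As an illustration of these lookups, a sketch of a chained handler that demultiplexes a parent interrupt into its domain; example_read_pending() and the 32-bit status layout are assumptions standing in for real hardware access:

    #include <linux/bitops.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/irqchip/chained_irq.h>

    static unsigned long example_read_pending(void)
    {
            return 0;       /* would read the controller's pending register */
    }

    static void example_chained_handler(struct irq_desc *desc)
    {
            struct irq_domain *domain = irq_desc_get_handler_data(desc);
            struct irq_chip *chip = irq_desc_get_chip(desc);
            unsigned long pending, hwirq;

            chained_irq_enter(chip, desc);
            pending = example_read_pending();
            for_each_set_bit(hwirq, &pending, 32)
                    generic_handle_domain_irq(domain, hwirq);
            chained_irq_exit(chip, desc);
    }
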
 
 The irq_create_mapping() function must be called *at least once*
 before any call to irq_find_mapping(), lest the descriptor will not
@@ -137,7 +153,9 @@ required.  Calling irq_create_direct_mapping() will allocate a Linux
 IRQ number and call the .map() callback so that driver can program the
 Linux IRQ number into the hardware.
 
-Most drivers cannot use this mapping.
+Most drivers cannot use this mapping, and it is now gated on the
+CONFIG_IRQ_DOMAIN_NOMAP option. Please refrain from introducing new
+users of this API.
 
 Legacy
 ------
@@ -157,6 +175,10 @@ for IRQ numbers that are passed to struct device registrations.  In that
 case the Linux IRQ numbers cannot be dynamically assigned and the legacy
 mapping should be used.
 
+As the name implies, the *_legacy() functions are deprecated and only
+exist to ease the support of ancient platforms. No new users should be
+added.
+
 The legacy map assumes a contiguous range of IRQ numbers has already
 been allocated for the controller and that the IRQ number can be
 calculated by adding a fixed offset to the hwirq number, and
index e425278..e2ca0b0 100644 (file)
@@ -19,7 +19,6 @@ properties:
   compatible:
     enum:
       - ibm,fsi2spi
-      - ibm,fsi2spi-restricted
 
   reg:
     items:
index d993e00..0d62c28 100644 (file)
@@ -22,7 +22,10 @@ properties:
     maxItems: 1
 
   clocks:
-    maxItems: 1
+    minItems: 1
+    items:
+      - description: APB interface clock source
+      - description: GPIO debounce reference clock source
 
   gpio-controller: true
 
index b2a1e42..71de563 100644 (file)
@@ -152,47 +152,6 @@ allOf:
           maxItems: 1
         st,drdy-int-pin: false
 
-  - if:
-      properties:
-        compatible:
-          enum:
-            # Two intertial interrupts i.e. accelerometer/gyro interrupts
-            - st,h3lis331dl-accel
-            - st,l3g4200d-gyro
-            - st,l3g4is-gyro
-            - st,l3gd20-gyro
-            - st,l3gd20h-gyro
-            - st,lis2de12
-            - st,lis2dw12
-            - st,lis2hh12
-            - st,lis2dh12-accel
-            - st,lis331dl-accel
-            - st,lis331dlh-accel
-            - st,lis3de
-            - st,lis3dh-accel
-            - st,lis3dhh
-            - st,lis3mdl-magn
-            - st,lng2dm-accel
-            - st,lps331ap-press
-            - st,lsm303agr-accel
-            - st,lsm303dlh-accel
-            - st,lsm303dlhc-accel
-            - st,lsm303dlm-accel
-            - st,lsm330-accel
-            - st,lsm330-gyro
-            - st,lsm330d-accel
-            - st,lsm330d-gyro
-            - st,lsm330dl-accel
-            - st,lsm330dl-gyro
-            - st,lsm330dlc-accel
-            - st,lsm330dlc-gyro
-            - st,lsm9ds0-gyro
-            - st,lsm9ds1-magn
-    then:
-      properties:
-        interrupts:
-          maxItems: 2
-
 required:
   - compatible
   - reg
index b5baf43..a3412f2 100644 (file)
@@ -29,6 +29,7 @@ properties:
           - fsl,imx53-esdhc
           - fsl,imx6q-usdhc
           - fsl,imx6sl-usdhc
+          - fsl,imx6sll-usdhc
           - fsl,imx6sx-usdhc
           - fsl,imx6ull-usdhc
           - fsl,imx7d-usdhc
@@ -115,12 +116,17 @@ properties:
       - const: per
 
   pinctrl-names:
-    minItems: 1
-    items:
-      - const: default
-      - const: state_100mhz
-      - const: state_200mhz
-      - const: sleep
+    oneOf:
+      - minItems: 3
+        items:
+          - const: default
+          - const: state_100mhz
+          - const: state_200mhz
+          - const: sleep
+      - minItems: 1
+        items:
+          - const: default
+          - const: sleep
 
 required:
   - compatible
index e0169a2..9e23967 100644 (file)
@@ -11,7 +11,9 @@ maintainers:
 
 properties:
   compatible:
-    const: mmc-pwrseq-sd8787
+    enum:
+      - mmc-pwrseq-sd8787
+      - mmc-pwrseq-wilc1000
 
   powerdown-gpios:
     minItems: 1
index 677989b..9f1e709 100644 (file)
@@ -9,9 +9,6 @@ title: Renesas SDHI SD/MMC controller
 maintainers:
   - Wolfram Sang <wsa+renesas@sang-engineering.com>
 
-allOf:
-  - $ref: "mmc-controller.yaml"
-
 properties:
   compatible:
     oneOf:
@@ -47,19 +44,20 @@ properties:
           - const: renesas,sdhi-mmc-r8a77470 # RZ/G1C (SDHI/MMC IP)
       - items:
           - enum:
-              - renesas,sdhi-r8a774a1 # RZ/G2M
-              - renesas,sdhi-r8a774b1 # RZ/G2N
-              - renesas,sdhi-r8a774c0 # RZ/G2E
-              - renesas,sdhi-r8a774e1 # RZ/G2H
-              - renesas,sdhi-r8a7795  # R-Car H3
-              - renesas,sdhi-r8a7796  # R-Car M3-W
-              - renesas,sdhi-r8a77961 # R-Car M3-W+
-              - renesas,sdhi-r8a77965 # R-Car M3-N
-              - renesas,sdhi-r8a77970 # R-Car V3M
-              - renesas,sdhi-r8a77980 # R-Car V3H
-              - renesas,sdhi-r8a77990 # R-Car E3
-              - renesas,sdhi-r8a77995 # R-Car D3
-              - renesas,sdhi-r8a779a0 # R-Car V3U
+              - renesas,sdhi-r8a774a1  # RZ/G2M
+              - renesas,sdhi-r8a774b1  # RZ/G2N
+              - renesas,sdhi-r8a774c0  # RZ/G2E
+              - renesas,sdhi-r8a774e1  # RZ/G2H
+              - renesas,sdhi-r8a7795   # R-Car H3
+              - renesas,sdhi-r8a7796   # R-Car M3-W
+              - renesas,sdhi-r8a77961  # R-Car M3-W+
+              - renesas,sdhi-r8a77965  # R-Car M3-N
+              - renesas,sdhi-r8a77970  # R-Car V3M
+              - renesas,sdhi-r8a77980  # R-Car V3H
+              - renesas,sdhi-r8a77990  # R-Car E3
+              - renesas,sdhi-r8a77995  # R-Car D3
+              - renesas,sdhi-r8a779a0  # R-Car V3U
+              - renesas,sdhi-r9a07g044 # RZ/G2{L,LC}
           - const: renesas,rcar-gen3-sdhi # R-Car Gen3 or RZ/G2
 
   reg:
@@ -69,15 +67,9 @@ properties:
     minItems: 1
     maxItems: 3
 
-  clocks:
-    minItems: 1
-    maxItems: 2
+  clocks: true
 
-  clock-names:
-    minItems: 1
-    items:
-      - const: core
-      - const: cd
+  clock-names: true
 
   dmas:
     minItems: 4
@@ -104,14 +96,82 @@ properties:
   pinctrl-1:
     maxItems: 1
 
-  pinctrl-names:
-    minItems: 1
-    items:
-      - const: default
-      - const: state_uhs
+  pinctrl-names: true
 
   max-frequency: true
 
+allOf:
+  - $ref: "mmc-controller.yaml"
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: renesas,sdhi-r9a07g044
+    then:
+      properties:
+        clocks:
+          items:
+            - description: IMCLK, SDHI channel main clock1.
+            - description: IMCLK2, SDHI channel main clock2. When this clock is
+                           turned off, external SD cards cannot be detected.
+            - description: CLK_HS, SDHI channel high-speed clock, which operates
+                           at 4 times the rate of SDHI channel main clock1.
+            - description: ACLK, SDHI channel bus clock.
+        clock-names:
+          items:
+            - const: imclk
+            - const: imclk2
+            - const: clk_hs
+            - const: aclk
+      required:
+        - clock-names
+        - resets
+    else:
+      properties:
+        clocks:
+          minItems: 1
+          maxItems: 2
+        clock-names:
+          minItems: 1
+          items:
+            - const: core
+            - const: cd
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: renesas,sdhi-mmc-r8a77470
+    then:
+      properties:
+        pinctrl-names:
+          items:
+            - const: state_uhs
+    else:
+      properties:
+        pinctrl-names:
+          minItems: 1
+          items:
+            - const: default
+            - const: state_uhs
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - renesas,sdhi-r7s72100
+              - renesas,sdhi-r7s9210
+    then:
+      required:
+        - clock-names
+      description:
+        The internal card detection logic that exists in these controllers is
+        sectioned off to be run by a separate second clock source to allow
+        the main core clock to be turned off to save power.
+
 required:
   - compatible
   - reg
@@ -119,21 +179,6 @@ required:
   - clocks
   - power-domains
 
-if:
-  properties:
-    compatible:
-      contains:
-        enum:
-          - renesas,sdhi-r7s72100
-          - renesas,sdhi-r7s9210
-then:
-  required:
-    - clock-names
-  description:
-    The internal card detection logic that exists in these controllers is
-    sectioned off to be run by a separate second clock source to allow
-    the main core clock to be turned off to save power.
-
 unevaluatedProperties: false
 
 examples:
index 4c7fa6a..365c3fc 100644 (file)
@@ -19,6 +19,7 @@ Required properties:
                "qcom,msm8996-sdhci", "qcom,sdhci-msm-v4"
                "qcom,qcs404-sdhci", "qcom,sdhci-msm-v5"
                "qcom,sc7180-sdhci", "qcom,sdhci-msm-v5";
+               "qcom,sc7280-sdhci", "qcom,sdhci-msm-v5";
                "qcom,sdm845-sdhci", "qcom,sdhci-msm-v5"
                "qcom,sdx55-sdhci", "qcom,sdhci-msm-v5";
                "qcom,sm8250-sdhci", "qcom,sdhci-msm-v5"
index c3b4b75..d56ac48 100644 (file)
@@ -31,6 +31,20 @@ properties:
   compatible:
     const: simple-battery
 
+  device-chemistry:
+    description: This describes the chemical technology of the battery.
+    oneOf:
+      - const: nickel-cadmium
+      - const: nickel-metal-hydride
+      - const: lithium-ion
+        description: This is a blanket type for all lithium-ion batteries,
+          including those below. If possible, a precise compatible string
+          from below should be used, but sometimes it is unknown which specific
+          lithium ion battery is employed and this wide compatible can be used.
+      - const: lithium-ion-polymer
+      - const: lithium-ion-iron-phosphate
+      - const: lithium-ion-manganese-oxide
+
   over-voltage-threshold-microvolt:
     description: battery over-voltage limit
 
index c70f05e..971b53c 100644 (file)
@@ -19,12 +19,15 @@ properties:
       - maxim,max17047
       - maxim,max17050
       - maxim,max17055
+      - maxim,max77849-battery
 
   reg:
     maxItems: 1
 
   interrupts:
     maxItems: 1
+    description: |
+      The ALRT pin, an open-drain interrupt.
 
   maxim,rsns-microohm:
     $ref: /schemas/types.yaml#/definitions/uint32
diff --git a/Documentation/devicetree/bindings/power/supply/mt6360_charger.yaml b/Documentation/devicetree/bindings/power/supply/mt6360_charger.yaml
new file mode 100644 (file)
index 0000000..b89b15a
--- /dev/null
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/mt6360_charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Battery charger driver for MT6360 PMIC from MediaTek Integrated.
+
+maintainers:
+  - Gene Chen <gene_chen@richtek.com>
+
+description: |
+  This module is part of the MT6360 MFD device.
+  Provides Battery Charger, Boost for OTG devices and BC1.2 detection.
+
+properties:
+  compatible:
+    const: mediatek,mt6360-chg
+
+  richtek,vinovp-microvolt:
+    description: Maximum CHGIN regulation voltage in uV.
+    enum: [ 5500000, 6500000, 11000000, 14500000 ]
+
+  usb-otg-vbus-regulator:
+    type: object
+    description: OTG boost regulator.
+    $ref: /schemas/regulator/regulator.yaml#
+
+required:
+  - compatible
+
+additionalProperties: false
+
+examples:
+  - |
+    mt6360_charger: charger {
+      compatible = "mediatek,mt6360-chg";
+      richtek,vinovp-microvolt = <14500000>;
+
+      otg_vbus_regulator: usb-otg-vbus-regulator {
+        regulator-compatible = "usb-otg-vbus";
+        regulator-name = "usb-otg-vbus";
+        regulator-min-microvolt = <4425000>;
+        regulator-max-microvolt = <5825000>;
+      };
+    };
+...
index 983fc21..20862cd 100644 (file)
@@ -73,6 +73,26 @@ properties:
       - 1 # SMB3XX_SOFT_TEMP_COMPENSATE_CURRENT Current compensation
       - 2 # SMB3XX_SOFT_TEMP_COMPENSATE_VOLTAGE Voltage compensation
 
+  summit,inok-polarity:
+    description: |
+      Polarity of INOK signal indicating presence of external power supply.
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum:
+      - 0 # SMB3XX_SYSOK_INOK_ACTIVE_LOW
+      - 1 # SMB3XX_SYSOK_INOK_ACTIVE_HIGH
+
+  usb-vbus:
+    $ref: "../../regulator/regulator.yaml#"
+    type: object
+
+    properties:
+      summit,needs-inok-toggle:
+        type: boolean
+        description: INOK signal is fixed and polarity needs to be toggled
+                     in order to enable/disable output mode.
+
+    unevaluatedProperties: false
+
 allOf:
   - if:
       properties:
@@ -134,6 +154,7 @@ examples:
             reg = <0x7f>;
 
             summit,enable-charge-control = <SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH>;
+            summit,inok-polarity = <SMB3XX_SYSOK_INOK_ACTIVE_LOW>;
             summit,chip-temperature-threshold-celsius = <110>;
             summit,mains-current-limit-microamp = <2000000>;
             summit,usb-current-limit-microamp = <500000>;
@@ -141,6 +162,15 @@ examples:
             summit,enable-mains-charging;
 
             monitored-battery = <&battery>;
+
+            usb-vbus {
+                regulator-name = "usb_vbus";
+                regulator-min-microvolt = <5000000>;
+                regulator-max-microvolt = <5000000>;
+                regulator-min-microamp = <750000>;
+                regulator-max-microamp = <750000>;
+                summit,needs-inok-toggle;
+            };
         };
     };
 
index dcda666..de6a23a 100644 (file)
@@ -21,10 +21,13 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - x-powers,axp202-ac-power-supply
-      - x-powers,axp221-ac-power-supply
-      - x-powers,axp813-ac-power-supply
+    oneOf:
+      - const: x-powers,axp202-ac-power-supply
+      - const: x-powers,axp221-ac-power-supply
+      - items:
+          - const: x-powers,axp803-ac-power-supply
+          - const: x-powers,axp813-ac-power-supply
+      - const: x-powers,axp813-ac-power-supply
 
 required:
   - compatible
index 86e8a71..d055428 100644 (file)
@@ -19,10 +19,14 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - x-powers,axp209-battery-power-supply
-      - x-powers,axp221-battery-power-supply
-      - x-powers,axp813-battery-power-supply
+    oneOf:
+      - const: x-powers,axp202-battery-power-supply
+      - const: x-powers,axp209-battery-power-supply
+      - const: x-powers,axp221-battery-power-supply
+      - items:
+          - const: x-powers,axp803-battery-power-supply
+          - const: x-powers,axp813-battery-power-supply
+      - const: x-powers,axp813-battery-power-supply
 
 required:
   - compatible
index 61f1b32..0c371b5 100644 (file)
@@ -20,11 +20,15 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - x-powers,axp202-usb-power-supply
-      - x-powers,axp221-usb-power-supply
-      - x-powers,axp223-usb-power-supply
-      - x-powers,axp813-usb-power-supply
+    oneOf:
+      - enum:
+          - x-powers,axp202-usb-power-supply
+          - x-powers,axp221-usb-power-supply
+          - x-powers,axp223-usb-power-supply
+          - x-powers,axp813-usb-power-supply
+      - items:
+          - const: x-powers,axp803-usb-power-supply
+          - const: x-powers,axp813-usb-power-supply
 
 
 required:
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rtq2134-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rtq2134-regulator.yaml
new file mode 100644 (file)
index 0000000..3f47e8e
--- /dev/null
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rtq2134-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RTQ2134 SubPMIC Regulator
+
+maintainers:
+  - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+  The RTQ2134 is a multi-phase, programmable power management IC that
+  integrates four high-efficiency, synchronous step-down converter cores.
+
+  Datasheet is available at
+  https://www.richtek.com/assets/product_file/RTQ2134-QA/DSQ2134-QA-01.pdf
+
+properties:
+  compatible:
+    enum:
+      - richtek,rtq2134
+
+  reg:
+    maxItems: 1
+
+  regulators:
+    type: object
+
+    patternProperties:
+      "^buck[1-3]$":
+        type: object
+        $ref: regulator.yaml#
+        description: |
+          regulator description for buck[1-3].
+
+        properties:
+          richtek,use-vsel-dvs:
+            type: boolean
+            description: |
+              If specified, buck will listen to 'vsel' pin for dvs config.
+              Else, use dvs0 voltage by default.
+
+          richtek,uv-shutdown:
+            type: boolean
+            description: |
+              If specified, use shutdown as UV action. Else, hiccup by default.
+
+        unevaluatedProperties: false
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - regulators
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      rtq2134@18 {
+        compatible = "richtek,rtq2134";
+        reg = <0x18>;
+
+        regulators {
+          buck1 {
+            regulator-name = "rtq2134-buck1";
+            regulator-min-microvolt = <300000>;
+            regulator-max-microvolt = <1850000>;
+            regulator-always-on;
+            richtek,use-vsel-dvs;
+            regulator-state-mem {
+              regulator-suspend-min-microvolt = <550000>;
+              regulator-suspend-max-microvolt = <550000>;
+            };
+          };
+          buck2 {
+            regulator-name = "rtq2134-buck2";
+            regulator-min-microvolt = <1120000>;
+            regulator-max-microvolt = <1120000>;
+            regulator-always-on;
+            richtek,use-vsel-dvs;
+            regulator-state-mem {
+              regulator-suspend-min-microvolt = <1120000>;
+              regulator-suspend-max-microvolt = <1120000>;
+            };
+          };
+          buck3 {
+            regulator-name = "rtq2134-buck3";
+            regulator-min-microvolt = <600000>;
+            regulator-max-microvolt = <600000>;
+            regulator-always-on;
+            richtek,use-vsel-dvs;
+            regulator-state-mem {
+              regulator-suspend-min-microvolt = <600000>;
+              regulator-suspend-max-microvolt = <600000>;
+            };
+          };
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml
new file mode 100644 (file)
index 0000000..e6e5a9a
--- /dev/null
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rtq6752-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RTQ6752 TFT LCD Voltage Regulator
+
+maintainers:
+  - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+  The RTQ6752 is an I2C interface programmable power management IC. It
+  includes two synchronous boost converters for PAVDD, and one synchronous
+  NAVDD buck-boost. The device is suitable for automotive TFT-LCD panels.
+
+properties:
+  compatible:
+    enum:
+      - richtek,rtq6752
+
+  reg:
+    maxItems: 1
+
+  enable-gpios:
+    description: |
+      A connection of the chip 'enable' gpio line. If not provided, treat it as
+      external pull up.
+    maxItems: 1
+
+  regulators:
+    type: object
+
+    patternProperties:
+      "^(p|n)avdd$":
+        type: object
+        $ref: regulator.yaml#
+        description: |
+          regulator description for pavdd and navdd.
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - regulators
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      rtq6752@6b {
+        compatible = "richtek,rtq6752";
+        reg = <0x6b>;
+        enable-gpios = <&gpio26 2 0>;
+
+        regulators {
+          pavdd {
+            regulator-name = "rtq6752-pavdd";
+            regulator-min-microvolt = <5000000>;
+            regulator-max-microvolt = <7300000>;
+            regulator-boot-on;
+          };
+          navdd {
+            regulator-name = "rtq6752-navdd";
+            regulator-min-microvolt = <5000000>;
+            regulator-max-microvolt = <7300000>;
+            regulator-boot-on;
+          };
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/regulator/socionext,uniphier-regulator.yaml b/Documentation/devicetree/bindings/regulator/socionext,uniphier-regulator.yaml
new file mode 100644 (file)
index 0000000..861d5f3
--- /dev/null
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/socionext,uniphier-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier regulator controller
+
+description: |
+  This regulator controls VBUS and belongs to the USB3 glue layer. Before using
+  the regulator, it is necessary to control the clocks and resets to enable
+  this layer. These clocks and resets should be described in each property.
+
+maintainers:
+  - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+allOf:
+  - $ref: "regulator.yaml#"
+
+# USB3 Controller
+
+properties:
+  compatible:
+    enum:
+      - socionext,uniphier-pro4-usb3-regulator
+      - socionext,uniphier-pro5-usb3-regulator
+      - socionext,uniphier-pxs2-usb3-regulator
+      - socionext,uniphier-ld20-usb3-regulator
+      - socionext,uniphier-pxs3-usb3-regulator
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    maxItems: 2
+
+  clock-names:
+    oneOf:
+      - items:          # for Pro4, Pro5
+          - const: gio
+          - const: link
+      - items:          # for others
+          - const: link
+
+  resets:
+    minItems: 1
+    maxItems: 2
+
+  reset-names:
+    oneOf:
+      - items:          # for Pro4, Pro5
+          - const: gio
+          - const: link
+      - items:
+          - const: link
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+
+examples:
+  - |
+    usb-glue@65b00000 {
+        compatible = "simple-mfd";
+        #address-cells = <1>;
+        #size-cells = <1>;
+        ranges = <0 0x65b00000 0x400>;
+
+        usb_vbus0: regulators@100 {
+            compatible = "socionext,uniphier-ld20-usb3-regulator";
+            reg = <0x100 0x10>;
+            clock-names = "link";
+            clocks = <&sys_clk 14>;
+            reset-names = "link";
+            resets = <&sys_rst 14>;
+        };
+    };
+
diff --git a/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt b/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt
deleted file mode 100644 (file)
index 94fd38b..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-Socionext UniPhier Regulator Controller
-
-This describes the devicetree bindings for regulator controller implemented
-on Socionext UniPhier SoCs.
-
-USB3 Controller
----------------
-
-This regulator controls VBUS and belongs to USB3 glue layer. Before using
-the regulator, it is necessary to control the clocks and resets to enable
-this layer. These clocks and resets should be described in each property.
-
-Required properties:
-- compatible: Should be
-    "socionext,uniphier-pro4-usb3-regulator" - for Pro4 SoC
-    "socionext,uniphier-pro5-usb3-regulator" - for Pro5 SoC
-    "socionext,uniphier-pxs2-usb3-regulator" - for PXs2 SoC
-    "socionext,uniphier-ld20-usb3-regulator" - for LD20 SoC
-    "socionext,uniphier-pxs3-usb3-regulator" - for PXs3 SoC
-- reg: Specifies offset and length of the register set for the device.
-- clocks: A list of phandles to the clock gate for USB3 glue layer.
-       According to the clock-names, appropriate clocks are required.
-- clock-names: Should contain
-    "gio", "link" - for Pro4 and Pro5 SoCs
-    "link"        - for others
-- resets: A list of phandles to the reset control for USB3 glue layer.
-       According to the reset-names, appropriate resets are required.
-- reset-names: Should contain
-    "gio", "link" - for Pro4 and Pro5 SoCs
-    "link"        - for others
-
-See Documentation/devicetree/bindings/regulator/regulator.txt
-for more details about the regulator properties.
-
-Example:
-
-       usb-glue@65b00000 {
-               compatible = "socionext,uniphier-ld20-dwc3-glue",
-                            "simple-mfd";
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges = <0 0x65b00000 0x400>;
-
-               usb_vbus0: regulators@100 {
-                       compatible = "socionext,uniphier-ld20-usb3-regulator";
-                       reg = <0x100 0x10>;
-                       clock-names = "link";
-                       clocks = <&sys_clk 14>;
-                       reset-names = "link";
-                       resets = <&sys_rst 14>;
-               };
-
-               phy {
-                       ...
-                       phy-supply = <&usb_vbus0>;
-               };
-               ...
-       };
index 1d38ff7..2b1f916 100644 (file)
@@ -24,10 +24,10 @@ allOf:
 select:
   properties:
     compatible:
-      items:
-        enum:
-            - sifive,fu540-c000-ccache
-            - sifive,fu740-c000-ccache
+      contains:
+        enum:
+          - sifive,fu540-c000-ccache
+          - sifive,fu740-c000-ccache
 
   required:
     - compatible
diff --git a/Documentation/devicetree/bindings/spi/omap-spi.txt b/Documentation/devicetree/bindings/spi/omap-spi.txt
deleted file mode 100644 (file)
index 487208c..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-OMAP2+ McSPI device
-
-Required properties:
-- compatible :
-  - "ti,am654-mcspi" for AM654.
-  - "ti,omap2-mcspi" for OMAP2 & OMAP3.
-  - "ti,omap4-mcspi" for OMAP4+.
-- ti,spi-num-cs : Number of chipselect supported  by the instance.
-- ti,hwmods: Name of the hwmod associated to the McSPI
-- ti,pindir-d0-out-d1-in: Select the D0 pin as output and D1 as
-                         input. The default is D0 as input and
-                         D1 as output.
-
-Optional properties:
-- dmas: List of DMA specifiers with the controller specific format
-       as described in the generic DMA client binding. A tx and rx
-       specifier is required for each chip select.
-- dma-names: List of DMA request names. These strings correspond
-       1:1 with the DMA specifiers listed in dmas. The string naming
-       is to be "rxN" and "txN" for RX and TX requests,
-       respectively, where N equals the chip select number.
-
-Examples:
-
-[hwmod populated DMA resources]
-
-mcspi1: mcspi@1 {
-    #address-cells = <1>;
-    #size-cells = <0>;
-    compatible = "ti,omap4-mcspi";
-    ti,hwmods = "mcspi1";
-    ti,spi-num-cs = <4>;
-};
-
-[generic DMA request binding]
-
-mcspi1: mcspi@1 {
-    #address-cells = <1>;
-    #size-cells = <0>;
-    compatible = "ti,omap4-mcspi";
-    ti,hwmods = "mcspi1";
-    ti,spi-num-cs = <2>;
-    dmas = <&edma 42
-           &edma 43
-           &edma 44
-           &edma 45>;
-    dma-names = "tx0", "rx0", "tx1", "rx1";
-};
diff --git a/Documentation/devicetree/bindings/spi/omap-spi.yaml b/Documentation/devicetree/bindings/spi/omap-spi.yaml
new file mode 100644 (file)
index 0000000..e555381
--- /dev/null
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/omap-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SPI controller bindings for OMAP and K3 SoCs
+
+maintainers:
+  - Aswath Govindraju <a-govindraju@ti.com>
+
+allOf:
+  - $ref: spi-controller.yaml#
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - ti,am654-mcspi
+              - ti,am4372-mcspi
+          - const: ti,omap4-mcspi
+      - items:
+          - enum:
+              - ti,omap2-mcspi
+              - ti,omap4-mcspi
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+  ti,spi-num-cs:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Number of chipselects supported by the instance.
+    minimum: 1
+    maximum: 4
+
+  ti,hwmods:
+    $ref: /schemas/types.yaml#/definitions/string
+    description:
+      Must be "mcspi<n>", n being the instance number (1-based).
+      This property is applicable only on legacy platforms mainly omap2/3
+      and ti81xx and should not be used on other platforms.
+    deprecated: true
+
+  ti,pindir-d0-out-d1-in:
+    description:
+      Select the D0 pin as output and D1 as input. The default is D0
+      as input and D1 as output.
+    type: boolean
+
+  dmas:
+    description:
+      List of DMA specifiers with the controller specific format as
+      described in the generic DMA client binding. A tx and rx
+      specifier is required for each chip select.
+    minItems: 1
+    maxItems: 8
+
+  dma-names:
+    description:
+      List of DMA request names. These strings correspond 1:1 with
+      the DMA specifiers listed in dmas. The names are to be "rxN"
+      and "txN" for RX and TX requests, respectively, where N is
+      the chip select number.
+    minItems: 1
+    maxItems: 8
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+unevaluatedProperties: false
+
+if:
+  properties:
+    compatible:
+      oneOf:
+        - const: ti,omap2-mcspi
+        - const: ti,omap4-mcspi
+
+then:
+  properties:
+    ti,hwmods:
+      items:
+        - pattern: "^mcspi([1-9])$"
+
+else:
+  properties:
+    ti,hwmods: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+    spi@2100000 {
+      compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+      reg = <0x2100000 0x400>;
+      interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&k3_clks 137 1>;
+      power-domains = <&k3_pds 137 TI_SCI_PD_EXCLUSIVE>;
+      #address-cells = <1>;
+      #size-cells = <0>;
+      dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>;
+      dma-names = "tx0", "rx0";
+    };
diff --git a/Documentation/devicetree/bindings/spi/rockchip-sfc.yaml b/Documentation/devicetree/bindings/spi/rockchip-sfc.yaml
new file mode 100644 (file)
index 0000000..339fb39
--- /dev/null
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/rockchip-sfc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip Serial Flash Controller (SFC)
+
+maintainers:
+  - Heiko Stuebner <heiko@sntech.de>
+  - Chris Morgan <macromorgan@hotmail.com>
+
+allOf:
+  - $ref: spi-controller.yaml#
+
+properties:
+  compatible:
+    const: rockchip,sfc
+    description:
+      The Rockchip SFC controller is a standalone IP with a version register,
+      and the driver can handle all feature differences inside the IP
+      depending on the version register.
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Bus Clock
+      - description: Module Clock
+
+  clock-names:
+    items:
+      - const: clk_sfc
+      - const: hclk_sfc
+
+  power-domains:
+    maxItems: 1
+
+  rockchip,sfc-no-dma:
+    description: Disable DMA and utilize FIFO mode only
+    type: boolean
+
+patternProperties:
+  "^flash@[0-3]$":
+    type: object
+    properties:
+      reg:
+        minimum: 0
+        maximum: 3
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/px30-cru.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/power/px30-power.h>
+
+    sfc: spi@ff3a0000 {
+        compatible = "rockchip,sfc";
+        reg = <0xff3a0000 0x4000>;
+        interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&cru SCLK_SFC>, <&cru HCLK_SFC>;
+        clock-names = "clk_sfc", "hclk_sfc";
+        pinctrl-0 = <&sfc_clk &sfc_cs &sfc_bus2>;
+        pinctrl-names = "default";
+        power-domains = <&power PX30_PD_MMC_NAND>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        flash@0 {
+            compatible = "jedec,spi-nor";
+            reg = <0>;
+            spi-max-frequency = <108000000>;
+            spi-rx-bus-width = <2>;
+            spi-tx-bus-width = <2>;
+        };
+    };
+
+...
index 4d0e4c1..2a24969 100644 (file)
@@ -11,6 +11,7 @@ Required properties:
     - mediatek,mt8135-spi: for mt8135 platforms
     - mediatek,mt8173-spi: for mt8173 platforms
     - mediatek,mt8183-spi: for mt8183 platforms
+    - mediatek,mt6893-spi: for mt6893 platforms
     - "mediatek,mt8192-spi", "mediatek,mt6765-spi": for mt8192 platforms
     - "mediatek,mt8195-spi", "mediatek,mt6765-spi": for mt8195 platforms
     - "mediatek,mt8516-spi", "mediatek,mt2712-spi": for mt8516 platforms
diff --git a/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt b/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt
deleted file mode 100644 (file)
index 2567c82..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-Spreadtrum ADI controller
-
-ADI is the abbreviation of Anolog-Digital interface, which is used to access
-analog chip (such as PMIC) from digital chip. ADI controller follows the SPI
-framework for its hardware implementation is alike to SPI bus and its timing
-is compatile to SPI timing.
-
-ADI controller has 50 channels including 2 software read/write channels and
-48 hardware channels to access analog chip. For 2 software read/write channels,
-users should set ADI registers to access analog chip. For hardware channels,
-we can configure them to allow other hardware components to use it independently,
-which means we can just link one analog chip address to one hardware channel,
-then users can access the mapped analog chip address by this hardware channel
-triggered by hardware components instead of ADI software channels.
-
-Thus we introduce one property named "sprd,hw-channels" to configure hardware
-channels, the first value specifies the hardware channel id which is used to
-transfer data triggered by hardware automatically, and the second value specifies
-the analog chip address where user want to access by hardware components.
-
-Since we have multi-subsystems will use unique ADI to access analog chip, when
-one system is reading/writing data by ADI software channels, that should be under
-one hardware spinlock protection to prevent other systems from reading/writing
-data by ADI software channels at the same time, or two parallel routine of setting
-ADI registers will make ADI controller registers chaos to lead incorrect results.
-Then we need one hardware spinlock to synchronize between the multiple subsystems.
-
-The new version ADI controller supplies multiple master channels for different
-subsystem accessing, that means no need to add hardware spinlock to synchronize,
-thus change the hardware spinlock support to be optional to keep backward
-compatibility.
-
-Required properties:
-- compatible: Should be "sprd,sc9860-adi".
-- reg: Offset and length of ADI-SPI controller register space.
-- #address-cells: Number of cells required to define a chip select address
-       on the ADI-SPI bus. Should be set to 1.
-- #size-cells: Size of cells required to define a chip select address size
-       on the ADI-SPI bus. Should be set to 0.
-
-Optional properties:
-- hwlocks: Reference to a phandle of a hwlock provider node.
-- hwlock-names: Reference to hwlock name strings defined in the same order
-       as the hwlocks, should be "adi".
-- sprd,hw-channels: This is an array of channel values up to 49 channels.
-       The first value specifies the hardware channel id which is used to
-       transfer data triggered by hardware automatically, and the second
-       value specifies the analog chip address where user want to access
-       by hardware components.
-
-SPI slave nodes must be children of the SPI controller node and can contain
-properties described in Documentation/devicetree/bindings/spi/spi-bus.txt.
-
-Example:
-       adi_bus: spi@40030000 {
-               compatible = "sprd,sc9860-adi";
-               reg = <0 0x40030000 0 0x10000>;
-               hwlocks = <&hwlock1 0>;
-               hwlock-names = "adi";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               sprd,hw-channels = <30 0x8c20>;
-       };
diff --git a/Documentation/devicetree/bindings/spi/sprd,spi-adi.yaml b/Documentation/devicetree/bindings/spi/sprd,spi-adi.yaml
new file mode 100644 (file)
index 0000000..fe01402
--- /dev/null
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/spi/sprd,spi-adi.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Spreadtrum ADI controller
+
+maintainers:
+  - Orson Zhai <orsonzhai@gmail.com>
+  - Baolin Wang <baolin.wang7@gmail.com>
+  - Chunyan Zhang <zhang.lyra@gmail.com>
+
+description: |
+  ADI is the abbreviation of Analog-Digital Interface, which is used to access
+  an analog chip (such as a PMIC) from a digital chip. The ADI controller
+  follows the SPI framework because its hardware implementation is similar to
+  a SPI bus and its timing is compatible with SPI timing.
+
+  The ADI controller has 50 channels, including 2 software read/write channels
+  and 48 hardware channels, to access the analog chip. For the 2 software
+  read/write channels, users should set ADI registers to access the analog
+  chip. The hardware channels can be configured so that other hardware
+  components can use them independently, which means one analog chip address
+  can be linked to one hardware channel; users can then access the mapped
+  analog chip address through this hardware channel, triggered by hardware
+  components instead of the ADI software channels.
+
+  Thus we introduce one property named "sprd,hw-channels" to configure hardware
+  channels: the first value specifies the hardware channel id which is used to
+  transfer data triggered by hardware automatically, and the second value
+  specifies the analog chip address that the user wants to access through
+  hardware components.
+
+  Since multiple subsystems use the same ADI to access the analog chip, reading
+  or writing data through the ADI software channels must be protected by one
+  hardware spinlock to prevent other subsystems from reading or writing data
+  through the ADI software channels at the same time; otherwise two parallel
+  routines setting ADI registers would corrupt the ADI controller registers and
+  lead to incorrect results. Therefore one hardware spinlock is needed to
+  synchronize the multiple subsystems.
+
+  The new version of the ADI controller supplies multiple master channels for
+  different subsystems to access, which means no hardware spinlock is needed
+  for synchronization; the hardware spinlock support is therefore optional to
+  keep backward compatibility.
+
+allOf:
+  - $ref: /spi/spi-controller.yaml#
+
+properties:
+  compatible:
+    enum:
+      - sprd,sc9860-adi
+      - sprd,sc9863-adi
+      - sprd,ums512-adi
+
+  reg:
+    maxItems: 1
+
+  hwlocks:
+    maxItems: 1
+
+  hwlock-names:
+    const: adi
+
+  sprd,hw-channels:
+    $ref: /schemas/types.yaml#/definitions/uint32-matrix
+    description: A list of hardware channels
+    minItems: 1
+    maxItems: 48
+    items:
+      items:
+        - description: The hardware channel id which is used to transfer data
+            triggered by hardware automatically; channel ids 0-1 are for
+            software use, 2-49 are hardware channels.
+          minimum: 2
+          maximum: 49
+        - description: The analog chip address that the user wants to access
+            through hardware components.
+
+required:
+  - compatible
+  - reg
+  - '#address-cells'
+  - '#size-cells'
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    aon {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        adi_bus: spi@40030000 {
+            compatible = "sprd,sc9860-adi";
+            reg = <0 0x40030000 0 0x10000>;
+            hwlocks = <&hwlock1 0>;
+            hwlock-names = "adi";
+            #address-cells = <1>;
+            #size-cells = <0>;
+            sprd,hw-channels = <30 0x8c20>;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt
deleted file mode 100644 (file)
index d65fdce..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-Rockchip rk timer
-
-Required properties:
-- compatible: should be:
-  "rockchip,rv1108-timer", "rockchip,rk3288-timer": for Rockchip RV1108
-  "rockchip,rk3036-timer", "rockchip,rk3288-timer": for Rockchip RK3036
-  "rockchip,rk3066-timer", "rockchip,rk3288-timer": for Rockchip RK3066
-  "rockchip,rk3188-timer", "rockchip,rk3288-timer": for Rockchip RK3188
-  "rockchip,rk3228-timer", "rockchip,rk3288-timer": for Rockchip RK3228
-  "rockchip,rk3229-timer", "rockchip,rk3288-timer": for Rockchip RK3229
-  "rockchip,rk3288-timer": for Rockchip RK3288
-  "rockchip,rk3368-timer", "rockchip,rk3288-timer": for Rockchip RK3368
-  "rockchip,rk3399-timer": for Rockchip RK3399
-- reg: base address of the timer register starting with TIMERS CONTROL register
-- interrupts: should contain the interrupts for Timer0
-- clocks : must contain an entry for each entry in clock-names
-- clock-names : must include the following entries:
-  "timer", "pclk"
-
-Example:
-       timer: timer@ff810000 {
-               compatible = "rockchip,rk3288-timer";
-               reg = <0xff810000 0x20>;
-               interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&xin24m>, <&cru PCLK_TIMER>;
-               clock-names = "timer", "pclk";
-       };
diff --git a/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml
new file mode 100644 (file)
index 0000000..e26ecb5
--- /dev/null
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/rockchip,rk-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip Timer Device Tree Bindings
+
+maintainers:
+  - Daniel Lezcano <daniel.lezcano@linaro.org>
+
+properties:
+  compatible:
+    oneOf:
+      - const: rockchip,rk3288-timer
+      - const: rockchip,rk3399-timer
+      - items:
+          - enum:
+              - rockchip,rv1108-timer
+              - rockchip,rk3036-timer
+              - rockchip,rk3066-timer
+              - rockchip,rk3188-timer
+              - rockchip,rk3228-timer
+              - rockchip,rk3229-timer
+              - rockchip,rk3288-timer
+              - rockchip,rk3368-timer
+              - rockchip,px30-timer
+          - const: rockchip,rk3288-timer
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 2
+    maxItems: 2
+
+  clock-names:
+    items:
+      - const: pclk
+      - const: timer
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/rk3288-cru.h>
+
+    timer: timer@ff810000 {
+        compatible = "rockchip,rk3288-timer";
+        reg = <0xff810000 0x20>;
+        interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&cru PCLK_TIMER>, <&xin24m>;
+        clock-names = "pclk", "timer";
+    };
index f5a3207..c57c609 100644 (file)
@@ -85,7 +85,6 @@ available subsections can be seen below.
    io-mapping
    io_ordering
    generic-counter
-   lightnvm-pblk
    memory-devices/index
    men-chameleon-bus
    ntb
diff --git a/Documentation/driver-api/lightnvm-pblk.rst b/Documentation/driver-api/lightnvm-pblk.rst
deleted file mode 100644 (file)
index 1040ed1..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-pblk: Physical Block Device Target
-==================================
-
-pblk implements a fully associative, host-based FTL that exposes a traditional
-block I/O interface. Its primary responsibilities are:
-
-  - Map logical addresses onto physical addresses (4KB granularity) in a
-    logical-to-physical (L2P) table.
-  - Maintain the integrity and consistency of the L2P table as well as its
-    recovery from normal tear down and power outage.
-  - Deal with controller- and media-specific constrains.
-  - Handle I/O errors.
-  - Implement garbage collection.
-  - Maintain consistency across the I/O stack during synchronization points.
-
-For more information please refer to:
-
-  http://lightnvm.io
-
-which maintains updated FAQs, manual pages, technical documentation, tools,
-contacts, etc.
index f47d05e..4a25c5e 100644 (file)
@@ -24,6 +24,10 @@ Available fault injection capabilities
 
   injects futex deadlock and uaddr fault errors.
 
+- fail_sunrpc
+
+  injects kernel RPC client and server failures.
+
 - fail_make_request
 
   injects disk IO errors on devices permitted by setting
@@ -151,6 +155,20 @@ configuration of fault-injection capabilities.
        default is 'N', setting it to 'Y' will disable failure injections
        when dealing with private (address space) futexes.
 
+- /sys/kernel/debug/fail_sunrpc/ignore-client-disconnect:
+
+       Format: { 'Y' | 'N' }
+
+       default is 'N', setting it to 'Y' will disable disconnect
+       injection on the RPC client.
+
+- /sys/kernel/debug/fail_sunrpc/ignore-server-disconnect:
+
+       Format: { 'Y' | 'N' }
+
+       default is 'N', setting it to 'Y' will disable disconnect
+       injection on the RPC server.
+
 - /sys/kernel/debug/fail_function/inject:
 
        Format: { 'function-name' | '!function-name' | '' }
diff --git a/Documentation/filesystems/cifs/index.rst b/Documentation/filesystems/cifs/index.rst
new file mode 100644 (file)
index 0000000..1c8597a
--- /dev/null
@@ -0,0 +1,10 @@
+===============================
+CIFS
+===============================
+
+
+.. toctree::
+   :maxdepth: 1
+
+   ksmbd
+   cifsroot
diff --git a/Documentation/filesystems/cifs/ksmbd.rst b/Documentation/filesystems/cifs/ksmbd.rst
new file mode 100644 (file)
index 0000000..a132615
--- /dev/null
@@ -0,0 +1,165 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+KSMBD - SMB3 Kernel Server
+==========================
+
+KSMBD is a Linux kernel server which implements the SMB3 protocol in kernel
+space for sharing files over the network.
+
+KSMBD architecture
+==================
+
+The subset of performance-related operations belongs in kernel space, while
+operations that are not really performance-related belong in user space. So
+DCE/RPC management, which has historically resulted in a number of buffer
+overflow issues and dangerous security bugs, and user account management are
+implemented in user space as ksmbd.mountd. File operations that are
+performance-related (open/read/write/close etc.) are handled in kernel space
+(ksmbd). This also allows for easier integration with the VFS interface for
+all file operations.
+
+ksmbd (kernel daemon)
+---------------------
+
+When the server daemon is started, it starts up a forker thread
+(ksmbd/interface name) at initialization time and opens a dedicated port 445
+for listening to SMB requests. Whenever a new client makes a request, the
+forker thread accepts the client connection and forks a new thread for a
+dedicated communication channel between the client and the server. This allows
+for parallel processing of SMB requests (commands) from clients as well as
+allowing new clients to make new connections. Each instance is named
+ksmbd/1~n (port number) to indicate connected clients. Depending on the SMB
+request types, each new thread can decide to pass the commands through to user
+space (ksmbd.mountd); currently DCE/RPC commands are identified to be handled
+through user space. To further utilize the Linux kernel, the commands are
+processed as work items and executed in the handlers of the ksmbd-io kworker
+threads. This allows for multiplexing of the handlers, as the kernel takes
+care of initiating extra worker threads if the load increases and, vice versa,
+destroys the extra worker threads if the load decreases. So, after a
+connection is established with a client, the dedicated ksmbd/1..n (port
+number) thread takes complete ownership of receiving and parsing SMB commands.
+Each received command is worked in parallel, i.e. there can be multiple client
+commands which are worked in parallel. After receiving each command, a
+separate kernel work item is prepared and queued to be handled by the ksmbd-io
+kworkers. This allows the benefit of load sharing to be managed optimally by
+the kernel and optimizes client performance by handling client commands in
+parallel.
+
+ksmbd.mountd (user space daemon)
+--------------------------------
+
+ksmbd.mountd is a userspace process that transfers the user accounts and
+passwords registered using ksmbd.adduser (part of the userspace utils). It
+also passes the share information parameters parsed from smb.conf to ksmbd in
+the kernel. For the execution part it has a daemon which runs continuously,
+connected to the kernel interface using a netlink socket, where it waits for
+requests (DCE/RPC and share/user info). It handles the RPC calls (at a minimum
+a few dozen) that are most important for a file server, such as NetShareEnum
+and NetServerGetInfo. The complete DCE/RPC response is prepared in user space
+and passed over to the associated kernel thread for the client.
+
+
+KSMBD Feature Status
+====================
+
+============================== =================================================
+Feature name                   Status
+============================== =================================================
+Dialects                       Supported. SMB2.1 SMB3.0, SMB3.1.1 dialects
+                               (intentionally excludes security vulnerable SMB1
+                               dialect).
+Auto Negotiation               Supported.
+Compound Request               Supported.
+Oplock Cache Mechanism         Supported.
+SMB2 leases(v1 lease)          Supported.
+Directory leases(v2 lease)     Planned for future.
+Multi-credits                  Supported.
+NTLM/NTLMv2                    Supported.
+HMAC-SHA256 Signing            Supported.
+Secure negotiate               Supported.
+Signing Update                 Supported.
+Pre-authentication integrity   Supported.
+SMB3 encryption(CCM, GCM)      Supported. (CCM and GCM128 supported, GCM256 in
+                               progress)
+SMB direct(RDMA)               Partially Supported. SMB3 Multi-channel is
+                               required to connect to Windows client.
+SMB3 Multi-channel             Partially Supported. Planned to implement
+                               replay/retry mechanisms for future.
+SMB3.1.1 POSIX extension       Supported.
+ACLs                           Partially Supported. only DACLs available, SACLs
+                               (auditing) is planned for the future. For
+                               ownership (SIDs) ksmbd generates random subauth
+                               values(then store it to disk) and use uid/gid
+                               get from inode as RID for local domain SID.
+                               The current acl implementation is limited to
+                               standalone server, not a domain member.
+                               Integration with Samba tools is being worked on
+                               to allow future support for running as a domain
+                               member.
+Kerberos                       Supported.
+Durable handle v1,v2           Planned for future.
+Persistent handle              Planned for future.
+SMB2 notify                    Planned for future.
+Sparse file support            Supported.
+DCE/RPC support                Partially Supported. a few calls(NetShareEnumAll,
+                               NetServerGetInfo, SAMR, LSARPC) that are needed
+                               for file server handled via netlink interface
+                               from ksmbd.mountd. Additional integration with
+                               Samba tools and libraries via upcall is being
+                               investigated to allow support for additional
+                               DCE/RPC management calls (and future support
+                               for Witness protocol e.g.)
+ksmbd/nfsd interoperability    Planned for future. The features that ksmbd
+                               support are Leases, Notify, ACLs and Share modes.
+============================== =================================================
+
+
+How to run
+==========
+
+1. Download ksmbd-tools and compile them.
+       - https://github.com/cifsd-team/ksmbd-tools
+
+2. Create user/password for SMB share.
+
+       # mkdir /etc/ksmbd/
+       # ksmbd.adduser -a <Enter USERNAME for SMB share access>
+
+3. Create the /etc/ksmbd/smb.conf file and add an SMB share to it
+       - Refer to smb.conf.example and
+          https://github.com/cifsd-team/ksmbd-tools/blob/master/Documentation/configuration.txt
+
+4. Insert ksmbd.ko module
+
+       # insmod ksmbd.ko
+
+5. Start ksmbd user space daemon
+       # ksmbd.mountd
+
+6. Access share from Windows or Linux using CIFS
+
+Shutdown KSMBD
+==============
+
+1. Kill the user and kernel space daemons
+       # sudo ksmbd.control -s
+
+How to turn debug print on
+==========================
+
+Each layer
+/sys/class/ksmbd-control/debug
+
+1. Enable all component prints
+       # sudo ksmbd.control -d "all"
+
+2. Enable one of the components (smb, auth, vfs, oplock, ipc, conn, rdma)
+       # sudo ksmbd.control -d "smb"
+
+3. Show which prints are enabled.
+       # cat /sys/class/ksmbd-control/debug
+         [smb] auth vfs oplock ipc conn [rdma]
+
+4. Disable prints:
+       If you select an already enabled component once more, it is disabled and shown without brackets.
index 44b67eb..0eb799d 100644 (file)
@@ -1063,11 +1063,6 @@ astute users may notice some differences in behavior:
 
 - DAX (Direct Access) is not supported on encrypted files.
 
-- The st_size of an encrypted symlink will not necessarily give the
-  length of the symlink target as required by POSIX.  It will actually
-  give the length of the ciphertext, which will be slightly longer
-  than the plaintext due to NUL-padding and an extra 2-byte overhead.
-
 - The maximum length of an encrypted symlink is 2 bytes shorter than
   the maximum length of an unencrypted symlink.  For example, on an
   EXT4 filesystem with a 4K block size, unencrypted symlinks can be up
@@ -1235,12 +1230,12 @@ the user-supplied name to get the ciphertext.
 
 Lookups without the key are more complicated.  The raw ciphertext may
 contain the ``\0`` and ``/`` characters, which are illegal in
-filenames.  Therefore, readdir() must base64-encode the ciphertext for
-presentation.  For most filenames, this works fine; on ->lookup(), the
-filesystem just base64-decodes the user-supplied name to get back to
-the raw ciphertext.
+filenames.  Therefore, readdir() must base64url-encode the ciphertext
+for presentation.  For most filenames, this works fine; on ->lookup(),
+the filesystem just base64url-decodes the user-supplied name to get
+back to the raw ciphertext.
 
-However, for very long filenames, base64 encoding would cause the
+However, for very long filenames, base64url encoding would cause the
 filename length to exceed NAME_MAX.  To prevent this, readdir()
 actually presents long filenames in an abbreviated form which encodes
 a strong "hash" of the ciphertext filename, along with the optional
diff --git a/Documentation/filesystems/idmappings.rst b/Documentation/filesystems/idmappings.rst
new file mode 100644 (file)
index 0000000..1229a75
--- /dev/null
@@ -0,0 +1,1026 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Idmappings
+==========
+
+Most filesystem developers will have encountered idmappings. They are used when
+reading from or writing ownership to disk, reporting ownership to userspace, or
+for permission checking. This document is aimed at filesystem developers that
+want to know how idmappings work.
+
+Formal notes
+------------
+
+An idmapping is essentially a translation of a range of ids into another or the
+same range of ids. The notational convention for idmappings that is widely used
+in userspace is::
+
+ u:k:r
+
+``u`` indicates the first element in the upper idmapset ``U`` and ``k``
+indicates the first element in the lower idmapset ``K``. The ``r`` parameter
+indicates the range of the idmapping, i.e. how many ids are mapped. From now
+on, we will always prefix ids with ``u`` or ``k`` to make it clear whether
+we're talking about an id in the upper or lower idmapset.
+
+To see what this looks like in practice, let's take the following idmapping::
+
+ u22:k10000:r3
+
+and write down the mappings it will generate::
+
+ u22 -> k10000
+ u23 -> k10001
+ u24 -> k10002
+
+From a mathematical viewpoint ``U`` and ``K`` are well-ordered sets and an
+idmapping is an order isomorphism from ``U`` into ``K``. So ``U`` and ``K`` are
+order isomorphic. In fact, ``U`` and ``K`` are always well-ordered subsets of
+the set of all possible ids usable on a given system.
+
+Looking at this mathematically briefly will help us highlight some properties
+that make it easier to understand how we can translate between idmappings. For
+example, we know that the inverse idmapping is an order isomorphism as well::
+
+ k10000 -> u22
+ k10001 -> u23
+ k10002 -> u24
+
+Given that we are dealing with order isomorphisms plus the fact that we're
+dealing with subsets we can embed idmappings into each other, i.e. we can
+sensibly translate between different idmappings. For example, assume we've been
+given the three idmappings::
+
+ 1. u0:k10000:r10000
+ 2. u0:k20000:r10000
+ 3. u0:k30000:r10000
+
+and id ``k11000`` which has been generated by the first idmapping by mapping
+``u1000`` from the upper idmapset down to ``k11000`` in the lower idmapset.
+
+Because we're dealing with order isomorphic subsets it is meaningful to ask
+what id ``k11000`` corresponds to in the second or third idmapping. The
+straightforward algorithm to use is to apply the inverse of the first
+idmapping, mapping ``k11000`` up to ``u1000``. Afterwards, we can map ``u1000``
+down using either the second or the third idmapping. The second idmapping
+would map ``u1000`` down to ``k21000``. The third idmapping would map
+``u1000`` down to ``k31000``.
+
+If we were given the same task for the following three idmappings::
+
+ 1. u0:k10000:r10000
+ 2. u0:k20000:r200
+ 3. u0:k30000:r300
+
+we would fail to translate as the sets aren't order isomorphic over the full
+range of the first idmapping anymore (however, they are order isomorphic over
+the full range of the second idmapping). Neither the second nor the third
+idmapping contains ``u1000`` in the upper idmapset ``U``. This is equivalent
+to not having an id mapped. We can simply say that ``u1000`` is unmapped in
+the second and
+third idmapping. The kernel will report unmapped ids as the overflowuid
+``(uid_t)-1`` or overflowgid ``(gid_t)-1`` to userspace.
+
+The algorithm to calculate what a given id maps to is pretty simple. First, we
+need to verify that the range can contain our target id. We will skip this step
+for simplicity. After that if we want to know what ``id`` maps to we can do
+simple calculations:
+
+- If we want to map from left to right::
+
+   u:k:r
+   id - u + k = n
+
+- If we want to map from right to left::
+
+   u:k:r
+   id - k + u = n
+
+Instead of "left to right" we can also say "down" and instead of "right to
+left" we can also say "up". Obviously mapping down and up invert each other.
+
+To see whether the simple formulas above work, consider the following two
+idmappings::
+
+ 1. u0:k20000:r10000
+ 2. u500:k30000:r10000
+
+Assume we are given ``k21000`` in the lower idmapset of the first idmapping. We
+want to know what id this was mapped from in the upper idmapset of the first
+idmapping. So we're mapping up in the first idmapping::
+
+ id     - k      + u  = n
+ k21000 - k20000 + u0 = u1000
+
+Now assume we are given the id ``u1100`` in the upper idmapset of the second
+idmapping and we want to know what this id maps down to in the lower idmapset
+of the second idmapping. This means we're mapping down in the second
+idmapping::
+
+ id    - u    + k      = n
+ u1100 - u500 + k30000 = k30600
+
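+To make the two formulas above concrete, here is a minimal, self-contained C
+sketch of the calculation, including the range check that the examples skip.
+The ``struct idmap`` type and the ``map_down()``/``map_up()`` helpers are
+purely illustrative names for this document and are not kernel interfaces::
+
+ #include <stdio.h>
+
+ struct idmap {
+         unsigned int u; /* first id in the upper (userspace) idmapset */
+         unsigned int k; /* first id in the lower (kernel) idmapset */
+         unsigned int r; /* number of ids mapped */
+ };
+
+ /* Map "down", i.e. from left to right: id - u + k = n. */
+ static int map_down(const struct idmap *m, unsigned int id, unsigned int *n)
+ {
+         if (id < m->u || id - m->u >= m->r)
+                 return -1; /* unmapped; reported as overflowuid/overflowgid */
+         *n = id - m->u + m->k;
+         return 0;
+ }
+
+ /* Map "up", i.e. from right to left: id - k + u = n. */
+ static int map_up(const struct idmap *m, unsigned int id, unsigned int *n)
+ {
+         if (id < m->k || id - m->k >= m->r)
+                 return -1;
+         *n = id - m->k + m->u;
+         return 0;
+ }
+
+ int main(void)
+ {
+         struct idmap first  = { .u = 0,   .k = 20000, .r = 10000 };
+         struct idmap second = { .u = 500, .k = 30000, .r = 10000 };
+         unsigned int n;
+
+         if (!map_up(&first, 21000, &n))
+                 printf("k21000 -> u%u\n", n); /* u1000 */
+         if (!map_down(&second, 1100, &n))
+                 printf("u1100 -> k%u\n", n);  /* k30600 */
+         return 0;
+ }
+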
+General notes
+-------------
+
+In the context of the kernel an idmapping can be interpreted as mapping a range
+of userspace ids into a range of kernel ids::
+
+ userspace-id:kernel-id:range
+
+A userspace id is always an element in the upper idmapset of an idmapping of
+type ``uid_t`` or ``gid_t`` and a kernel id is always an element in the lower
+idmapset of an idmapping of type ``kuid_t`` or ``kgid_t``. From now on
+"userspace id" will be used to refer to the well known ``uid_t`` and ``gid_t``
+types and "kernel id" will be used to refer to ``kuid_t`` and ``kgid_t``.
+
+The kernel is mostly concerned with kernel ids. They are used when performing
+permission checks and are stored in an inode's ``i_uid`` and ``i_gid`` field.
+A userspace id on the other hand is an id that is reported to userspace by the
+kernel, or is passed by userspace to the kernel, or a raw device id that is
+written or read from disk.
+
+Note that we are only concerned with idmappings as the kernel stores them not
+how userspace would specify them.
+
+For the rest of this document we will prefix all userspace ids with ``u`` and
+all kernel ids with ``k``. Ranges of idmappings will be prefixed with ``r``. So
+an idmapping will be written as ``u0:k10000:r10000``.
+
+For example, the id ``u1000`` is an id in the upper idmapset or "userspace
+idmapset" starting with ``u1000``. And it is mapped to ``k11000`` which is a
+kernel id in the lower idmapset or "kernel idmapset" starting with ``k10000``.
+
+A kernel id is always created by an idmapping. Such idmappings are associated
+with user namespaces. Since we mainly care about how idmappings work we're not
+going to be concerned with how idmappings are created nor how they are used
+outside of the filesystem context. This is best left to an explanation of user
+namespaces.
+
+The initial user namespace is special. It always has an idmapping of the
+following form::
+
+ u0:k0:r4294967295
+
+which is an identity idmapping over the full range of ids available on this
+system.
+
+Other user namespaces usually have non-identity idmappings such as::
+
+ u0:k10000:r10000
+
+When a process creates or wants to change ownership of a file, or when the
+ownership of a file is read from disk by a filesystem, the userspace id is
+immediately translated into a kernel id according to the idmapping associated
+with the relevant user namespace.
+
+For instance, consider a file that is stored on disk by a filesystem as being
+owned by ``u1000``:
+
+- If a filesystem were to be mounted in the initial user namespace (as most
+  filesystems are) then the initial idmapping will be used. As we saw this is
+  simply the identity idmapping. This would mean id ``u1000`` read from disk
+  would be mapped to id ``k1000``. So an inode's ``i_uid`` and ``i_gid`` field
+  would contain ``k1000``.
+
+- If a filesystem were to be mounted with an idmapping of ``u0:k10000:r10000``
+  then ``u1000`` read from disk would be mapped to ``k11000``. So an inode's
+  ``i_uid`` and ``i_gid`` would contain ``k11000``.
+
+Translation algorithms
+----------------------
+
+We've already seen briefly that it is possible to translate between different
+idmappings. We'll now take a closer look how that works.
+
+Crossmapping
+~~~~~~~~~~~~
+
+This translation algorithm is used by the kernel in quite a few places. For
+example, it is used when reporting back the ownership of a file to userspace
+via the ``stat()`` system call family.
+
+If we've been given ``k11000`` from one idmapping we can map that id up in
+another idmapping. In order for this to work both idmappings need to contain
+the same kernel id in their kernel idmapsets. For example, consider the
+following idmappings::
+
+ 1. u0:k10000:r10000
+ 2. u20000:k10000:r10000
+
+and we are mapping ``u1000`` down to ``k11000`` in the first idmapping. We can
+then translate ``k11000`` into a userspace id in the second idmapping using the
+kernel idmapset of the second idmapping::
+
+ /* Map the kernel id up into a userspace id in the second idmapping. */
+ from_kuid(u20000:k10000:r10000, k11000) = u21000
+
+Note, how we can get back to the kernel id in the first idmapping by inverting
+the algorithm::
+
+ /* Map the userspace id down into a kernel id in the second idmapping. */
+ make_kuid(u20000:k10000:r10000, u21000) = k11000
+
+ /* Map the kernel id up into a userspace id in the first idmapping. */
+ from_kuid(u0:k10000:r10000, k11000) = u1000
+
+This algorithm allows us to answer the question what userspace id a given
+kernel id corresponds to in a given idmapping. In order to be able to answer
+this question both idmappings need to contain the same kernel id in their
+respective kernel idmapsets.
+
+For example, when the kernel reads a raw userspace id from disk it maps it down
+into a kernel id according to the idmapping associated with the filesystem.
+Let's assume the filesystem was mounted with an idmapping of
+``u0:k20000:r10000`` and it reads a file owned by ``u1000`` from disk. This
+means ``u1000`` will be mapped to ``k21000`` which is what will be stored in
+the inode's ``i_uid`` and ``i_gid`` field.
+
+When someone in userspace calls ``stat()`` or a related function to get
+ownership information about the file, the kernel can't simply map the id back
+up according to the filesystem's idmapping as this would give the wrong owner
+if the caller is using a different idmapping.
+
+So the kernel will map the id back up in the idmapping of the caller. Let's
+assume the caller has the slightly unconventional idmapping
+``u3000:k20000:r10000``. Then ``k21000`` would map back up to ``u4000``.
+Consequently the user would see that this file is owned by ``u4000``.
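+
+A minimal userspace sketch of the crossmapping algorithm, reusing the purely
+illustrative ``struct idmap`` model (none of this is the kernel's actual data
+structures or function signatures)::
+
+ #include <assert.h>
+
+ struct idmap { unsigned int u, k, r; };
+
+ static unsigned int make_kuid(struct idmap m, unsigned int uid)
+ {
+         if (uid < m.u || uid - m.u >= m.r)
+                 return (unsigned int)-1;
+         return m.k + (uid - m.u);
+ }
+
+ static unsigned int from_kuid(struct idmap m, unsigned int kuid)
+ {
+         if (kuid < m.k || kuid - m.k >= m.r)
+                 return (unsigned int)-1;
+         return m.u + (kuid - m.k);
+ }
+
+ int main(void)
+ {
+         /* Crossmapping requires a shared kernel idmapset. */
+         struct idmap first  = { 0,     10000, 10000 }; /* u0:k10000:r10000     */
+         struct idmap second = { 20000, 10000, 10000 }; /* u20000:k10000:r10000 */
+
+         unsigned int kuid = make_kuid(first, 1000);    /* u1000 -> k11000 */
+         assert(kuid == 11000);
+         assert(from_kuid(second, kuid) == 21000);      /* k11000 -> u21000 */
+
+         /* The stat() example: fs u0:k20000:r10000, caller u3000:k20000:r10000. */
+         struct idmap fs     = { 0,    20000, 10000 };
+         struct idmap caller = { 3000, 20000, 10000 };
+         assert(from_kuid(caller, make_kuid(fs, 1000)) == 4000); /* shown as u4000 */
+
+         return 0;
+ }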
+
+Remapping
+~~~~~~~~~
+
+It is possible to translate a kernel id from one idmapping to another one via
+the userspace idmapset of the two idmappings. This is equivalent to remapping
+a kernel id.
+
+Let's look at an example. We are given the following two idmappings::
+
+ 1. u0:k10000:r10000
+ 2. u0:k20000:r10000
+
+and we are given ``k11000`` in the first idmapping. In order to translate this
+kernel id in the first idmapping into a kernel id in the second idmapping we
+need to perform two steps:
+
+1. Map the kernel id up into a userspace id in the first idmapping::
+
+    /* Map the kernel id up into a userspace id in the first idmapping. */
+    from_kuid(u0:k10000:r10000, k11000) = u1000
+
+2. Map the userspace id down into a kernel id in the second idmapping::
+
+    /* Map the userspace id down into a kernel id in the second idmapping. */
+    make_kuid(u0:k20000:r10000, u1000) = k21000
+
+As you can see we used the userspace idmapset in both idmappings to translate
+the kernel id in one idmapping to a kernel id in another idmapping.
+
+This allows us to answer the question what kernel id we would need to use to
+get the same userspace id in another idmapping. In order to be able to answer
+this question both idmappings need to contain the same userspace id in their
+respective userspace idmapsets.
+
+Note how we can easily get back to the kernel id in the first idmapping by
+inverting the algorithm:
+
+1. Map the kernel id up into a userspace id in the second idmapping::
+
+    /* Map the kernel id up into a userspace id in the second idmapping. */
+    from_kuid(u0:k20000:r10000, k21000) = u1000
+
+2. Map the userspace id down into a kernel id in the first idmapping::
+
+    /* Map the userspace id down into a kernel id in the first idmapping. */
+    make_kuid(u0:k10000:r10000, u1000) = k11000
+
+Another way to look at this translation is to treat it as inverting one
+idmapping and applying another idmapping if both idmappings have the relevant
+userspace id mapped. This will come in handy when working with idmapped mounts.
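+
+The same illustrative model can express the remapping algorithm as a single
+helper (again a sketch only, not the kernel's implementation)::
+
+ #include <assert.h>
+
+ struct idmap { unsigned int u, k, r; };
+
+ static unsigned int make_kuid(struct idmap m, unsigned int uid)
+ {
+         if (uid < m.u || uid - m.u >= m.r)
+                 return (unsigned int)-1;
+         return m.k + (uid - m.u);
+ }
+
+ static unsigned int from_kuid(struct idmap m, unsigned int kuid)
+ {
+         if (kuid < m.k || kuid - m.k >= m.r)
+                 return (unsigned int)-1;
+         return m.u + (kuid - m.k);
+ }
+
+ /* Remap: up via the userspace idmapset of "from", down via that of "to". */
+ static unsigned int remap(struct idmap from, struct idmap to, unsigned int kuid)
+ {
+         return make_kuid(to, from_kuid(from, kuid));
+ }
+
+ int main(void)
+ {
+         struct idmap first  = { 0, 10000, 10000 }; /* u0:k10000:r10000 */
+         struct idmap second = { 0, 20000, 10000 }; /* u0:k20000:r10000 */
+
+         assert(remap(first, second, 11000) == 21000); /* k11000 -> u1000 -> k21000 */
+         assert(remap(second, first, 21000) == 11000); /* and back again            */
+         return 0;
+ }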
+
+Invalid translations
+~~~~~~~~~~~~~~~~~~~~
+
+It is never valid to use an id in the kernel idmapset of one idmapping as the
+id in the userspace idmapset of another or the same idmapping. While the kernel
+idmapset always indicates an idmapset in the kernel id space, the userspace
+idmapset indicates userspace ids. So the following translations are forbidden::
+
+ /* Map the userspace id down into a kernel id in the first idmapping. */
+ make_kuid(u0:k10000:r10000, u1000) = k11000
+
+ /* INVALID: Map the kernel id down into a kernel id in the second idmapping. */
+ make_kuid(u10000:k20000:r10000, k11000) = k21000
+                                  ~~~~~~
+
+and equally wrong::
+
+ /* Map the kernel id up into a userspace id in the first idmapping. */
+ from_kuid(u0:k10000:r10000, k11000) = u1000
+
+ /* INVALID: Map the userspace id up into a userspace id in the second idmapping. */
+ from_kuid(u20000:k0:r10000, u1000) = u21000
+                             ~~~~~
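+
+In the kernel proper this kind of mix-up is hard to write by accident: kernel
+ids are carried in a dedicated wrapper type (``kuid_t``) while userspace ids
+are plain integer types, so passing a kernel id where a userspace id is
+expected fails to compile. A rough, simplified userspace analogue of that idea
+(the ``*_demo`` names are invented here and are not the kernel's definitions)::
+
+ /* Userspace id: a plain integer (illustration only). */
+ typedef unsigned int uid_demo_t;
+
+ /* Kernel id: a distinct wrapper type (illustration only). */
+ typedef struct { unsigned int val; } kuid_demo_t;
+
+ struct idmap { unsigned int u, k, r; };
+
+ static kuid_demo_t make_kuid(struct idmap m, uid_demo_t uid)
+ {
+         kuid_demo_t kid = { (unsigned int)-1 };
+
+         if (uid >= m.u && uid - m.u < m.r)
+                 kid.val = m.k + (uid - m.u);
+         return kid;
+ }
+
+ int main(void)
+ {
+         struct idmap first  = { 0,     10000, 10000 };
+         struct idmap second = { 10000, 20000, 10000 };
+
+         kuid_demo_t kid = make_kuid(first, 1000);
+         /* make_kuid(second, kid);  <- would not compile: kid is not a userspace id */
+         (void)kid;
+         (void)second;
+         return 0;
+ }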
+
+Idmappings when creating filesystem objects
+-------------------------------------------
+
+The concepts of mapping an id down or mapping an id up are expressed in the two
+kernel functions filesystem developers are rather familiar with and which we've
+already used in this document::
+
+ /* Map the userspace id down into a kernel id. */
+ make_kuid(idmapping, uid)
+
+ /* Map the kernel id up into a userspace id. */
+ from_kuid(idmapping, kuid)
+
+We will take an abbreviated look into how idmappings figure into creating
+filesystem objects. For simplicity we will only look at what happens when the
+VFS has already completed path lookup right before it calls into the filesystem
+itself. So we're concerned with what happens when e.g. ``vfs_mkdir()`` is
+called. We will also assume that the directory we're creating filesystem
+objects in is readable and writable for everyone.
+
+When creating a filesystem object the kernel will look at the caller's
+filesystem ids. These are just regular ``uid_t`` and ``gid_t`` userspace ids
+but they are exclusively used when determining file ownership which is why they
+are called "filesystem ids". They are usually identical to the uid and gid of
+the caller but can differ. We will just assume they are always identical to not
+get lost in too many details.
+
+When the caller enters the kernel two things happen:
+
+1. Map the caller's userspace ids down into kernel ids in the caller's
+   idmapping.
+   (To be precise, the kernel will simply look at the kernel ids stashed in the
+   credentials of the current task but for our education we'll pretend this
+   translation happens just in time.)
+2. Verify that the caller's kernel ids can be mapped up to userspace ids in the
+   filesystem's idmapping.
+
+The second step is important as a regular filesystem will ultimately need to map
+the kernel id back up into a userspace id when writing to disk.
+So with the second step the kernel guarantees that a valid userspace id can be
+written to disk. If it can't the kernel will refuse the creation request to not
+even remotely risk filesystem corruption.
+
+The astute reader will have realized that this is simply a variation of the
+crossmapping algorithm we mentioned in a previous section. First, the
+kernel maps the caller's userspace id down into a kernel id according to the
+caller's idmapping and then maps that kernel id up according to the
+filesystem's idmapping.
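+
+A compact sketch of this two-step check in the illustrative userspace model
+used in the earlier sketches (``fs_id_has_mapping()`` is a made-up stand-in,
+not the kernel's actual helper)::
+
+ #include <assert.h>
+ #include <stdbool.h>
+
+ struct idmap { unsigned int u, k, r; };
+
+ static unsigned int make_kuid(struct idmap m, unsigned int uid)
+ {
+         if (uid < m.u || uid - m.u >= m.r)
+                 return (unsigned int)-1;
+         return m.k + (uid - m.u);
+ }
+
+ static unsigned int from_kuid(struct idmap m, unsigned int kuid)
+ {
+         if (kuid < m.k || kuid - m.k >= m.r)
+                 return (unsigned int)-1;
+         return m.u + (kuid - m.k);
+ }
+
+ /* Step 1: map the caller's fsuid down. Step 2: check it maps up in the fs idmapping. */
+ static bool fs_id_has_mapping(struct idmap caller, struct idmap fs, unsigned int fsuid)
+ {
+         unsigned int kuid = make_kuid(caller, fsuid);
+
+         return kuid != (unsigned int)-1 && from_kuid(fs, kuid) != (unsigned int)-1;
+ }
+
+ int main(void)
+ {
+         struct idmap identity = { 0, 0, 4294967295U };
+         struct idmap caller   = { 0, 10000, 10000 };
+         struct idmap fs       = { 0, 20000, 10000 };
+
+         assert(fs_id_has_mapping(identity, identity, 1000)); /* creation allowed */
+         assert(!fs_id_has_mapping(caller, fs, 1000));        /* creation denied  */
+         return 0;
+ }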
+
+Example 1
+~~~~~~~~~
+
+::
+
+ caller id:            u1000
+ caller idmapping:     u0:k0:r4294967295
+ filesystem idmapping: u0:k0:r4294967295
+
+Both the caller and the filesystem use the identity idmapping:
+
+1. Map the caller's userspace ids into kernel ids in the caller's idmapping::
+
+    make_kuid(u0:k0:r4294967295, u1000) = k1000
+
+2. Verify that the caller's kernel ids can be mapped to userspace ids in the
+   filesystem's idmapping.
+
+   For this second step the kernel will call the function
+   ``fsuidgid_has_mapping()`` which ultimately boils down to calling
+   ``from_kuid()``::
+
+    from_kuid(u0:k0:r4294967295, k1000) = u1000
+
+In this example both idmappings are the same so there's nothing exciting going
+on. Ultimately the userspace id that lands on disk will be ``u1000``.
+
+Example 2
+~~~~~~~~~
+
+::
+
+ caller id:            u1000
+ caller idmapping:     u0:k10000:r10000
+ filesystem idmapping: u0:k20000:r10000
+
+1. Map the caller's userspace ids down into kernel ids in the caller's
+   idmapping::
+
+    make_kuid(u0:k10000:r10000, u1000) = k11000
+
+2. Verify that the caller's kernel ids can be mapped up to userspace ids in the
+   filesystem's idmapping::
+
+    from_kuid(u0:k20000:r10000, k11000) = u-1
+
+It's immediately clear that while the caller's userspace id could be
+successfully mapped down into a kernel id in the caller's idmapping, the kernel
+id could not be mapped up according to the filesystem's idmapping. So the
+kernel will deny this creation request.
+
+Note that while this example is less common because most filesystems can't be
+mounted with non-initial idmappings, this is a general problem, as we can see
+in the next examples.
+
+Example 3
+~~~~~~~~~
+
+::
+
+ caller id:            u1000
+ caller idmapping:     u0:k10000:r10000
+ filesystem idmapping: u0:k0:r4294967295
+
+1. Map the caller's userspace ids down into kernel ids in the caller's
+   idmapping::
+
+    make_kuid(u0:k10000:r10000, u1000) = k11000
+
+2. Verify that the caller's kernel ids can be mapped up to userspace ids in the
+   filesystem's idmapping::
+
+    from_kuid(u0:k0:r4294967295, k11000) = u11000
+
+We can see that the translation always succeeds. The userspace id that the
+filesystem will ultimately put to disk will always be identical to the value of
+the kernel id that was created in the caller's idmapping. This has two main
+consequences.
+
+First, we can't allow a caller to ultimately write to disk with another
+userspace id. We could only do this if we were to mount the whole filesystem
+with the caller's or another idmapping. But that solution is limited to a few
+filesystems and not very flexible. Yet this is a use-case that is pretty
+important in containerized workloads.
+
+Second, the caller will usually not be able to create any files or access
+directories that have stricter permissions because none of the filesystem's
+kernel ids map up into valid userspace ids in the caller's idmapping:
+
+1. Map raw userspace ids down to kernel ids in the filesystem's idmapping::
+
+    make_kuid(u0:k0:r4294967295, u1000) = k1000
+
+2. Map kernel ids up to userspace ids in the caller's idmapping::
+
+    from_kuid(u0:k10000:r10000, k1000) = u-1
+
+Example 4
+~~~~~~~~~
+
+::
+
+ file id:              u1000
+ caller idmapping:     u0:k10000:r10000
+ filesystem idmapping: u0:k0:r4294967295
+
+In order to report ownership to userspace the kernel uses the crossmapping
+algorithm introduced in a previous section:
+
+1. Map the userspace id on disk down into a kernel id in the filesystem's
+   idmapping::
+
+    make_kuid(u0:k0:r4294967295, u1000) = k1000
+
+2. Map the kernel id up into a userspace id in the caller's idmapping::
+
+    from_kuid(u0:k10000:r10000, k1000) = u-1
+
+The crossmapping algorithm fails in this case because the kernel id in the
+filesystem idmapping cannot be mapped up to a userspace id in the caller's
+idmapping. Thus, the kernel will report the ownership of this file as the
+overflowid.
+
+Example 5
+~~~~~~~~~
+
+::
+
+ file id:              u1000
+ caller idmapping:     u0:k10000:r10000
+ filesystem idmapping: u0:k20000:r10000
+
+In order to report ownership to userspace the kernel uses the crossmapping
+algorithm introduced in a previous section:
+
+1. Map the userspace id on disk down into a kernel id in the filesystem's
+   idmapping::
+
+    make_kuid(u0:k20000:r10000, u1000) = k21000
+
+2. Map the kernel id up into a userspace id in the caller's idmapping::
+
+    from_kuid(u0:k10000:r10000, k21000) = u-1
+
+Again, the crossmapping algorithm fails in this case because the kernel id in
+the filesystem idmapping cannot be mapped to a userspace id in the caller's
+idmapping. Thus, the kernel will report the ownership of this file as the
+overflowid.
+
+Note how in the last two examples things would be simple if the caller were
+using the initial idmapping. For a filesystem mounted with the initial
+idmapping it would be trivial. So we only consider a filesystem with an
+idmapping of ``u0:k20000:r10000``:
+
+1. Map the userspace id on disk down into a kernel id in the filesystem's
+   idmapping::
+
+    make_kuid(u0:k20000:r10000, u1000) = k21000
+
+2. Map the kernel id up into a userspace id in the caller's idmapping::
+
+    from_kuid(u0:k0:r4294967295, k21000) = u21000
+
+Idmappings on idmapped mounts
+-----------------------------
+
+As the examples in the previous section have shown, an incompatibility between
+the caller's idmapping and the filesystem's idmapping causes various issues for
+workloads. For a more complex but common example, consider two containers
+started on the host. To completely prevent the two containers from affecting
+each other, an administrator may often use different non-overlapping idmappings
+for the two containers::
+
+ container1 idmapping:  u0:k10000:r10000
+ container2 idmapping:  u0:k20000:r10000
+ filesystem idmapping:  u0:k30000:r10000
+
+An administrator wanting to provide easy read-write access to the following set
+of files::
+
+ dir id:       u0
+ dir/file1 id: u1000
+ dir/file2 id: u2000
+
+to both containers currently can't do so.
+
+Of course the administrator has the option to recursively change ownership via
+``chown()``. For example, they could change ownership so that ``dir`` and all
+files below it can be crossmapped from the filesystem's into the container's
+idmapping. Let's assume they change ownership so it is compatible with the
+first container's idmapping::
+
+ dir id:       u10000
+ dir/file1 id: u11000
+ dir/file2 id: u12000
+
+This would still leave ``dir`` rather useless to the second container. In fact,
+``dir`` and all files below it would continue to appear owned by the overflowid
+for the second container.
+
+Or consider another increasingly popular example. Some service managers such as
+systemd implement a concept called "portable home directories". A user may want
+to use their home directories on different machines where they are assigned
+different login userspace ids. Most users will have ``u1000`` as the login id
+on their machine at home and all files in their home directory will usually be
+owned by ``u1000``. At uni or at work they may have another login id such as
+``u1125``. This makes it rather difficult to interact with their home directory
+on their work machine.
+
+In both cases changing ownership recursively has grave implications. The most
+obvious one is that ownership is changed globally and permanently. In the home
+directory case this change in ownership would even need to happen every time the
+user switches from their home to their work machine. For really large sets of
+files this becomes increasingly costly.
+
+If the user is lucky, they are dealing with a filesystem that is mountable
+inside user namespaces. But this would also change ownership globally and the
+change in ownership is tied to the lifetime of the filesystem mount, i.e. the
+superblock. The only way to change ownership is to completely unmount the
+filesystem and mount it again in another user namespace. This is usually
+impossible because it would mean that all users currently accessing the
+filesystem would lose access. And it means that ``dir`` still can't be shared
+between two containers with different idmappings.
+But usually the user doesn't even have this option since most filesystems
+aren't mountable inside containers. And not having them mountable might be
+desirable as it doesn't require the filesystem to deal with malicious
+filesystem images.
+
+But the usecases mentioned above, and more, can be handled by idmapped mounts.
+They make it possible to expose the same set of dentries with different
+ownership at different mounts. This is achieved by marking the mounts with
+a user namespace through the ``mount_setattr()`` system call. The idmapping
+associated with it is then used to translate from the caller's idmapping to
+the filesystem's idmapping and vice versa using the remapping algorithm we
+introduced above.
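+
+For completeness, here is a rough userspace sketch of how a mount might be
+marked in this way (error handling omitted; the ``/source/dir``,
+``/target/dir`` and ``/proc/1234/ns/user`` paths are made up, and the
+``__NR_*`` numbers and ``struct mount_attr`` definition require reasonably
+recent kernel headers and libc)::
+
+ #define _GNU_SOURCE
+ #include <fcntl.h>
+ #include <unistd.h>
+ #include <sys/syscall.h>
+ #include <linux/mount.h> /* struct mount_attr, MOUNT_ATTR_IDMAP, OPEN_TREE_* */
+
+ int main(void)
+ {
+         /* Error handling omitted for brevity; both paths are purely illustrative. */
+         int userns_fd = open("/proc/1234/ns/user", O_RDONLY | O_CLOEXEC);
+         int tree_fd = syscall(__NR_open_tree, AT_FDCWD, "/source/dir",
+                               OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+
+         struct mount_attr attr = {
+                 .attr_set = MOUNT_ATTR_IDMAP, /* mark the new mount as idmapped */
+                 .userns_fd = userns_fd,       /* idmapping taken from this user namespace */
+         };
+         syscall(__NR_mount_setattr, tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr));
+
+         /* Attach the now idmapped, detached mount at its new location. */
+         syscall(__NR_move_mount, tree_fd, "", AT_FDCWD, "/target/dir",
+                 MOVE_MOUNT_F_EMPTY_PATH);
+
+         close(tree_fd);
+         close(userns_fd);
+         return 0;
+ }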
+
+Idmapped mounts make it possible to change ownership in a temporary and
+localized way. The ownership changes are restricted to a specific mount and the
+ownership changes are tied to the lifetime of the mount. All other users and
+locations where the filesystem is exposed are unaffected.
+
+Filesystems that support idmapped mounts don't have any real reason to support
+being mountable inside user namespaces. A filesystem could be exposed
+completely under an idmapped mount to get the same effect. This has the
+advantage that filesystems can leave the creation of the superblock to
+privileged users in the initial user namespace.
+
+However, it is perfectly possible to combine idmapped mounts with filesystems
+mountable inside user namespaces. We will touch on this further below.
+
+Remapping helpers
+~~~~~~~~~~~~~~~~~
+
+Idmapping functions were added that translate between idmappings. They make use
+of the remapping algorithm we've introduced earlier. We're going to look at
+two:
+
+- ``i_uid_into_mnt()`` and ``i_gid_into_mnt()``
+
+  The ``i_*id_into_mnt()`` functions translate the filesystem's kernel ids
+  into kernel ids in the mount's idmapping::
+
+   /* Map the filesystem's kernel id up into a userspace id in the filesystem's idmapping. */
+   from_kuid(filesystem, kid) = uid
+
+   /* Map the filesystem's userspace id down into a kernel id in the mount's idmapping. */
+   make_kuid(mount, uid) = kuid
+
+- ``mapped_fsuid()`` and ``mapped_fsgid()``
+
+  The ``mapped_fs*id()`` functions translate the caller's kernel ids into
+  kernel ids in the filesystem's idmapping. This translation is achieved by
+  remapping the caller's kernel ids using the mount's idmapping::
+
+   /* Map the caller's kernel id up into a userspace id in the mount's idmapping. */
+   from_kuid(mount, kid) = uid
+
+   /* Map the mount's userspace id down into a kernel id in the filesystem's idmapping. */
+   make_kuid(filesystem, uid) = kuid
+
+Note that these two functions invert each other. Consider the following
+idmappings::
+
+ caller idmapping:     u0:k10000:r10000
+ filesystem idmapping: u0:k20000:r10000
+ mount idmapping:      u0:k10000:r10000
+
+Assume a file owned by ``u1000`` is read from disk. The filesystem maps this id
+to ``k21000`` according to its idmapping. This is what is stored in the
+inode's ``i_uid`` and ``i_gid`` fields.
+
+When the caller queries the ownership of this file via ``stat()`` the kernel
+would usually simply use the crossmapping algorithm and map the filesystem's
+kernel id up to a userspace id in the caller's idmapping.
+
+But when the caller is accessing the file on an idmapped mount the kernel will
+first call ``i_uid_into_mnt()`` thereby translating the filesystem's kernel id
+into a kernel id in the mount's idmapping::
+
+ i_uid_into_mnt(k21000):
+   /* Map the filesystem's kernel id up into a userspace id. */
+   from_kuid(u0:k20000:r10000, k21000) = u1000
+
+   /* Map the filesystem's userspace id down into a kernel id in the mount's idmapping. */
+   make_kuid(u0:k10000:r10000, u1000) = k11000
+
+Finally, when the kernel reports the owner to the caller it will turn the
+kernel id in the mount's idmapping into a userspace id in the caller's
+idmapping::
+
+  from_kuid(u0:k10000:r10000, k11000) = u1000
+
+We can test whether this algorithm really works by verifying what happens when
+we create a new file. Let's say the user is creating a file with ``u1000``.
+
+The kernel maps this to ``k11000`` in the caller's idmapping. Usually the
+kernel would now apply the crossmapping, verifying that ``k11000`` can be
+mapped to a userspace id in the filesystem's idmapping. Since ``k11000`` can't
+be mapped up in the filesystem's idmapping directly this creation request
+fails.
+
+But when the caller is accessing the file on an idmapped mount the kernel will
+first call ``mapped_fs*id()`` thereby translating the caller's kernel id into
+a kernel id according to the mount's idmapping::
+
+ mapped_fsuid(k11000):
+    /* Map the caller's kernel id up into a userspace id in the mount's idmapping. */
+    from_kuid(u0:k10000:r10000, k11000) = u1000
+
+    /* Map the mount's userspace id down into a kernel id in the filesystem's idmapping. */
+    make_kuid(u0:k20000:r10000, u1000) = k21000
+
+When finally writing to disk the kernel will then map ``k21000`` up into a
+userspace id in the filesystem's idmapping::
+
+   from_kuid(u0:k20000:r10000, k21000) = u1000
+
+As we can see, we end up with an invertible and therefore information
+preserving algorithm. A file created by ``u1000`` on an idmapped mount will
+also be reported as being owned by ``u1000``, and vice versa.
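+
+A sketch of both helpers and of this round trip in the illustrative userspace
+model (the kernel's real helpers operate on ``kuid_t`` and user namespaces; the
+simplified signatures below are invented for this document)::
+
+ #include <assert.h>
+
+ struct idmap { unsigned int u, k, r; };
+
+ static unsigned int make_kuid(struct idmap m, unsigned int uid)
+ {
+         if (uid < m.u || uid - m.u >= m.r)
+                 return (unsigned int)-1;
+         return m.k + (uid - m.u);
+ }
+
+ static unsigned int from_kuid(struct idmap m, unsigned int kuid)
+ {
+         if (kuid < m.k || kuid - m.k >= m.r)
+                 return (unsigned int)-1;
+         return m.u + (kuid - m.k);
+ }
+
+ /* Filesystem kernel id -> kernel id in the mount's idmapping. */
+ static unsigned int i_uid_into_mnt(struct idmap fs, struct idmap mnt, unsigned int kid)
+ {
+         return make_kuid(mnt, from_kuid(fs, kid));
+ }
+
+ /* Caller's kernel id -> kernel id in the filesystem's idmapping. */
+ static unsigned int mapped_fsuid(struct idmap fs, struct idmap mnt, unsigned int kid)
+ {
+         return make_kuid(fs, from_kuid(mnt, kid));
+ }
+
+ int main(void)
+ {
+         struct idmap fs  = { 0, 20000, 10000 }; /* u0:k20000:r10000 */
+         struct idmap mnt = { 0, 10000, 10000 }; /* u0:k10000:r10000 */
+
+         /* The two helpers invert each other. */
+         assert(mapped_fsuid(fs, mnt, 11000) == 21000);
+         assert(i_uid_into_mnt(fs, mnt, 21000) == 11000);
+         return 0;
+ }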
+
+Let's now briefly reconsider the failing examples from earlier in the context
+of idmapped mounts.
+
+Example 2 reconsidered
+~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ caller id:            u1000
+ caller idmapping:     u0:k10000:r10000
+ filesystem idmapping: u0:k20000:r10000
+ mount idmapping:      u0:k10000:r10000
+
+When the caller is using a non-initial idmapping the common case is to attach
+the same idmapping to the mount. We now perform three steps:
+
+1. Map the caller's userspace ids into kernel ids in the caller's idmapping::
+
+    make_kuid(u0:k10000:r10000, u1000) = k11000
+
+2. Translate the caller's kernel id into a kernel id in the filesystem's
+   idmapping::
+
+    mapped_fsuid(k11000):
+      /* Map the kernel id up into a userspace id in the mount's idmapping. */
+      from_kuid(u0:k10000:r10000, k11000) = u1000
+
+      /* Map the userspace id down into a kernel id in the filesystem's idmapping. */
+      make_kuid(u0:k20000:r10000, u1000) = k21000
+
+3. Verify that the caller's kernel ids can be mapped to userspace ids in the
+   filesystem's idmapping::
+
+    from_kuid(u0:k20000:r10000, k21000) = u1000
+
+So the ownership that lands on disk will be ``u1000``.
+
+Example 3 reconsidered
+~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ caller id:            u1000
+ caller idmapping:     u0:k10000:r10000
+ filesystem idmapping: u0:k0:r4294967295
+ mount idmapping:      u0:k10000:r10000
+
+The same translation algorithm works with the third example.
+
+1. Map the caller's userspace ids into kernel ids in the caller's idmapping::
+
+    make_kuid(u0:k10000:r10000, u1000) = k11000
+
+2. Translate the caller's kernel id into a kernel id in the filesystem's
+   idmapping::
+
+    mapped_fsuid(k11000):
+       /* Map the kernel id up into a userspace id in the mount's idmapping. */
+       from_kuid(u0:k10000:r10000, k11000) = u1000
+
+       /* Map the userspace id down into a kernel id in the filesystem's idmapping. */
+       make_kuid(u0:k0:r4294967295, u1000) = k1000
+
+3. Verify that the caller's kernel ids can be mapped to userspace ids in the
+   filesystem's idmapping::
+
+    from_kuid(u0:k0:r4294967295, k1000) = u1000
+
+So the ownership that lands on disk will be ``u1000``.
+
+Example 4 reconsidered
+~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ file id:              u1000
+ caller idmapping:     u0:k10000:r10000
+ filesystem idmapping: u0:k0:r4294967295
+ mount idmapping:      u0:k10000:r10000
+
+In order to report ownership to userspace the kernel now does three steps using
+the translation algorithm we introduced earlier:
+
+1. Map the userspace id on disk down into a kernel id in the filesystem's
+   idmapping::
+
+    make_kuid(u0:k0:r4294967295, u1000) = k1000
+
+2. Translate the kernel id into a kernel id in the mount's idmapping::
+
+    i_uid_into_mnt(k1000):
+      /* Map the kernel id up into a userspace id in the filesystem's idmapping. */
+      from_kuid(u0:k0:r4294967295, k1000) = u1000
+
+      /* Map the userspace id down into a kernel id in the mount's idmapping. */
+      make_kuid(u0:k10000:r10000, u1000) = k11000
+
+3. Map the kernel id up into a userspace id in the caller's idmapping::
+
+    from_kuid(u0:k10000:r10000, k11000) = u1000
+
+Earlier, the file's kernel id couldn't be crossmapped into the caller's
+idmapping. With the idmapped mount in place it now can be crossmapped into the
+caller's idmapping via the mount's idmapping. The file is now reported as being
+owned by ``u1000`` according to the mount's idmapping.
+
+Example 5 reconsidered
+~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ file id:              u1000
+ caller idmapping:     u0:k10000:r10000
+ filesystem idmapping: u0:k20000:r10000
+ mount idmapping:      u0:k10000:r10000
+
+Again, in order to report ownership to userspace the kernel now does three
+steps using the translation algorithm we introduced earlier:
+
+1. Map the userspace id on disk down into a kernel id in the filesystem's
+   idmapping::
+
+    make_kuid(u0:k20000:r10000, u1000) = k21000
+
+2. Translate the kernel id into a kernel id in the mount's idmapping::
+
+    i_uid_into_mnt(k21000):
+      /* Map the kernel id up into a userspace id in the filesystem's idmapping. */
+      from_kuid(u0:k20000:r10000, k21000) = u1000
+
+      /* Map the userspace id down into a kernel id in the mount's idmapping. */
+      make_kuid(u0:k10000:r10000, u1000) = k11000
+
+3. Map the kernel id up into a userspace id in the caller's idmapping::
+
+    from_kuid(u0:k10000:r10000, k11000) = u1000
+
+Earlier, the file's kernel id couldn't be crossmapped into the caller's
+idmapping. With the idmapped mount in place it now can be crossmapped into the
+caller's idmapping via the mount's idmapping. The file is now reported as being
+owned by ``u1000`` according to the mount's idmapping.
+
+Changing ownership on a home directory
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We've seen above how idmapped mounts can be used to translate between
+idmappings when either the caller, the filesystem, or both use a non-initial
+idmapping. A wide range of usecases exists where the caller is using
+a non-initial idmapping. This mostly happens in the context of containerized
+workloads. As we have seen, the consequence is that for both filesystems
+mounted with the initial idmapping and filesystems mounted with non-initial
+idmappings, access to the filesystem doesn't work because the kernel ids can't
+be crossmapped between the caller's and the filesystem's idmapping.
+
+As we've seen above idmapped mounts provide a solution to this by remapping the
+caller's or filesystem's idmapping according to the mount's idmapping.
+
+Aside from containerized workloads, idmapped mounts have the advantage that
+they also work when both the caller and the filesystem use the initial
+idmapping, which means users on the host can change the ownership of
+directories and files on a per-mount basis.
+
+Consider our previous example where a user has their home directory on portable
+storage. At home they have id ``u1000`` and all files in their home directory
+are owned by ``u1000`` whereas at uni or work they have login id ``u1125``.
+
+Taking their home directory with them becomes problematic. They can't easily
+access their files, they might not be able to write to disk without applying
+lax permissions or ACLs, and even if they can, they will end up with an
+annoying mix of files and directories owned by ``u1000`` and ``u1125``.
+
+Idmapped mounts make it possible to solve this problem. A user can create an
+idmapped mount for their home directory on their work computer or their
+computer at home
+depending on what ownership they would prefer to end up on the portable storage
+itself.
+
+Let's assume they want all files on disk to belong to ``u1000``. When the user
+plugs in their portable storage at their work station they can set up a job
+that creates an idmapped mount with the minimal idmapping ``u1000:k1125:r1``.
+So now when they create a file the kernel performs the following steps we
+already know from above::
+
+ caller id:            u1125
+ caller idmapping:     u0:k0:r4294967295
+ filesystem idmapping: u0:k0:r4294967295
+ mount idmapping:      u1000:k1125:r1
+
+1. Map the caller's userspace ids into kernel ids in the caller's idmapping::
+
+    make_kuid(u0:k0:r4294967295, u1125) = k1125
+
+2. Translate the caller's kernel id into a kernel id in the filesystem's
+   idmapping::
+
+    mapped_fsuid(k1125):
+      /* Map the kernel id up into a userspace id in the mount's idmapping. */
+      from_kuid(u1000:k1125:r1, k1125) = u1000
+
+      /* Map the userspace id down into a kernel id in the filesystem's idmapping. */
+      make_kuid(u0:k0:r4294967295, u1000) = k1000
+
+3. Verify that the caller's kernel ids can be mapped to userspace ids in the
+   filesystem's idmapping::
+
+    from_kuid(u0:k0:r4294967295, k1000) = u1000
+
+So ultimately the file will be created with ``u1000`` on disk.
+
+Now let's briefly look at what ownership the caller with id ``u1125`` will see
+on their work computer:
+
+::
+
+ file id:              u1000
+ caller idmapping:     u0:k0:r4294967295
+ filesystem idmapping: u0:k0:r4294967295
+ mount idmapping:      u1000:k1125:r1
+
+1. Map the userspace id on disk down into a kernel id in the filesystem's
+   idmapping::
+
+    make_kuid(u0:k0:r4294967295, u1000) = k1000
+
+2. Translate the kernel id into a kernel id in the mount's idmapping::
+
+    i_uid_into_mnt(k1000):
+      /* Map the kernel id up into a userspace id in the filesystem's idmapping. */
+      from_kuid(u0:k0:r4294967295, k1000) = u1000
+
+      /* Map the userspace id down into a kernel id in the mount's idmapping. */
+      make_kuid(u1000:k1125:r1, u1000) = k1125
+
+3. Map the kernel id up into a userspace id in the caller's idmapping::
+
+    from_kuid(u0:k0:r4294967295, k1125) = u1125
+
+So ultimately the caller will see that the file belongs to ``u1125``,
+which is the caller's userspace id on their workstation in our example.
+
+The raw userspace id that is put on disk is ``u1000``, so when the user takes
+their home directory back to their home computer, where they are assigned
+``u1000`` under the initial idmapping, and mounts the filesystem with the
+initial idmapping, they will see all those files owned by ``u1000``.
+
+Shortcircuiting
+---------------
+
+Currently, the implementation of idmapped mounts enforces that the filesystem
+is mounted with the initial idmapping. The reason is simply that none of the
+filesystems that we targeted were mountable with a non-initial idmapping. But
+that might change soon enough. As we've seen above, thanks to the properties of
+idmappings the translation works both for filesystems mounted with the initial
+idmapping and for filesystems mounted with non-initial idmappings.
+
+Based on this current restriction to filesystems mounted with the initial
+idmapping, two noticeable shortcuts have been taken:
+
+1. We always stash a reference to the initial user namespace in ``struct
+   vfsmount``. Idmapped mounts are thus mounts that have a non-initial user
+   namespace attached to them.
+
+   In order to support idmapped mounts this needs to be changed. Instead of
+   stashing the initial user namespace, the user namespace the filesystem was
+   mounted with must be stashed. An idmapped mount is then any mount that has
+   a different user namespace attached than the one the filesystem was mounted
+   with. This has no user-visible consequences.
+
+2. The translation algorithms in ``mapped_fs*id()`` and ``i_*id_into_mnt()``
+   are simplified.
+
+   Let's consider ``mapped_fs*id()`` first. This function translates the
+   caller's kernel id into a kernel id in the filesystem's idmapping via
+   a mount's idmapping. The full algorithm is::
+
+    mapped_fsuid(kid):
+      /* Map the kernel id up into a userspace id in the mount's idmapping. */
+      from_kuid(mount-idmapping, kid) = uid
+
+      /* Map the userspace id down into a kernel id in the filesystem's idmapping. */
+      make_kuid(filesystem-idmapping, uid) = kuid
+
+   We know that the filesystem is always mounted with the initial idmapping as
+   we enforce this in ``mount_setattr()``. So this can be shortened to::
+
+    mapped_fsuid(kid):
+      /* Map the kernel id up into a userspace id in the mount's idmapping. */
+      from_kuid(mount-idmapping, kid) = uid
+
+      /* Map the userspace id down into a kernel id in the filesystem's idmapping. */
+      KUIDT_INIT(uid) = kuid
+
+   Similarly, for ``i_*id_into_mnt()``, which translates the filesystem's kernel
+   id into a kernel id in the mount's idmapping::
+
+    i_uid_into_mnt(kid):
+      /* Map the kernel id up into a userspace id in the filesystem's idmapping. */
+      from_kuid(filesystem-idmapping, kid) = uid
+
+      /* Map the userspace id down into a kernel id in the mount's idmapping. */
+      make_kuid(mount-idmapping, uid) = kuid
+
+   Again, we know that the filesystem is always mounted with the initial
+   idmapping as we enforce this in ``mount_setattr()``. So this can be
+   shortened to::
+
+    i_uid_into_mnt(kid):
+      /* Map the kernel id up into a userspace id in the filesystem's idmapping. */
+      __kuid_val(kid) = uid
+
+      /* Map the userspace id down into a kernel id in the mount's idmapping. */
+      make_kuid(mount-idmapping, uid) = kuid
+
+Handling filesystems mounted with non-initial idmappings requires that the
+translation functions be converted to their full form. They can still be
+shortcircuited on non-idmapped mounts. This has no user-visible consequences.
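+
+A tiny sketch of why the shortcut is safe under the current restriction: when
+the filesystem's idmapping is the identity idmapping, mapping the userspace id
+back down through it is a no-op, so the full form and the shortened form agree
+(illustrative model only, not the kernel's implementation)::
+
+ #include <assert.h>
+
+ struct idmap { unsigned int u, k, r; };
+
+ static unsigned int make_kuid(struct idmap m, unsigned int uid)
+ {
+         if (uid < m.u || uid - m.u >= m.r)
+                 return (unsigned int)-1;
+         return m.k + (uid - m.u);
+ }
+
+ static unsigned int from_kuid(struct idmap m, unsigned int kuid)
+ {
+         if (kuid < m.k || kuid - m.k >= m.r)
+                 return (unsigned int)-1;
+         return m.u + (kuid - m.k);
+ }
+
+ int main(void)
+ {
+         struct idmap identity = { 0, 0, 4294967295U }; /* filesystem idmapping */
+         struct idmap mnt      = { 0, 10000, 10000 };   /* mount idmapping      */
+
+         for (unsigned int kid = 10000; kid < 20000; kid++) {
+                 unsigned int uid = from_kuid(mnt, kid);
+
+                 /* Full form vs. shortcut: make_kuid(identity, uid) is just uid. */
+                 assert(make_kuid(identity, uid) == uid);
+         }
+         return 0;
+ }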
index 246af51..1a2dd4d 100644 (file)
@@ -34,6 +34,7 @@ algorithms work.
    quota
    seq_file
    sharedsubtree
+   idmappings
 
    automount-support
 
@@ -72,7 +73,7 @@ Documentation for filesystem implementations.
    befs
    bfs
    btrfs
-   cifs/cifsroot
+   cifs/index
    ceph
    coda
    configfs
index 2183fd8..2a75dd5 100644 (file)
@@ -271,19 +271,19 @@ prototypes::
 locking rules:
        All except set_page_dirty and freepage may block
 
-====================== ======================== =========
-ops                    PageLocked(page)         i_rwsem
-====================== ======================== =========
+====================== ======================== =========      ===============
+ops                    PageLocked(page)         i_rwsem        invalidate_lock
+====================== ======================== =========      ===============
 writepage:             yes, unlocks (see below)
-readpage:              yes, unlocks
+readpage:              yes, unlocks                            shared
 writepages:
 set_page_dirty         no
-readahead:             yes, unlocks
-readpages:             no
+readahead:             yes, unlocks                            shared
+readpages:             no                                      shared
 write_begin:           locks the page           exclusive
 write_end:             yes, unlocks             exclusive
 bmap:
-invalidatepage:                yes
+invalidatepage:                yes                                     exclusive
 releasepage:           yes
 freepage:              yes
 direct_IO:
@@ -295,7 +295,7 @@ is_partially_uptodate:      yes
 error_remove_page:     yes
 swap_activate:         no
 swap_deactivate:       no
-====================== ======================== =========
+====================== ======================== =========      ===============
 
 ->write_begin(), ->write_end() and ->readpage() may be called from
 the request handler (/dev/loop).
@@ -378,7 +378,10 @@ keep it that way and don't breed new callers.
 ->invalidatepage() is called when the filesystem must attempt to drop
 some or all of the buffers from the page when it is being truncated. It
 returns zero on success. If ->invalidatepage is zero, the kernel uses
-block_invalidatepage() instead.
+block_invalidatepage() instead. The filesystem must exclusively acquire
+invalidate_lock before invalidating page cache in truncate / hole punch path
+(and thus calling into ->invalidatepage) to block races between page cache
+invalidation and page cache filling functions (fault, read, ...).
 
 ->releasepage() is called when the kernel is about to try to drop the
 buffers from the page in preparation for freeing it.  It returns zero to
@@ -506,6 +509,7 @@ prototypes::
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
+       int (*iopoll) (struct kiocb *kiocb, bool spin);
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
@@ -518,12 +522,6 @@ prototypes::
        int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
        int (*fasync) (int, struct file *, int);
        int (*lock) (struct file *, int, struct file_lock *);
-       ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
-                       loff_t *);
-       ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
-                       loff_t *);
-       ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t,
-                       void __user *);
        ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
                        loff_t *, int);
        unsigned long (*get_unmapped_area)(struct file *, unsigned long,
@@ -536,6 +534,14 @@ prototypes::
                        size_t, unsigned int);
        int (*setlease)(struct file *, long, struct file_lock **, void **);
        long (*fallocate)(struct file *, int, loff_t, loff_t);
+       void (*show_fdinfo)(struct seq_file *m, struct file *f);
+       unsigned (*mmap_capabilities)(struct file *);
+       ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
+                       loff_t, size_t, unsigned int);
+       loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
+                       struct file *file_out, loff_t pos_out,
+                       loff_t len, unsigned int remap_flags);
+       int (*fadvise)(struct file *, loff_t, loff_t, int);
 
 locking rules:
        All may block.
@@ -570,6 +576,25 @@ in sys_read() and friends.
 the lease within the individual filesystem to record the result of the
 operation
 
+->fallocate implementation must be really careful to maintain page cache
+consistency when punching holes or performing other operations that invalidate
+page cache contents. Usually the filesystem needs to call
+truncate_inode_pages_range() to invalidate relevant range of the page cache.
+However the filesystem usually also needs to update its internal (and on disk)
+view of file offset -> disk block mapping. Until this update is finished, the
+filesystem needs to block page faults and reads from reloading now-stale page
+cache contents from the disk. Since VFS acquires mapping->invalidate_lock in
+shared mode when loading pages from disk (filemap_fault(), filemap_read(),
+readahead paths), the fallocate implementation must take the invalidate_lock to
+prevent reloading.
+
+->copy_file_range and ->remap_file_range implementations need to serialize
+against modifications of file data while the operation is running. For
+blocking changes through write(2) and similar operations inode->i_rwsem can be
+used. To block changes to file contents via a memory mapping during the
+operation, the filesystem must take mapping->invalidate_lock to coordinate
+with ->page_mkwrite.
+
 dquot_operations
 ================
 
@@ -627,11 +652,11 @@ pfn_mkwrite:      yes
 access:                yes
 =============  =========       ===========================
 
-->fault() is called when a previously not present pte is about
-to be faulted in. The filesystem must find and return the page associated
-with the passed in "pgoff" in the vm_fault structure. If it is possible that
-the page may be truncated and/or invalidated, then the filesystem must lock
-the page, then ensure it is not already truncated (the page lock will block
+->fault() is called when a previously not present pte is about to be faulted
+in. The filesystem must find and return the page associated with the passed in
+"pgoff" in the vm_fault structure. If it is possible that the page may be
+truncated and/or invalidated, then the filesystem must lock invalidate_lock,
+then ensure the page is not already truncated (invalidate_lock will block
 subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
 locked. The VM will unlock the page.
 
@@ -644,12 +669,14 @@ page table entry. Pointer to entry associated with the page is passed in
 "pte" field in vm_fault structure. Pointers to entries for other offsets
 should be calculated relative to "pte".
 
-->page_mkwrite() is called when a previously read-only pte is
-about to become writeable. The filesystem again must ensure that there are
-no truncate/invalidate races, and then return with the page locked. If
-the page has been truncated, the filesystem should not look up a new page
-like the ->fault() handler, but simply return with VM_FAULT_NOPAGE, which
-will cause the VM to retry the fault.
+->page_mkwrite() is called when a previously read-only pte is about to become
+writeable. The filesystem again must ensure that there are no
+truncate/invalidate races or races with operations such as ->remap_file_range
+or ->copy_file_range, and then return with the page locked. Usually
+mapping->invalidate_lock is suitable for proper serialization. If the page has
+been truncated, the filesystem should not look up a new page like the ->fault()
+handler, but simply return with VM_FAULT_NOPAGE, which will cause the VM to
+retry the fault.
 
 ->pfn_mkwrite() is the same as page_mkwrite but when the pte is
 VM_PFNMAP or VM_MIXEDMAP with a page-less entry. Expected return is
diff --git a/Documentation/filesystems/mandatory-locking.rst b/Documentation/filesystems/mandatory-locking.rst
deleted file mode 100644 (file)
index 9ce7354..0000000
+++ /dev/null
@@ -1,188 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-=====================================================
-Mandatory File Locking For The Linux Operating System
-=====================================================
-
-               Andy Walker <andy@lysaker.kvaerner.no>
-
-                          15 April 1996
-
-                    (Updated September 2007)
-
-0. Why you should avoid mandatory locking
------------------------------------------
-
-The Linux implementation is prey to a number of difficult-to-fix race
-conditions which in practice make it not dependable:
-
-       - The write system call checks for a mandatory lock only once
-         at its start.  It is therefore possible for a lock request to
-         be granted after this check but before the data is modified.
-         A process may then see file data change even while a mandatory
-         lock was held.
-       - Similarly, an exclusive lock may be granted on a file after
-         the kernel has decided to proceed with a read, but before the
-         read has actually completed, and the reading process may see
-         the file data in a state which should not have been visible
-         to it.
-       - Similar races make the claimed mutual exclusion between lock
-         and mmap similarly unreliable.
-
-1. What is  mandatory locking?
-------------------------------
-
-Mandatory locking is kernel enforced file locking, as opposed to the more usual
-cooperative file locking used to guarantee sequential access to files among
-processes. File locks are applied using the flock() and fcntl() system calls
-(and the lockf() library routine which is a wrapper around fcntl().) It is
-normally a process' responsibility to check for locks on a file it wishes to
-update, before applying its own lock, updating the file and unlocking it again.
-The most commonly used example of this (and in the case of sendmail, the most
-troublesome) is access to a user's mailbox. The mail user agent and the mail
-transfer agent must guard against updating the mailbox at the same time, and
-prevent reading the mailbox while it is being updated.
-
-In a perfect world all processes would use and honour a cooperative, or
-"advisory" locking scheme. However, the world isn't perfect, and there's
-a lot of poorly written code out there.
-
-In trying to address this problem, the designers of System V UNIX came up
-with a "mandatory" locking scheme, whereby the operating system kernel would
-block attempts by a process to write to a file that another process holds a
-"read" -or- "shared" lock on, and block attempts to both read and write to a 
-file that a process holds a "write " -or- "exclusive" lock on.
-
-The System V mandatory locking scheme was intended to have as little impact as
-possible on existing user code. The scheme is based on marking individual files
-as candidates for mandatory locking, and using the existing fcntl()/lockf()
-interface for applying locks just as if they were normal, advisory locks.
-
-.. Note::
-
-   1. In saying "file" in the paragraphs above I am actually not telling
-      the whole truth. System V locking is based on fcntl(). The granularity of
-      fcntl() is such that it allows the locking of byte ranges in files, in
-      addition to entire files, so the mandatory locking rules also have byte
-      level granularity.
-
-   2. POSIX.1 does not specify any scheme for mandatory locking, despite
-      borrowing the fcntl() locking scheme from System V. The mandatory locking
-      scheme is defined by the System V Interface Definition (SVID) Version 3.
-
-2. Marking a file for mandatory locking
----------------------------------------
-
-A file is marked as a candidate for mandatory locking by setting the group-id
-bit in its file mode but removing the group-execute bit. This is an otherwise
-meaningless combination, and was chosen by the System V implementors so as not
-to break existing user programs.
-
-Note that the group-id bit is usually automatically cleared by the kernel when
-a setgid file is written to. This is a security measure. The kernel has been
-modified to recognize the special case of a mandatory lock candidate and to
-refrain from clearing this bit. Similarly the kernel has been modified not
-to run mandatory lock candidates with setgid privileges.
-
-3. Available implementations
-----------------------------
-
-I have considered the implementations of mandatory locking available with
-SunOS 4.1.x, Solaris 2.x and HP-UX 9.x.
-
-Generally I have tried to make the most sense out of the behaviour exhibited
-by these three reference systems. There are many anomalies.
-
-All the reference systems reject all calls to open() for a file on which
-another process has outstanding mandatory locks. This is in direct
-contravention of SVID 3, which states that only calls to open() with the
-O_TRUNC flag set should be rejected. The Linux implementation follows the SVID
-definition, which is the "Right Thing", since only calls with O_TRUNC can
-modify the contents of the file.
-
-HP-UX even disallows open() with O_TRUNC for a file with advisory locks, not
-just mandatory locks. That would appear to contravene POSIX.1.
-
-mmap() is another interesting case. All the operating systems mentioned
-prevent mandatory locks from being applied to an mmap()'ed file, but  HP-UX
-also disallows advisory locks for such a file. SVID actually specifies the
-paranoid HP-UX behaviour.
-
-In my opinion only MAP_SHARED mappings should be immune from locking, and then
-only from mandatory locks - that is what is currently implemented.
-
-SunOS is so hopeless that it doesn't even honour the O_NONBLOCK flag for
-mandatory locks, so reads and writes to locked files always block when they
-should return EAGAIN.
-
-I'm afraid that this is such an esoteric area that the semantics described
-below are just as valid as any others, so long as the main points seem to
-agree. 
-
-4. Semantics
-------------
-
-1. Mandatory locks can only be applied via the fcntl()/lockf() locking
-   interface - in other words the System V/POSIX interface. BSD style
-   locks using flock() never result in a mandatory lock.
-
-2. If a process has locked a region of a file with a mandatory read lock, then
-   other processes are permitted to read from that region. If any of these
-   processes attempts to write to the region it will block until the lock is
-   released, unless the process has opened the file with the O_NONBLOCK
-   flag in which case the system call will return immediately with the error
-   status EAGAIN.
-
-3. If a process has locked a region of a file with a mandatory write lock, all
-   attempts to read or write to that region block until the lock is released,
-   unless a process has opened the file with the O_NONBLOCK flag in which case
-   the system call will return immediately with the error status EAGAIN.
-
-4. Calls to open() with O_TRUNC, or to creat(), on a existing file that has
-   any mandatory locks owned by other processes will be rejected with the
-   error status EAGAIN.
-
-5. Attempts to apply a mandatory lock to a file that is memory mapped and
-   shared (via mmap() with MAP_SHARED) will be rejected with the error status
-   EAGAIN.
-
-6. Attempts to create a shared memory map of a file (via mmap() with MAP_SHARED)
-   that has any mandatory locks in effect will be rejected with the error status
-   EAGAIN.
-
-5. Which system calls are affected?
------------------------------------
-
-Those which modify a file's contents, not just the inode. That gives read(),
-write(), readv(), writev(), open(), creat(), mmap(), truncate() and
-ftruncate(). truncate() and ftruncate() are considered to be "write" actions
-for the purposes of mandatory locking.
-
-The affected region is usually defined as stretching from the current position
-for the total number of bytes read or written. For the truncate calls it is
-defined as the bytes of a file removed or added (we must also consider bytes
-added, as a lock can specify just "the whole file", rather than a specific
-range of bytes.)
-
-Note 3: I may have overlooked some system calls that need mandatory lock
-checking in my eagerness to get this code out the door. Please let me know, or
-better still fix the system calls yourself and submit a patch to me or Linus.
-
-6. Warning!
------------
-
-Not even root can override a mandatory lock, so runaway processes can wreak
-havoc if they lock crucial files. The way around it is to change the file
-permissions (remove the setgid bit) before trying to read or write to it.
-Of course, that might be a bit tricky if the system is hung :-(
-
-7. The "mand" mount option
---------------------------
-Mandatory locking is disabled on all filesystems by default, and must be
-administratively enabled by mounting with "-o mand". That mount option
-is only allowed if the mounting task has the CAP_SYS_ADMIN capability.
-
-Since kernel v4.5, it is possible to disable mandatory locking
-altogether by setting CONFIG_MANDATORY_FILE_LOCKING to "n". A kernel
-with this disabled will reject attempts to mount filesystems with the
-"mand" mount option with the error status EPERM.
index 675ba86..b421a3c 100644 (file)
@@ -18,114 +18,5 @@ real, with all the uAPI bits is:
         * Route shmem backend over to TTM SYSTEM for discrete
         * TTM purgeable object support
         * Move i915 buddy allocator over to TTM
-        * MMAP ioctl mode(see `I915 MMAP`_)
-        * SET/GET ioctl caching(see `I915 SET/GET CACHING`_)
 * Send RFC(with mesa-dev on cc) for final sign off on the uAPI
 * Add pciid for DG1 and turn on uAPI for real
-
-New object placement and region query uAPI
-==========================================
-Starting from DG1 we need to give userspace the ability to allocate buffers from
-device local-memory. Currently the driver supports gem_create, which can place
-buffers in system memory via shmem, and the usual assortment of other
-interfaces, like dumb buffers and userptr.
-
-To support this new capability, while also providing a uAPI which will work
-beyond just DG1, we propose to offer three new bits of uAPI:
-
-DRM_I915_QUERY_MEMORY_REGIONS
------------------------------
-New query ID which allows userspace to discover the list of supported memory
-regions(like system-memory and local-memory) for a given device. We identify
-each region with a class and instance pair, which should be unique. The class
-here would be DEVICE or SYSTEM, and the instance would be zero, on platforms
-like DG1.
-
-Side note: The class/instance design is borrowed from our existing engine uAPI,
-where we describe every physical engine in terms of its class, and the
-particular instance, since we can have more than one per class.
-
-In the future we also want to expose more information which can further
-describe the capabilities of a region.
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_memory_class drm_i915_gem_memory_class_instance drm_i915_memory_region_info drm_i915_query_memory_regions
-
-GEM_CREATE_EXT
---------------
-New ioctl which is basically just gem_create but now allows userspace to provide
-a chain of possible extensions. Note that if we don't provide any extensions and
-set flags=0 then we get the exact same behaviour as gem_create.
-
-Side note: We also need to support PXP[1] in the near future, which is also
-applicable to integrated platforms, and adds its own gem_create_ext extension,
-which basically lets userspace mark a buffer as "protected".
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_create_ext
-
-I915_GEM_CREATE_EXT_MEMORY_REGIONS
-----------------------------------
-Implemented as an extension for gem_create_ext, we would now allow userspace to
-optionally provide an immutable list of preferred placements at creation time,
-in priority order, for a given buffer object.  For the placements we expect
-them each to use the class/instance encoding, as per the output of the regions
-query. Having the list in priority order will be useful in the future when
-placing an object, say during eviction.
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_create_ext_memory_regions
-
-One fair criticism here is that this seems a little over-engineered[2]. If we
-just consider DG1 then yes, a simple gem_create.flags or something is totally
-all that's needed to tell the kernel to allocate the buffer in local-memory or
-whatever. However looking to the future we need uAPI which can also support
-upcoming Xe HP multi-tile architecture in a sane way, where there can be
-multiple local-memory instances for a given device, and so using both class and
-instance in our uAPI to describe regions is desirable, although specifically
-for DG1 it's uninteresting, since we only have a single local-memory instance.
-
-Existing uAPI issues
-====================
-Some potential issues we still need to resolve.
-
-I915 MMAP
----------
-In i915 there are multiple ways to MMAP GEM object, including mapping the same
-object using different mapping types(WC vs WB), i.e multiple active mmaps per
-object. TTM expects one MMAP at most for the lifetime of the object. If it
-turns out that we have to backpedal here, there might be some potential
-userspace fallout.
-
-I915 SET/GET CACHING
---------------------
-In i915 we have set/get_caching ioctl. TTM doesn't let us to change this, but
-DG1 doesn't support non-snooped pcie transactions, so we can just always
-allocate as WB for smem-only buffers.  If/when our hw gains support for
-non-snooped pcie transactions then we must fix this mode at allocation time as
-a new GEM extension.
-
-This is related to the mmap problem, because in general (meaning, when we're
-not running on intel cpus) the cpu mmap must not, ever, be inconsistent with
-allocation mode.
-
-Possible idea is to let the kernel picks the mmap mode for userspace from the
-following table:
-
-smem-only: WB. Userspace does not need to call clflush.
-
-smem+lmem: We only ever allow a single mode, so simply allocate this as uncached
-memory, and always give userspace a WC mapping. GPU still does snooped access
-here(assuming we can't turn it off like on DG1), which is a bit inefficient.
-
-lmem only: always WC
-
-This means on discrete you only get a single mmap mode, all others must be
-rejected. That's probably going to be a new default mode or something like
-that.
-
-Links
-=====
-[1] https://patchwork.freedesktop.org/series/86798/
-
-[2] https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5599#note_553791
index 8b76217..6270f1f 100644 (file)
@@ -17,6 +17,7 @@ Introduction
    busses/index
    i2c-topology
    muxes/i2c-mux-gpio
+   i2c-sysfs
 
 Writing device drivers
 ======================
index 91b2cf7..e26532f 100644 (file)
@@ -228,6 +228,23 @@ before posting to the mailing list. The patchwork build bot instance
 gets overloaded very easily and netdev@vger really doesn't need more
 traffic if we can help it.
 
+netdevsim is great, can I extend it for my out-of-tree tests?
+-------------------------------------------------------------
+
+No, `netdevsim` is a test vehicle solely for upstream tests.
+(Please add your tests under tools/testing/selftests/.)
+
+We also give no guarantees that `netdevsim` won't change in the future
+in a way which would break what would normally be considered uAPI.
+
+Is netdevsim considered a "user" of an API?
+-------------------------------------------
+
+Linux kernel has a long standing rule that no API should be added unless
+it has a real, in-tree user. Mock-ups and tests based on `netdevsim` are
+strongly encouraged when adding new APIs, but `netdevsim` in itself
+is **not** considered a use case/user.
+
 Any other tips to help ensure my net/net-next patch gets OK'd?
 --------------------------------------------------------------
 Attention to detail.  Re-read your own work as if you were the
index d31ed6c..024d784 100644 (file)
@@ -191,19 +191,9 @@ nf_flowtable_tcp_timeout - INTEGER (seconds)
         TCP connections may be offloaded from nf conntrack to nf flow table.
         Once aged, the connection is returned to nf conntrack with tcp pickup timeout.
 
-nf_flowtable_tcp_pickup - INTEGER (seconds)
-        default 120
-
-        TCP connection timeout after being aged from nf flow table offload.
-
 nf_flowtable_udp_timeout - INTEGER (seconds)
         default 30
 
         Control offload timeout for udp connections.
         UDP connections may be offloaded from nf conntrack to nf flow table.
         Once aged, the connection is returned to nf conntrack with udp pickup timeout.
-
-nf_flowtable_udp_pickup - INTEGER (seconds)
-        default 30
-
-        UDP connection timeout after being aged from nf flow table offload.
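The remaining flowtable timeout knobs are ordinary nf_conntrack-family sysctls. As a minimal, illustrative sketch, the TCP offload timeout can be read from procfs; the /proc/sys path below is assumed from that sysctl family and is not taken from this patch:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed path, derived from the net.netfilter sysctl family. */
            FILE *f = fopen("/proc/sys/net/netfilter/nf_flowtable_tcp_timeout", "r");
            int timeout;

            if (f && fscanf(f, "%d", &timeout) == 1)
                    printf("offloaded TCP flows age out after %d seconds\n", timeout);
            if (f)
                    fclose(f);
            return 0;
    }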
index 9c918f7..1ee2141 100644 (file)
@@ -73,7 +73,9 @@ IF_OPER_LOWERLAYERDOWN (3):
  state (f.e. VLAN).
 
 IF_OPER_TESTING (4):
- Unused in current kernel.
+ Interface is in testing mode, for example executing driver self-tests
+ or media (cable) test. It can't be used for normal traffic until tests
+ complete.
 
 IF_OPER_DORMANT (5):
  Interface is L1 up, but waiting for an external event, f.e. for a
@@ -111,7 +113,7 @@ it as lower layer.
 
 Note that for certain kind of soft-devices, which are not managing any
 real hardware, it is possible to set this bit from userspace.  One
-should use TVL IFLA_CARRIER to do so.
+should use TLV IFLA_CARRIER to do so.
 
 netif_carrier_ok() can be used to query that bit.
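From userspace, the simplest way to observe these operational states is the per-interface 'operstate' attribute in sysfs. A minimal sketch follows; the interface name is only an example:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* "eth0" is just an example interface name; adjust as needed. */
            FILE *f = fopen("/sys/class/net/eth0/operstate", "r");
            char state[32] = "unknown";

            if (f && fgets(state, sizeof(state), f))
                    state[strcspn(state, "\n")] = '\0';
            if (f)
                    fclose(f);

            /* Typical values: up, down, testing, dormant, lowerlayerdown. */
            printf("operstate: %s\n", state);
            return 0;
    }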
 
index cfc81e9..4e5b26f 100644 (file)
@@ -2762,7 +2762,7 @@ listed in:
   put_prev_task_idle
   kmem_cache_create
   pick_next_task_rt
-  get_online_cpus
+  cpus_read_lock
   pick_next_task_fair
   mutex_lock
   [...]
index 1409e40..b7070d7 100644 (file)
@@ -160,7 +160,6 @@ Code  Seq#    Include File                                           Comments
 'K'   all    linux/kd.h
 'L'   00-1F  linux/loop.h                                            conflict!
 'L'   10-1F  drivers/scsi/mpt3sas/mpt3sas_ctl.h                      conflict!
-'L'   20-2F  linux/lightnvm.h
 'L'   E0-FF  linux/ppdd.h                                            encrypted disk device driver
                                                                      <http://linux01.gwdg.de/~alatham/ppdd.html>
 'M'   all    linux/soundcard.h                                       conflict!
index d612198..539e9d4 100644 (file)
@@ -263,7 +263,7 @@ Userspace can also add file descriptors to the notifying process via
 ``ioctl(SECCOMP_IOCTL_NOTIF_ADDFD)``. The ``id`` member of
 ``struct seccomp_notif_addfd`` should be the same ``id`` as in
 ``struct seccomp_notif``. The ``newfd_flags`` flag may be used to set flags
-like O_EXEC on the file descriptor in the notifying process. If the supervisor
+like O_CLOEXEC on the file descriptor in the notifying process. If the supervisor
 wants to inject the file descriptor with a specific number, the
 ``SECCOMP_ADDFD_FLAG_SETFD`` flag can be used, and set the ``newfd`` member to
 the specific number to use. If that file descriptor is already open in the
index 7ddd8f6..5e8ed9e 100644 (file)
@@ -106,3 +106,11 @@ Speculation misfeature controls
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
+
+- PR_SPEC_L1D_FLUSH: Flush L1D Cache on context switch out of the task
+                        (works only when tasks run on non-SMT cores)
+
+  Invocations:
+   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, 0, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_DISABLE, 0, 0);
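A minimal userspace sketch of the new control is shown below; the fallback define is an assumption for systems whose uapi headers predate PR_SPEC_L1D_FLUSH:

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    #ifndef PR_SPEC_L1D_FLUSH
    #define PR_SPEC_L1D_FLUSH 2     /* fallback for older uapi headers */
    #endif

    int main(void)
    {
            int state;

            /* Request an L1D flush on context switch out of this task. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
                      PR_SPEC_ENABLE, 0, 0))
                    perror("PR_SET_SPECULATION_CTRL");

            /* Read back the current setting; a negative value means unsupported. */
            state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, 0, 0, 0);
            printf("PR_SPEC_L1D_FLUSH state: %d\n", state);
            return 0;
    }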
index 35eca37..88fa495 100644 (file)
@@ -25,10 +25,10 @@ On x86:
 
 - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
 
-- kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock is
-  taken inside kvm->arch.mmu_lock, and cannot be taken without already
-  holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
-  there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
+- kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock and
+  kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
+  cannot be taken without already holding kvm->arch.mmu_lock (typically with
+  ``read_lock`` for the TDP MMU, thus the need for additional spinlocks).
 
 Everything else is a leaf: no other lock is taken inside the critical
 sections.
index 5f62b3b..ccb7e86 100644 (file)
@@ -126,7 +126,7 @@ Idle loop
 Rebooting
 =========
 
-   reboot=b[ios] | t[riple] | k[bd] | a[cpi] | e[fi] [, [w]arm | [c]old]
+   reboot=b[ios] | t[riple] | k[bd] | a[cpi] | e[fi] | p[ci] [, [w]arm | [c]old]
       bios
         Use the CPU reboot vector for warm reset
       warm
@@ -145,6 +145,8 @@ Rebooting
         Use efi reset_system runtime service. If EFI is not configured or
         the EFI reset does not work, the reboot path attempts the reset using
         the keyboard controller.
+      pci
+        Use a write to the PCI config space register 0xcf9 to trigger reboot.
 
    Using warm reset will be much faster especially on big memory
    systems because the BIOS will not go through the memory check.
@@ -155,6 +157,13 @@ Rebooting
      Don't stop other CPUs on reboot. This can make reboot more reliable
      in some cases.
 
+   reboot=default
+     There are some built-in platform-specific "quirks" - you may see:
+     "reboot: <name> series board detected. Selecting <type> for reboots."
+     In the case where you think the quirk is in error (e.g. you have a
+     newer BIOS or a newer board), using this option will ignore the
+     built-in quirk table and use the generic default reboot actions.
+
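For example, a kernel booted with:

    reboot=pci,cold

requests a cold reset through the 0xcf9 config-space write described above; per the documented syntax, the abbreviated form reboot=p,c is equivalent.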
 Non Executable Mappings
 =======================
 
index 4ce7512..142e275 100644 (file)
@@ -3873,6 +3873,16 @@ L:       bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/mtd/nand/raw/brcmnand/
 
+BROADCOM STB PCIE DRIVER
+M:     Jim Quinlan <jim2101024@gmail.com>
+M:     Nicolas Saenz Julienne <nsaenz@kernel.org>
+M:     Florian Fainelli <f.fainelli@gmail.com>
+M:     bcm-kernel-feedback-list@broadcom.com
+L:     linux-pci@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+F:     drivers/pci/controller/pcie-brcmstb.c
+
 BROADCOM SYSTEMPORT ETHERNET DRIVER
 M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     bcm-kernel-feedback-list@broadcom.com
@@ -4505,7 +4515,7 @@ L:        clang-built-linux@googlegroups.com
 S:     Supported
 W:     https://clangbuiltlinux.github.io/
 B:     https://github.com/ClangBuiltLinux/linux/issues
-C:     irc://chat.freenode.net/clangbuiltlinux
+C:     irc://irc.libera.chat/clangbuiltlinux
 F:     Documentation/kbuild/llvm.rst
 F:     include/linux/compiler-clang.h
 F:     scripts/clang-tools/
@@ -4617,7 +4627,7 @@ F:        include/linux/clk/
 F:     include/linux/of_clk.h
 X:     drivers/clk/clkdev.c
 
-COMMON INTERNET FILE SYSTEM (CIFS)
+COMMON INTERNET FILE SYSTEM CLIENT (CIFS)
 M:     Steve French <sfrench@samba.org>
 L:     linux-cifs@vger.kernel.org
 L:     samba-technical@lists.samba.org (moderated for non-subscribers)
@@ -4626,6 +4636,7 @@ W:        http://linux-cifs.samba.org/
 T:     git git://git.samba.org/sfrench/cifs-2.6.git
 F:     Documentation/admin-guide/cifs/
 F:     fs/cifs/
+F:     fs/cifs_common/
 
 COMPACTPCI HOTPLUG CORE
 M:     Scott Murray <scott@spiteful.org>
@@ -6952,7 +6963,7 @@ F:        include/uapi/linux/mdio.h
 F:     include/uapi/linux/mii.h
 
 EXFAT FILE SYSTEM
-M:     Namjae Jeon <namjae.jeon@samsung.com>
+M:     Namjae Jeon <linkinjeon@kernel.org>
 M:     Sungjong Seo <sj1557.seo@samsung.com>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
@@ -10110,6 +10121,17 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
 F:     Documentation/dev-tools/kselftest*
 F:     tools/testing/selftests/
 
+KERNEL SMB3 SERVER (KSMBD)
+M:     Namjae Jeon <linkinjeon@kernel.org>
+M:     Sergey Senozhatsky <senozhatsky@chromium.org>
+M:     Steve French <sfrench@samba.org>
+M:     Hyunchul Lee <hyc.lee@gmail.com>
+L:     linux-cifs@vger.kernel.org
+S:     Maintained
+T:     git git://git.samba.org/ksmbd.git
+F:     fs/cifs_common/
+F:     fs/ksmbd/
+
 KERNEL UNIT TESTING FRAMEWORK (KUnit)
 M:     Brendan Higgins <brendanhiggins@google.com>
 L:     linux-kselftest@vger.kernel.org
@@ -10616,15 +10638,6 @@ F:     LICENSES/
 F:     scripts/spdxcheck-test.sh
 F:     scripts/spdxcheck.py
 
-LIGHTNVM PLATFORM SUPPORT
-M:     Matias Bjorling <mb@lightnvm.io>
-L:     linux-block@vger.kernel.org
-S:     Maintained
-W:     http://github/OpenChannelSSD
-F:     drivers/lightnvm/
-F:     include/linux/lightnvm.h
-F:     include/uapi/linux/lightnvm.h
-
 LINEAR RANGES HELPERS
 M:     Mark Brown <broonie@kernel.org>
 R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
@@ -11334,7 +11347,7 @@ W:      https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
 F:     drivers/media/radio/radio-maxiradio*
 
-MCAB MICROCHIP CAN BUS ANALYZER TOOL DRIVER
+MCBA MICROCHIP CAN BUS ANALYZER TOOL DRIVER
 R:     Yasushi SHOJI <yashi@spacecubics.com>
 L:     linux-can@vger.kernel.org
 S:     Maintained
@@ -14437,6 +14450,13 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
 F:     drivers/pci/controller/dwc/pcie-histb.c
 
+PCIE DRIVER FOR INTEL LGM GW SOC
+M:     Rahul Tanwar <rtanwar@maxlinear.com>
+L:     linux-pci@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
+F:     drivers/pci/controller/dwc/pcie-intel-gw.c
+
 PCIE DRIVER FOR MEDIATEK
 M:     Ryder Lee <ryder.lee@mediatek.com>
 M:     Jianjun Wang <jianjun.wang@mediatek.com>
@@ -15810,7 +15830,7 @@ F:      Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
 F:     drivers/i2c/busses/i2c-emev2.c
 
 RENESAS ETHERNET DRIVERS
-R:     Sergei Shtylyov <sergei.shtylyov@gmail.com>
+R:     Sergey Shtylyov <s.shtylyov@omp.ru>
 L:     netdev@vger.kernel.org
 L:     linux-renesas-soc@vger.kernel.org
 F:     Documentation/devicetree/bindings/net/renesas,*.yaml
@@ -17822,7 +17842,7 @@ F:      include/linux/sync_file.h
 F:     include/uapi/linux/sync_file.h
 
 SYNOPSYS ARC ARCHITECTURE
-M:     Vineet Gupta <vgupta@synopsys.com>
+M:     Vineet Gupta <vgupta@kernel.org>
 L:     linux-snps-arc@lists.infradead.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git
@@ -19723,6 +19743,15 @@ S:     Maintained
 F:     include/uapi/linux/virtio_snd.h
 F:     sound/virtio/*
 
+VIRTIO I2C DRIVER
+M:     Jie Deng <jie.deng@intel.com>
+M:     Viresh Kumar <viresh.kumar@linaro.org>
+L:     linux-i2c@vger.kernel.org
+L:     virtualization@lists.linux-foundation.org
+S:     Maintained
+F:     drivers/i2c/busses/i2c-virtio.c
+F:     include/uapi/linux/virtio_i2c.h
+
 VIRTUAL BOX GUEST DEVICE DRIVER
 M:     Hans de Goede <hdegoede@redhat.com>
 M:     Arnd Bergmann <arnd@arndb.de>
@@ -20024,7 +20053,8 @@ F:      Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
 F:     Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
 F:     Documentation/devicetree/bindings/mfd/wm831x.txt
 F:     Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
-F:     Documentation/devicetree/bindings/sound/wlf,arizona.yaml
+F:     Documentation/devicetree/bindings/sound/wlf,*.yaml
+F:     Documentation/devicetree/bindings/sound/wm*
 F:     Documentation/hwmon/wm83??.rst
 F:     arch/arm/mach-s3c/mach-crag6410*
 F:     drivers/clk/clk-wm83*.c
index 27a072c..61741e9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION =
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
@@ -546,7 +546,6 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 PHONY += scripts_basic
 scripts_basic:
        $(Q)$(MAKE) $(build)=scripts/basic
-       $(Q)rm -f .tmp_quiet_recordmcount
 
 PHONY += outputmakefile
 ifdef building_out_of_srctree
@@ -1317,6 +1316,16 @@ PHONY += scripts_unifdef
 scripts_unifdef: scripts_basic
        $(Q)$(MAKE) $(build)=scripts scripts/unifdef
 
+# ---------------------------------------------------------------------------
+# Install
+
+# Many distributions have a custom install script, /sbin/installkernel.
+# If DKMS is installed, 'make install' will eventually recurse back
+# to this Makefile to build and install external modules.
+# Cancel sub_make_done so that options such as M=, V=, etc. are parsed.
+
+install: sub_make_done :=
+
 # ---------------------------------------------------------------------------
 # Tools
 
index 129df49..98db634 100644 (file)
@@ -1282,6 +1282,9 @@ config ARCH_SPLIT_ARG64
 config ARCH_HAS_ELFCORE_COMPAT
        bool
 
+config ARCH_HAS_PARANOID_L1D_FLUSH
+       bool
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
index d8f51eb..b5bf68e 100644 (file)
@@ -409,7 +409,7 @@ choice
        help
          Depending on the configuration, CPU can contain DSP registers
          (ACC0_GLO, ACC0_GHI, DSP_BFLY0, DSP_CTRL, DSP_FFT_CTRL).
-         Bellow is options describing how to handle these registers in
+         Below are options describing how to handle these registers in
          interrupt entry / exit and in context switch.
 
 config ARC_DSP_NONE
index 69debd7..0b48580 100644 (file)
@@ -24,7 +24,7 @@
  */
 static inline __sum16 csum_fold(__wsum s)
 {
-       unsigned r = s << 16 | s >> 16; /* ror */
+       unsigned int r = s << 16 | s >> 16;     /* ror */
        s = ~s;
        s -= r;
        return s >> 16;
index 30b9ae5..e1971d3 100644 (file)
@@ -123,7 +123,7 @@ static const char * const arc_pmu_ev_hw_map[] = {
 #define C(_x)                  PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED   0xffff
 
-static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = PERF_COUNT_ARC_LDC,
index c67c0f0..ec64021 100644 (file)
@@ -57,23 +57,26 @@ void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
 
 void fpu_init_task(struct pt_regs *regs)
 {
+       const unsigned int fwe = 0x80000000;
+
        /* default rounding mode */
        write_aux_reg(ARC_REG_FPU_CTRL, 0x100);
 
-       /* set "Write enable" to allow explicit write to exception flags */
-       write_aux_reg(ARC_REG_FPU_STATUS, 0x80000000);
+       /* Initialize to zero: setting requires FWE be set */
+       write_aux_reg(ARC_REG_FPU_STATUS, fwe);
 }
 
 void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
 {
        struct arc_fpu *save = &prev->thread.fpu;
        struct arc_fpu *restore = &next->thread.fpu;
+       const unsigned int fwe = 0x80000000;
 
        save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
        save->status = read_aux_reg(ARC_REG_FPU_STATUS);
 
        write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
-       write_aux_reg(ARC_REG_FPU_STATUS, restore->status);
+       write_aux_reg(ARC_REG_FPU_STATUS, (fwe | restore->status));
 }
 
 #endif
index abf9398..f9fdb55 100644 (file)
@@ -352,7 +352,7 @@ static void idu_cascade_isr(struct irq_desc *desc)
        irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;
 
        chained_irq_enter(core_chip, desc);
-       generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+       generic_handle_domain_irq(idu_domain, idu_hwirq);
        chained_irq_exit(core_chip, desc);
 }
 
index 47bab67..9e28058 100644 (file)
@@ -260,7 +260,7 @@ static void init_unwind_hdr(struct unwind_table *table,
 {
        const u8 *ptr;
        unsigned long tableSize = table->size, hdrSize;
-       unsigned n;
+       unsigned int n;
        const u32 *fde;
        struct {
                u8 version;
@@ -462,7 +462,7 @@ static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
 {
        const u8 *cur = *pcur;
        uleb128_t value;
-       unsigned shift;
+       unsigned int shift;
 
        for (shift = 0, value = 0; cur < end; shift += 7) {
                if (shift + 7 > 8 * sizeof(value)
@@ -483,7 +483,7 @@ static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
 {
        const u8 *cur = *pcur;
        sleb128_t value;
-       unsigned shift;
+       unsigned int shift;
 
        for (shift = 0, value = 0; cur < end; shift += 7) {
                if (shift + 7 > 8 * sizeof(value)
@@ -609,7 +609,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
 static signed fde_pointer_type(const u32 *cie)
 {
        const u8 *ptr = (const u8 *)(cie + 2);
-       unsigned version = *ptr;
+       unsigned int version = *ptr;
 
        if (*++ptr) {
                const char *aug;
@@ -904,7 +904,7 @@ int arc_unwind(struct unwind_frame_info *frame)
        const u8 *ptr = NULL, *end = NULL;
        unsigned long pc = UNW_PC(frame) - frame->call_frame;
        unsigned long startLoc = 0, endLoc = 0, cfa;
-       unsigned i;
+       unsigned int i;
        signed ptrType = -1;
        uleb128_t retAddrReg = 0;
        const struct unwind_table *table;
index e2146a8..529ae50 100644 (file)
@@ -88,6 +88,8 @@ SECTIONS
                CPUIDLE_TEXT
                LOCK_TEXT
                KPROBES_TEXT
+               IRQENTRY_TEXT
+               SOFTIRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
        }
index 40ef397..ba58e6b 100644 (file)
                                compatible = "ti,am4372-d_can", "ti,am3352-d_can";
                                reg = <0x0 0x2000>;
                                clocks = <&dcan1_fck>;
-                               clock-name = "fck";
+                               clock-names = "fck";
                                syscon-raminit = <&scm_conf 0x644 1>;
                                interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
index aae0af1..2aa75ab 100644 (file)
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c0_pins>;
-       clock-frequency = <400000>;
+       clock-frequency = <100000>;
 
        tps65218: tps65218@24 {
                reg = <0x24>;
index f98691a..d3082b9 100644 (file)
 
                pinctrl_power_button: powerbutgrp {
                        fsl,pins = <
-                               MX53_PAD_SD2_DATA2__GPIO1_13            0x1e4
+                               MX53_PAD_SD2_DATA0__GPIO1_15            0x1e4
                        >;
                };
 
                pinctrl_power_out: poweroutgrp {
                        fsl,pins = <
-                               MX53_PAD_SD2_DATA0__GPIO1_15            0x1e4
+                               MX53_PAD_SD2_DATA2__GPIO1_13            0x1e4
                        >;
                };
 
index 0ad8ccd..f86efd0 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
        phy-mode = "rgmii-id";
-       phy-reset-duration = <2>;
+
+       /*
+        * The PHY seems to require a long-enough reset duration to avoid
+        * some rare issues where the PHY gets stuck in an inconsistent and
+        * non-functional state at boot-up. 10ms proved to be fine.
+        */
+       phy-reset-duration = <10>;
        phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
        status = "okay";
 
index a054543..9f1e382 100644 (file)
@@ -43,6 +43,7 @@
        assigned-clock-rates = <0>, <198000000>;
        cap-power-off-card;
        keep-power-in-suspend;
+       max-frequency = <25000000>;
        mmc-pwrseq = <&wifi_pwrseq>;
        no-1-8-v;
        non-removable;
index 45435bb..373984c 100644 (file)
                regulator-max-microvolt = <5000000>;
        };
 
-       vdds_1v8_main: fixedregulator-vdds_1v8_main {
-               compatible = "regulator-fixed";
-               regulator-name = "vdds_1v8_main";
-               vin-supply = <&smps7_reg>;
-               regulator-min-microvolt = <1800000>;
-               regulator-max-microvolt = <1800000>;
-       };
-
        vmmcsd_fixed: fixedregulator-mmcsd {
                compatible = "regulator-fixed";
                regulator-name = "vmmcsd_fixed";
                                        regulator-boot-on;
                                };
 
+                               vdds_1v8_main:
                                smps7_reg: smps7 {
                                        /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
                                        regulator-name = "smps7";
index c9b9064..1815361 100644 (file)
                        status = "disabled";
                };
 
-               vica: intc@10140000 {
+               vica: interrupt-controller@10140000 {
                        compatible = "arm,versatile-vic";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        reg = <0x10140000 0x20>;
                };
 
-               vicb: intc@10140020 {
+               vicb: interrupt-controller@10140020 {
                        compatible = "arm,versatile-vic";
                        interrupt-controller;
                        #interrupt-cells = <1>;
index c5ea08f..6cf1c8b 100644 (file)
@@ -37,7 +37,7 @@
                poll-interval = <20>;
 
                /*
-                * The EXTi IRQ line 3 is shared with touchscreen and ethernet,
+                * The EXTi IRQ line 3 is shared with ethernet,
                 * so mark this as polled GPIO key.
                 */
                button-0 {
                        gpios = <&gpiof 3 GPIO_ACTIVE_LOW>;
                };
 
+               /*
+                * The EXTi IRQ line 6 is shared with touchscreen,
+                * so mark this as polled GPIO key.
+                */
+               button-1 {
+                       label = "TA2-GPIO-B";
+                       linux,code = <KEY_B>;
+                       gpios = <&gpiod 6 GPIO_ACTIVE_LOW>;
+               };
+
                /*
                 * The EXTi IRQ line 0 is shared with PMIC,
                 * so mark this as polled GPIO key.
        gpio-keys {
                compatible = "gpio-keys";
 
-               button-1 {
-                       label = "TA2-GPIO-B";
-                       linux,code = <KEY_B>;
-                       gpios = <&gpiod 6 GPIO_ACTIVE_LOW>;
-                       wakeup-source;
-               };
-
                button-3 {
                        label = "TA4-GPIO-D";
                        linux,code = <KEY_D>;
@@ -82,6 +85,7 @@
                        label = "green:led5";
                        gpios = <&gpioc 6 GPIO_ACTIVE_HIGH>;
                        default-state = "off";
+                       status = "disabled";
                };
 
                led-1 {
        touchscreen@38 {
                compatible = "edt,edt-ft5406";
                reg = <0x38>;
-               interrupt-parent = <&gpiog>;
-               interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
+               interrupt-parent = <&gpioc>;
+               interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
        };
 };
 
index 2af0a67..8c41f81 100644 (file)
@@ -12,6 +12,8 @@
        aliases {
                ethernet0 = &ethernet0;
                ethernet1 = &ksz8851;
+               rtc0 = &hwrtc;
+               rtc1 = &rtc;
        };
 
        memory@c0000000 {
                        reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
                        reset-assert-us = <500>;
                        reset-deassert-us = <500>;
+                       smsc,disable-energy-detect;
                        interrupt-parent = <&gpioi>;
                        interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
                };
        /delete-property/dmas;
        /delete-property/dma-names;
 
-       rtc@32 {
+       hwrtc: rtc@32 {
                compatible = "microcrystal,rv8803";
                reg = <0x32>;
        };
index ff5e0d0..d17083c 100644 (file)
@@ -196,14 +196,6 @@ static int sa1111_map_irq(struct sa1111 *sachip, irq_hw_number_t hwirq)
        return irq_create_mapping(sachip->irqdomain, hwirq);
 }
 
-static void sa1111_handle_irqdomain(struct irq_domain *irqdomain, int irq)
-{
-       struct irq_desc *d = irq_to_desc(irq_linear_revmap(irqdomain, irq));
-
-       if (d)
-               generic_handle_irq_desc(d);
-}
-
 /*
  * SA1111 interrupt support.  Since clearing an IRQ while there are
  * active IRQs causes the interrupt output to pulse, the upper levels
@@ -234,11 +226,11 @@ static void sa1111_irq_handler(struct irq_desc *desc)
 
        for (i = 0; stat0; i++, stat0 >>= 1)
                if (stat0 & 1)
-                       sa1111_handle_irqdomain(irqdomain, i);
+                       generic_handle_domain_irq(irqdomain, i);
 
        for (i = 32; stat1; i++, stat1 >>= 1)
                if (stat1 & 1)
-                       sa1111_handle_irqdomain(irqdomain, i);
+                       generic_handle_domain_irq(irqdomain, i);
 
        /* For level-based interrupts */
        desc->irq_data.chip->irq_unmask(&desc->irq_data);
index 3f35761..23595fc 100644 (file)
@@ -15,8 +15,6 @@ CONFIG_SLAB=y
 CONFIG_ARCH_NOMADIK=y
 CONFIG_MACH_NOMADIK_8815NHK=y
 CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -52,9 +50,9 @@ CONFIG_MTD_BLOCK=y
 CONFIG_MTD_ONENAND=y
 CONFIG_MTD_ONENAND_VERIFY_WRITE=y
 CONFIG_MTD_ONENAND_GENERIC=y
-CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
 CONFIG_MTD_RAW_NAND=y
 CONFIG_MTD_NAND_FSMC=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
@@ -97,6 +95,7 @@ CONFIG_REGULATOR=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_TPO_TPG110=y
 CONFIG_DRM_PL111=y
+CONFIG_FB=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -136,9 +135,8 @@ CONFIG_NLS_ISO8859_15=y
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_SHA1=y
 CONFIG_CRYPTO_DES=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
index 31eb75b..9bdafd5 100644 (file)
@@ -112,7 +112,7 @@ static struct kpp_alg curve25519_alg = {
        .max_size               = curve25519_max_size,
 };
 
-static int __init mod_init(void)
+static int __init arm_curve25519_init(void)
 {
        if (elf_hwcap & HWCAP_NEON) {
                static_branch_enable(&have_neon);
@@ -122,14 +122,14 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit arm_curve25519_exit(void)
 {
        if (IS_REACHABLE(CONFIG_CRYPTO_KPP) && elf_hwcap & HWCAP_NEON)
                crypto_unregister_kpp(&curve25519_alg);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(arm_curve25519_init);
+module_exit(arm_curve25519_exit);
 
 MODULE_ALIAS_CRYPTO("curve25519");
 MODULE_ALIAS_CRYPTO("curve25519-neon");
index cfc9dfd..f673e13 100644 (file)
@@ -160,10 +160,11 @@ extern unsigned long vectors_base;
 
 /*
  * Physical start and end address of the kernel sections. These addresses are
- * 2MB-aligned to match the section mappings placed over the kernel.
+ * 2MB-aligned to match the section mappings placed over the kernel. We use
+ * u64 so that LPAE mappings beyond the 32bit limit will work out as well.
  */
-extern u32 kernel_sec_start;
-extern u32 kernel_sec_end;
+extern u64 kernel_sec_start;
+extern u64 kernel_sec_end;
 
 /*
  * Physical vs virtual RAM address space conversion.  These are
index 9eb0b4d..29070eb 100644 (file)
@@ -49,7 +49,8 @@
 
        /*
         * This needs to be assigned at runtime when the linker symbols are
-        * resolved.
+        * resolved. These are really unsigned 64bit values, but in this
+        * assembly code we store each of them as two 32bit words.
         */
        .pushsection .data
        .align  2
@@ -57,7 +58,9 @@
        .globl  kernel_sec_end
 kernel_sec_start:
        .long   0
+       .long   0
 kernel_sec_end:
+       .long   0
        .long   0
        .popsection
 
@@ -250,7 +253,11 @@ __create_page_tables:
        add     r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
        ldr     r6, =(_end - 1)
        adr_l   r5, kernel_sec_start            @ _pa(kernel_sec_start)
-       str     r8, [r5]                        @ Save physical start of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+       str     r8, [r5, #4]                    @ Save physical start of kernel (BE)
+#else
+       str     r8, [r5]                        @ Save physical start of kernel (LE)
+#endif
        orr     r3, r8, r7                      @ Add the MMU flags
        add     r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
 1:     str     r3, [r0], #1 << PMD_ORDER
@@ -259,7 +266,11 @@ __create_page_tables:
        bls     1b
        eor     r3, r3, r7                      @ Remove the MMU flags
        adr_l   r5, kernel_sec_end              @ _pa(kernel_sec_end)
-       str     r3, [r5]                        @ Save physical end of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+       str     r3, [r5, #4]                    @ Save physical end of kernel (BE)
+#else
+       str     r3, [r5]                        @ Save physical end of kernel (LE)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
        /*
index f0a073a..13f3068 100644 (file)
@@ -68,7 +68,6 @@ void imx_set_cpu_arg(int cpu, u32 arg);
 void v7_secondary_startup(void);
 void imx_scu_map_io(void);
 void imx_smp_prepare(void);
-void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn);
 #else
 static inline void imx_scu_map_io(void) {}
 static inline void imx_smp_prepare(void) {}
@@ -81,6 +80,7 @@ void imx_gpc_mask_all(void);
 void imx_gpc_restore_all(void);
 void imx_gpc_hwirq_mask(unsigned int hwirq);
 void imx_gpc_hwirq_unmask(unsigned int hwirq);
+void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn);
 void imx_anatop_init(void);
 void imx_anatop_pre_suspend(void);
 void imx_anatop_post_resume(void);
index 0dfd0ae..af12668 100644 (file)
@@ -103,6 +103,7 @@ struct mmdc_pmu {
        struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
        struct hlist_node node;
        struct fsl_mmdc_devtype_data *devtype_data;
+       struct clk *mmdc_ipg_clk;
 };
 
 /*
@@ -462,11 +463,14 @@ static int imx_mmdc_remove(struct platform_device *pdev)
 
        cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
        perf_pmu_unregister(&pmu_mmdc->pmu);
+       iounmap(pmu_mmdc->mmdc_base);
+       clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk);
        kfree(pmu_mmdc);
        return 0;
 }
 
-static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base)
+static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base,
+                             struct clk *mmdc_ipg_clk)
 {
        struct mmdc_pmu *pmu_mmdc;
        char *name;
@@ -494,6 +498,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
        }
 
        mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
+       pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
        if (mmdc_num == 0)
                name = "mmdc";
        else
@@ -529,7 +534,7 @@ pmu_free:
 
 #else
 #define imx_mmdc_remove NULL
-#define imx_mmdc_perf_init(pdev, mmdc_base) 0
+#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0
 #endif
 
 static int imx_mmdc_probe(struct platform_device *pdev)
@@ -567,7 +572,13 @@ static int imx_mmdc_probe(struct platform_device *pdev)
        val &= ~(1 << BP_MMDC_MAPSR_PSD);
        writel_relaxed(val, reg);
 
-       return imx_mmdc_perf_init(pdev, mmdc_base);
+       err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk);
+       if (err) {
+               iounmap(mmdc_base);
+               clk_disable_unprepare(mmdc_ipg_clk);
+       }
+
+       return err;
 }
 
 int imx_mmdc_get_ddr_type(void)
index bf14d65..34a1c77 100644 (file)
@@ -91,6 +91,7 @@ config MACH_IXDP465
 
 config MACH_GORAMO_MLR
        bool "GORAMO Multi Link Router"
+       depends on IXP4XX_PCI_LEGACY
        help
          Say 'Y' here if you want your kernel to support GORAMO
          MultiLink router.
index abb07f1..74e63d4 100644 (file)
 /*
  * PCI Control/Status Registers
  */
-#define IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x)))
-
-#define PCI_NP_AD               IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET)
-#define PCI_NP_CBE              IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET)
-#define PCI_NP_WDATA            IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET)
-#define PCI_NP_RDATA            IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET)
-#define PCI_CRP_AD_CBE          IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET)
-#define PCI_CRP_WDATA           IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET)
-#define PCI_CRP_RDATA           IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET)
-#define PCI_CSR                 IXP4XX_PCI_CSR(PCI_CSR_OFFSET) 
-#define PCI_ISR                 IXP4XX_PCI_CSR(PCI_ISR_OFFSET)
-#define PCI_INTEN               IXP4XX_PCI_CSR(PCI_INTEN_OFFSET)
-#define PCI_DMACTRL             IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET)
-#define PCI_AHBMEMBASE          IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET)
-#define PCI_AHBIOBASE           IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET)
-#define PCI_PCIMEMBASE          IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET)
-#define PCI_AHBDOORBELL         IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET)
-#define PCI_PCIDOORBELL         IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET)
-#define PCI_ATPDMA0_AHBADDR     IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET)
-#define PCI_ATPDMA0_PCIADDR     IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET)
-#define PCI_ATPDMA0_LENADDR     IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET)
-#define PCI_ATPDMA1_AHBADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
-#define PCI_ATPDMA1_PCIADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
-#define PCI_ATPDMA1_LENADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
+#define _IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x)))
+
+#define PCI_NP_AD               _IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET)
+#define PCI_NP_CBE              _IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET)
+#define PCI_NP_WDATA            _IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET)
+#define PCI_NP_RDATA            _IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET)
+#define PCI_CRP_AD_CBE          _IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET)
+#define PCI_CRP_WDATA           _IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET)
+#define PCI_CRP_RDATA           _IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET)
+#define PCI_CSR                 _IXP4XX_PCI_CSR(PCI_CSR_OFFSET) 
+#define PCI_ISR                 _IXP4XX_PCI_CSR(PCI_ISR_OFFSET)
+#define PCI_INTEN               _IXP4XX_PCI_CSR(PCI_INTEN_OFFSET)
+#define PCI_DMACTRL             _IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET)
+#define PCI_AHBMEMBASE          _IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET)
+#define PCI_AHBIOBASE           _IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET)
+#define PCI_PCIMEMBASE          _IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET)
+#define PCI_AHBDOORBELL         _IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET)
+#define PCI_PCIDOORBELL         _IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET)
+#define PCI_ATPDMA0_AHBADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET)
+#define PCI_ATPDMA0_PCIADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET)
+#define PCI_ATPDMA0_LENADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET)
+#define PCI_ATPDMA1_AHBADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
+#define PCI_ATPDMA1_PCIADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
+#define PCI_ATPDMA1_LENADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
 
 /*
  * PCI register values and bit definitions 
index 65934b2..12b26e0 100644 (file)
@@ -3776,6 +3776,7 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
        struct omap_hwmod_ocp_if *oi;
        struct clockdomain *clkdm;
        struct clk_hw_omap *clk;
+       struct clk_hw *hw;
 
        if (!oh)
                return NULL;
@@ -3792,7 +3793,14 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
                c = oi->_clk;
        }
 
-       clk = to_clk_hw_omap(__clk_get_hw(c));
+       hw = __clk_get_hw(c);
+       if (!hw)
+               return NULL;
+
+       clk = to_clk_hw_omap(hw);
+       if (!clk)
+               return NULL;
+
        clkdm = clk->clkdm;
        if (!clkdm)
                return NULL;
index bddfc7c..eda5a47 100644 (file)
@@ -39,10 +39,8 @@ static irqreturn_t cplds_irq_handler(int in_irq, void *d)
 
        do {
                pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
-               for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) {
-                       generic_handle_irq(irq_find_mapping(fpga->irqdomain,
-                                                           bit));
-               }
+               for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
+                       generic_handle_domain_irq(fpga->irqdomain, bit);
        } while (pending);
 
        return IRQ_HANDLED;
index 0c631c1..3edc5f6 100644 (file)
@@ -298,7 +298,7 @@ static void s3c_irq_demux(struct irq_desc *desc)
        struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
        struct s3c_irq_intc *intc = irq_data->intc;
        struct s3c_irq_intc *sub_intc = irq_data->sub_intc;
-       unsigned int n, offset, irq;
+       unsigned int n, offset;
        unsigned long src, msk;
 
        /* we're using individual domains for the non-dt case
@@ -318,8 +318,7 @@ static void s3c_irq_demux(struct irq_desc *desc)
        while (src) {
                n = __ffs(src);
                src &= ~(1 << n);
-               irq = irq_find_mapping(sub_intc->domain, offset + n);
-               generic_handle_irq(irq);
+               generic_handle_domain_irq(sub_intc->domain, offset + n);
        }
 
        chained_irq_exit(chip, desc);
index 7583bda..a4e0060 100644 (file)
@@ -1608,6 +1608,13 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
        if (offset == 0)
                return;
 
+       /*
+        * Offset the kernel section physical offsets so that the kernel
+        * mapping will work out later on.
+        */
+       kernel_sec_start += offset;
+       kernel_sec_end += offset;
+
        /*
         * Get the address of the remap function in the 1:1 identity
         * mapping setup by the early page table assembly code.  We
@@ -1716,7 +1723,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 {
        void *zero_page;
 
-       pr_debug("physical kernel sections: 0x%08x-0x%08x\n",
+       pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
                 kernel_sec_start, kernel_sec_end);
 
        prepare_page_table();
index 5c5e195..f8e11f7 100644 (file)
@@ -29,7 +29,7 @@ ENTRY(lpae_pgtables_remap_asm)
        ldr     r6, =(_end - 1)
        add     r7, r2, #0x1000
        add     r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
-       add     r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
+       add     r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER)
 1:     ldrd    r4, r5, [r7]
        adds    r4, r4, r0
        adc     r5, r5, r1
index b5b13a9..62c3c1d 100644 (file)
@@ -156,6 +156,7 @@ config ARM64
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+       select HAVE_ARCH_PFN_VALID
        select HAVE_ARCH_PREL32_RELOCATIONS
        select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
        select HAVE_ARCH_SECCOMP_FILTER
@@ -1800,11 +1801,11 @@ config RANDOMIZE_BASE
          If unsure, say N.
 
 config RANDOMIZE_MODULE_REGION_FULL
-       bool "Randomize the module region over a 4 GB range"
+       bool "Randomize the module region over a 2 GB range"
        depends on RANDOMIZE_BASE
        default y
        help
-         Randomizes the location of the module region inside a 4 GB window
+         Randomizes the location of the module region inside a 2 GB window
          covering the core kernel. This way, it is less likely for modules
          to leak information about the location of core kernel data structures
          but it does imply that function calls between modules and the core
@@ -1812,7 +1813,10 @@ config RANDOMIZE_MODULE_REGION_FULL
 
          When this option is not set, the module region will be randomized over
          a limited range that contains the [_stext, _etext] interval of the
-         core kernel, so branch relocations are always in range.
+         core kernel, so branch relocations are almost always in range unless
+         ARM64_MODULE_PLTS is enabled and the region is exhausted. In this
+         particular case of region exhaustion, modules might be able to fall
+         back to a larger 2GB area.
 
 config CC_HAVE_STACKPROTECTOR_SYSREG
        def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)
index 7bc37d0..1110d38 100644 (file)
@@ -21,19 +21,11 @@ LDFLAGS_vmlinux             += -shared -Bsymbolic -z notext \
 endif
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
-  ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
-$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum)
-  else
+  ifeq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
 LDFLAGS_vmlinux        += --fix-cortex-a53-843419
   endif
 endif
 
-ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
-  ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
-$(warning LSE atomics not supported by binutils)
-  endif
-endif
-
 cc_has_k_constraint := $(call try-run,echo                             \
        'int main(void) {                                               \
                asm volatile("and w0, w0, %w0" :: "K" (4294967295));    \
@@ -176,10 +168,23 @@ vdso_install:
 
 archprepare:
        $(Q)$(MAKE) $(build)=arch/arm64/tools kapi
+ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
+  ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
+       @echo "warning: ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum" >&2
+  endif
+endif
+ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS),y)
+  ifneq ($(CONFIG_ARM64_LSE_ATOMICS),y)
+       @echo "warning: LSE atomics not supported by binutils" >&2
+  endif
+endif
+
 
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
+       $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso
+       $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso32
 
 ifeq ($(KBUILD_EXTMOD),)
 # We need to generate vdso-offsets.h before compiling certain files in kernel/.
index dd764b7..f6a79c8 100644 (file)
@@ -54,6 +54,7 @@
 
 &mscc_felix_port0 {
        label = "swp0";
+       managed = "in-band-status";
        phy-handle = <&phy0>;
        phy-mode = "sgmii";
        status = "okay";
@@ -61,6 +62,7 @@
 
 &mscc_felix_port1 {
        label = "swp1";
+       managed = "in-band-status";
        phy-handle = <&phy1>;
        phy-mode = "sgmii";
        status = "okay";
index b2e3e5d..343ecf0 100644 (file)
@@ -66,7 +66,7 @@
                };
        };
 
-       sysclk: clock-sysclk {
+       sysclk: sysclk {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                clock-frequency = <100000000>;
index ce2bcdd..a05b1ab 100644 (file)
@@ -19,6 +19,8 @@
        aliases {
                spi0 = &spi0;
                ethernet1 = &eth1;
+               mmc0 = &sdhci0;
+               mmc1 = &sdhci1;
        };
 
        chosen {
        pinctrl-names = "default";
        pinctrl-0 = <&i2c1_pins>;
        clock-frequency = <100000>;
+       /delete-property/ mrvl,i2c-fast-mode;
        status = "okay";
 
        rtc@6f {
index 076d5ef..5ba7a45 100644 (file)
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE1R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE1W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE1>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE1 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14120000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE2AR &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE2AW &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE2>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE2 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14140000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE3R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE3W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE3>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE3 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14160000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE4R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE4W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE4>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE4 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14180000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE0R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE0W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE0>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE0 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@141a0000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE5R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE5W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE5>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE5 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie_ep@14160000 {
                nvidia,aspm-cmrt-us = <60>;
                nvidia,aspm-pwr-on-t-us = <20>;
                nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+               interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE4R &emc>,
+                               <&mc TEGRA194_MEMORY_CLIENT_PCIE4W &emc>;
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE4>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE4 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie_ep@14180000 {
                nvidia,aspm-cmrt-us = <60>;
                nvidia,aspm-pwr-on-t-us = <20>;
                nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+               interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE0R &emc>,
+                               <&mc TEGRA194_MEMORY_CLIENT_PCIE0W &emc>;
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE0>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE0 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie_ep@141a0000 {
                nvidia,aspm-cmrt-us = <60>;
                nvidia,aspm-pwr-on-t-us = <20>;
                nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+               interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE5R &emc>,
+                               <&mc TEGRA194_MEMORY_CLIENT_PCIE5W &emc>;
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE5>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE5 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        sram@40000000 {
index 23cdcc9..1ccca83 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2015, LGE Inc. All rights reserved.
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
  */
 
 /dts-v1/;
@@ -9,6 +10,9 @@
 #include "pm8994.dtsi"
 #include "pmi8994.dtsi"
 
+/* cont_splash_mem has different memory mapping */
+/delete-node/ &cont_splash_mem;
+
 / {
        model = "LG Nexus 5X";
        compatible = "lg,bullhead", "qcom,msm8992";
@@ -17,6 +21,9 @@
        qcom,board-id = <0xb64 0>;
        qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>;
 
+       /* Bullhead firmware doesn't support PSCI */
+       /delete-node/ psci;
+
        aliases {
                serial0 = &blsp1_uart2;
        };
                        ftrace-size = <0x10000>;
                        pmsg-size = <0x20000>;
                };
+
+               cont_splash_mem: memory@3400000 {
+                       reg = <0 0x03400000 0 0x1200000>;
+                       no-map;
+               };
        };
 };
 
index ffe1a9b..c096b77 100644 (file)
@@ -1,12 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2015, Huawei Inc. All rights reserved.
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
  */
 
 /dts-v1/;
 
 #include "msm8994.dtsi"
 
+/* Angler's firmware does not report where the memory is allocated */
+/delete-node/ &cont_splash_mem;
+
 / {
        model = "Huawei Nexus 6P";
        compatible = "huawei,angler", "qcom,msm8994";
index a9a052f..e7f0e5c 100644 (file)
                                                <&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
                                                <&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
                                                <&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
                                                <&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
                                                <&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
                                                <&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
                                                <&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
                                                <&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
                                                <&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
                                                <&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
                                                <&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
                                                <&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
                                interconnect-names = "qup-core", "qup-config",
                                                        "qup-memory";
+                               power-domains = <&rpmhpd SC7180_CX>;
+                               required-opps = <&rpmhpd_opp_low_svs>;
                                status = "disabled";
                        };
 
index a8c274a..188c576 100644 (file)
                                           &BIG_CPU_SLEEP_1
                                           &CLUSTER_SLEEP_0>;
                        next-level-cache = <&L2_700>;
-                       qcom,freq-domain = <&cpufreq_hw 1>;
+                       qcom,freq-domain = <&cpufreq_hw 2>;
                        #cooling-cells = <2>;
                        L2_700: l2-cache {
                                compatible = "cache";
index 4d052e3..eb6b1d1 100644 (file)
@@ -69,7 +69,7 @@
                };
                rmtfs_upper_guard: memory@f5d01000 {
                        no-map;
-                       reg = <0 0xf5d01000 0 0x2000>;
+                       reg = <0 0xf5d01000 0 0x1000>;
                };
 
                /*
@@ -78,7 +78,7 @@
                 */
                removed_region: memory@88f00000 {
                        no-map;
-                       reg = <0 0x88f00000 0 0x200000>;
+                       reg = <0 0x88f00000 0 0x1c00000>;
                };
 
                ramoops: ramoops@ac300000 {
index c2a709a..d7591a4 100644 (file)
                left_spkr: wsa8810-left{
                        compatible = "sdw10217211000";
                        reg = <0 3>;
-                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrLeft";
                        #sound-dai-cells = <0>;
 
                right_spkr: wsa8810-right{
                        compatible = "sdw10217211000";
-                       powerdown-gpios = <&wcdgpio 3 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
                        reg = <0 4>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrRight";
index b8eb045..55f1945 100644 (file)
@@ -51,7 +51,7 @@ config CRYPTO_SM4_ARM64_CE
        tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)"
        depends on KERNEL_MODE_NEON
        select CRYPTO_ALGAPI
-       select CRYPTO_SM4
+       select CRYPTO_LIB_SM4
 
 config CRYPTO_GHASH_ARM64_CE
        tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
index 2754c87..9c93cfc 100644 (file)
@@ -17,12 +17,20 @@ MODULE_LICENSE("GPL v2");
 
 asmlinkage void sm4_ce_do_crypt(const u32 *rk, void *out, const void *in);
 
+static int sm4_ce_setkey(struct crypto_tfm *tfm, const u8 *key,
+                      unsigned int key_len)
+{
+       struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       return sm4_expandkey(ctx, key, key_len);
+}
+
 static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
        if (!crypto_simd_usable()) {
-               crypto_sm4_encrypt(tfm, out, in);
+               sm4_crypt_block(ctx->rkey_enc, out, in);
        } else {
                kernel_neon_begin();
                sm4_ce_do_crypt(ctx->rkey_enc, out, in);
@@ -32,10 +40,10 @@ static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
        if (!crypto_simd_usable()) {
-               crypto_sm4_decrypt(tfm, out, in);
+               sm4_crypt_block(ctx->rkey_dec, out, in);
        } else {
                kernel_neon_begin();
                sm4_ce_do_crypt(ctx->rkey_dec, out, in);
@@ -49,12 +57,12 @@ static struct crypto_alg sm4_ce_alg = {
        .cra_priority                   = 200,
        .cra_flags                      = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize                  = SM4_BLOCK_SIZE,
-       .cra_ctxsize                    = sizeof(struct crypto_sm4_ctx),
+       .cra_ctxsize                    = sizeof(struct sm4_ctx),
        .cra_module                     = THIS_MODULE,
        .cra_u.cipher = {
                .cia_min_keysize        = SM4_KEY_SIZE,
                .cia_max_keysize        = SM4_KEY_SIZE,
-               .cia_setkey             = crypto_sm4_set_key,
+               .cia_setkey             = sm4_ce_setkey,
                .cia_encrypt            = sm4_ce_encrypt,
                .cia_decrypt            = sm4_ce_decrypt
        }
index bd68e1b..7535dc7 100644 (file)
@@ -50,6 +50,9 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
 void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
 #define acpi_os_ioremap acpi_os_ioremap
 
+void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size);
+#define acpi_os_memmap acpi_os_memmap
+
 typedef u64 phys_cpuid_t;
 #define PHYS_CPUID_INVALID INVALID_HWID
 
index 21fa330..b83fb24 100644 (file)
@@ -33,8 +33,7 @@
  * EL2.
  */
 .macro __init_el2_timers
-       mrs     x0, cnthctl_el2
-       orr     x0, x0, #3                      // Enable EL1 physical timers
+       mov     x0, #3                          // Enable EL1 physical timers
        msr     cnthctl_el2, x0
        msr     cntvoff_el2, xzr                // Clear virtual offset
 .endm
index 993a27e..f98c91b 100644 (file)
@@ -41,6 +41,7 @@ void tag_clear_highpage(struct page *to);
 
 typedef struct page *pgtable_t;
 
+int pfn_valid(unsigned long pfn);
 int pfn_is_map_memory(unsigned long pfn);
 
 #include <asm/memory.h>
index e58bca8..41b332c 100644 (file)
@@ -320,7 +320,17 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
-       return regs->regs[0];
+       unsigned long val = regs->regs[0];
+
+       /*
+        * Audit currently uses regs_return_value() instead of
+        * syscall_get_return_value(). Apply the same sign-extension here until
+        * audit is updated to use syscall_get_return_value().
+        */
+       if (compat_user_mode(regs))
+               val = sign_extend64(val, 31);
+
+       return val;
 }
 
 static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
index 1801399..8aebc00 100644 (file)
@@ -35,7 +35,7 @@ struct stack_info {
  * accounting information necessary for robust unwinding.
  *
  * @fp:          The fp value in the frame record (or the real fp)
- * @pc:          The fp value in the frame record (or the real lr)
+ * @pc:          The lr value in the frame record (or the real lr)
  *
  * @stacks_done: Stacks which have been entirely unwound, for which it is no
  *               longer valid to unwind to.
index cfc0672..03e2089 100644 (file)
@@ -29,22 +29,23 @@ static inline void syscall_rollback(struct task_struct *task,
        regs->regs[0] = regs->orig_x0;
 }
 
-
-static inline long syscall_get_error(struct task_struct *task,
-                                    struct pt_regs *regs)
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
 {
-       unsigned long error = regs->regs[0];
+       unsigned long val = regs->regs[0];
 
        if (is_compat_thread(task_thread_info(task)))
-               error = sign_extend64(error, 31);
+               val = sign_extend64(val, 31);
 
-       return IS_ERR_VALUE(error) ? error : 0;
+       return val;
 }
 
-static inline long syscall_get_return_value(struct task_struct *task,
-                                           struct pt_regs *regs)
+static inline long syscall_get_error(struct task_struct *task,
+                                    struct pt_regs *regs)
 {
-       return regs->regs[0];
+       unsigned long error = syscall_get_return_value(task, regs);
+
+       return IS_ERR_VALUE(error) ? error : 0;
 }
 
 static inline void syscall_set_return_value(struct task_struct *task,
index f385172..1c9c2f7 100644 (file)
@@ -273,7 +273,8 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
        return __pgprot(PROT_DEVICE_nGnRnE);
 }
 
-void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
+                                      acpi_size size, bool memory)
 {
        efi_memory_desc_t *md, *region = NULL;
        pgprot_t prot;
@@ -299,9 +300,11 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
         * It is fine for AML to remap regions that are not represented in the
         * EFI memory map at all, as it only describes normal memory, and MMIO
         * regions that require a virtual mapping to make them accessible to
-        * the EFI runtime services.
+        * the EFI runtime services. Determine the region default
+        * attributes by checking the requested memory semantics.
         */
-       prot = __pgprot(PROT_DEVICE_nGnRnE);
+       prot = memory ? __pgprot(PROT_NORMAL_NC) :
+                       __pgprot(PROT_DEVICE_nGnRnE);
        if (region) {
                switch (region->type) {
                case EFI_LOADER_CODE:
@@ -361,6 +364,16 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
        return __ioremap(phys, size, prot);
 }
 
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+{
+       return __acpi_os_ioremap(phys, size, false);
+}
+
+void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size)
+{
+       return __acpi_os_ioremap(phys, size, true);
+}
+
 /*
  * Claim Synchronous External Aborts as a firmware first notification.
  *
index cfa2cfd..418b2bb 100644 (file)
@@ -162,7 +162,9 @@ u64 __init kaslr_early_init(void)
                 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
                 * _stext) . This guarantees that the resulting region still
                 * covers [_stext, _etext], and that all relative branches can
-                * be resolved without veneers.
+                * be resolved without veneers unless this region is exhausted
+                * and we fall back to a larger 2GB window in module_alloc()
+                * when ARM64_MODULE_PLTS is enabled.
                 */
                module_range = MODULES_VSIZE - (u64)(_etext - _stext);
                module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
index 499b6b2..b381a1e 100644 (file)
@@ -1862,7 +1862,7 @@ void syscall_trace_exit(struct pt_regs *regs)
        audit_syscall_exit(regs);
 
        if (flags & _TIF_SYSCALL_TRACEPOINT)
-               trace_sys_exit(regs, regs_return_value(regs));
+               trace_sys_exit(regs, syscall_get_return_value(current, regs));
 
        if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
index f8192f4..2303633 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/unistd.h>
 #include <asm/fpsimd.h>
 #include <asm/ptrace.h>
+#include <asm/syscall.h>
 #include <asm/signal32.h>
 #include <asm/traps.h>
 #include <asm/vdso.h>
@@ -890,7 +891,7 @@ static void do_signal(struct pt_regs *regs)
                     retval == -ERESTART_RESTARTBLOCK ||
                     (retval == -ERESTARTSYS &&
                      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
-                       regs->regs[0] = -EINTR;
+                       syscall_set_return_value(current, regs, -EINTR, 0);
                        regs->pc = continue_addr;
                }
 
index b83c8d9..8982a2b 100644 (file)
@@ -218,7 +218,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 
 #ifdef CONFIG_STACKTRACE
 
-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
                              void *cookie, struct task_struct *task,
                              struct pt_regs *regs)
 {
index 263d6c1..50a0f1a 100644 (file)
@@ -54,10 +54,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
                ret = do_ni_syscall(regs, scno);
        }
 
-       if (is_compat_task())
-               ret = lower_32_bits(ret);
-
-       regs->regs[0] = ret;
+       syscall_set_return_value(current, regs, 0, ret);
 
        /*
         * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
@@ -115,7 +112,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                 * syscall. do_notify_resume() will send a signal to userspace
                 * before the syscall is restarted.
                 */
-               regs->regs[0] = -ERESTARTNOINTR;
+               syscall_set_return_value(current, regs, -ERESTARTNOINTR, 0);
                return;
        }
 
@@ -136,7 +133,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                 * anyway.
                 */
                if (scno == NO_SYSCALL)
-                       regs->regs[0] = -ENOSYS;
+                       syscall_set_return_value(current, regs, -ENOSYS, 0);
                scno = syscall_trace_enter(regs);
                if (scno == NO_SYSCALL)
                        goto trace_exit;
index e9a2b8f..0ca72f5 100644 (file)
@@ -94,10 +94,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                kvm->arch.return_nisv_io_abort_to_user = true;
                break;
        case KVM_CAP_ARM_MTE:
-               if (!system_supports_mte() || kvm->created_vcpus)
-                       return -EINVAL;
-               r = 0;
-               kvm->arch.mte_enabled = true;
+               mutex_lock(&kvm->lock);
+               if (!system_supports_mte() || kvm->created_vcpus) {
+                       r = -EINVAL;
+               } else {
+                       r = 0;
+                       kvm->arch.mte_enabled = true;
+               }
+               mutex_unlock(&kvm->lock);
                break;
        default:
                r = -EINVAL;
index d938ce9..a6ce991 100644 (file)
@@ -193,7 +193,7 @@ static bool range_is_memory(u64 start, u64 end)
 {
        struct kvm_mem_range r1, r2;
 
-       if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+       if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2))
                return false;
        if (r1.start != r2.start)
                return false;
index 8490ed2..1fdb7bb 100644 (file)
@@ -219,6 +219,43 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
        free_area_init(max_zone_pfns);
 }
 
+int pfn_valid(unsigned long pfn)
+{
+       phys_addr_t addr = PFN_PHYS(pfn);
+       struct mem_section *ms;
+
+       /*
+        * Ensure the upper PAGE_SHIFT bits are clear in the
+        * pfn. Otherwise it might lead to false positives when
+        * some of the upper bits are set, but the lower bits
+        * match a valid pfn.
+        */
+       if (PHYS_PFN(addr) != pfn)
+               return 0;
+
+       if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+               return 0;
+
+       ms = __pfn_to_section(pfn);
+       if (!valid_section(ms))
+               return 0;
+
+       /*
+        * ZONE_DEVICE memory does not have memblock entries, so the
+        * memblock_is_map_memory() check for ZONE_DEVICE based
+        * addresses will always fail. Even normal hotplugged memory
+        * will never have the MEMBLOCK_NOMAP flag set in its memblock
+        * entries. Skip the memblock search for all non-early memory
+        * sections, which covers all hotplugged memory, both normal
+        * and ZONE_DEVICE based.
+        */
+       if (!early_section(ms))
+               return pfn_section_valid(ms, pfn);
+
+       return memblock_is_memory(addr);
+}
+EXPORT_SYMBOL(pfn_valid);
+
 int pfn_is_map_memory(unsigned long pfn)
 {
        phys_addr_t addr = PFN_PHYS(pfn);
index 29e9463..277d61a 100644 (file)
@@ -26,6 +26,7 @@ config COLDFIRE
        bool "Coldfire CPU family support"
        select ARCH_HAVE_CUSTOM_GPIO_H
        select CPU_HAS_NO_BITFIELDS
+       select CPU_HAS_NO_CAS
        select CPU_HAS_NO_MULDIV64
        select GENERIC_CSUM
        select GPIOLIB
@@ -39,6 +40,7 @@ config M68000
        bool
        depends on !MMU
        select CPU_HAS_NO_BITFIELDS
+       select CPU_HAS_NO_CAS
        select CPU_HAS_NO_MULDIV64
        select CPU_HAS_NO_UNALIGNED
        select GENERIC_CSUM
@@ -54,6 +56,7 @@ config M68000
 config MCPU32
        bool
        select CPU_HAS_NO_BITFIELDS
+       select CPU_HAS_NO_CAS
        select CPU_HAS_NO_UNALIGNED
        select CPU_NO_EFFICIENT_FFS
        help
@@ -383,7 +386,7 @@ config ADVANCED
 
 config RMW_INSNS
        bool "Use read-modify-write instructions"
-       depends on ADVANCED
+       depends on ADVANCED && !CPU_HAS_NO_CAS
        help
          This allows to use certain instructions that work with indivisible
          read-modify-write bus cycles. While this is faster than the
@@ -450,6 +453,9 @@ config M68K_L2_CACHE
 config CPU_HAS_NO_BITFIELDS
        bool
 
+config CPU_HAS_NO_CAS
+       bool
+
 config CPU_HAS_NO_MULDIV64
        bool
 
index 0a2cacf..5f53628 100644 (file)
@@ -84,6 +84,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -323,7 +324,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -502,6 +502,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -616,6 +617,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -624,7 +626,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -636,6 +637,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 4dc6dcf..d956864 100644 (file)
@@ -80,6 +80,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -458,6 +459,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -580,7 +582,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -592,6 +593,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 23d910a..dbf1960 100644 (file)
@@ -87,6 +87,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -324,7 +325,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -480,6 +480,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -594,6 +595,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -602,7 +604,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -614,6 +615,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 2c3f428..7620db3 100644 (file)
@@ -77,6 +77,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -451,6 +452,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -573,7 +575,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -585,6 +586,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 5b1898d..113a02d 100644 (file)
@@ -79,6 +79,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -460,6 +461,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -582,7 +584,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -594,6 +595,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 9606ccd..a8e006e 100644 (file)
@@ -78,6 +78,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -315,7 +316,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -482,6 +482,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -596,6 +597,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -604,7 +606,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -616,6 +617,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 3175ba5..b665590 100644 (file)
@@ -98,6 +98,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -344,7 +345,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -567,6 +567,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -681,6 +682,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -689,7 +691,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -701,6 +702,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 793085f..563ba47 100644 (file)
@@ -76,6 +76,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -450,6 +451,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -572,7 +574,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -584,6 +585,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 56fbac7..9f1b44d 100644 (file)
@@ -77,6 +77,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -451,6 +452,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -573,7 +575,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -585,6 +586,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 0e15431..1993433 100644 (file)
@@ -78,6 +78,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -314,7 +315,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -469,6 +469,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -583,6 +584,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -591,7 +593,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -603,6 +604,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index d923064..8898ae3 100644 (file)
@@ -22,7 +22,6 @@ CONFIG_RAMSIZE=0x8000000
 CONFIG_VECTORBASE=0x40000000
 CONFIG_KERNELBASE=0x40001000
 # CONFIG_BLK_DEV_BSG is not set
-CONFIG_BLK_CMDLINE_PARSER=y
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
 CONFIG_BINFMT_MISC=y
index 3490a05..56dbc63 100644 (file)
@@ -74,6 +74,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -453,6 +454,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -574,7 +576,6 @@ CONFIG_TEST_LOCKUP=m
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -586,6 +587,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 4e92c8c..6bd1bba 100644 (file)
@@ -74,6 +74,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -452,6 +453,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -574,7 +576,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -586,6 +587,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index d2875e3..79e5542 100644 (file)
@@ -254,8 +254,8 @@ static void __exit nfeth_cleanup(void)
 
        for (i = 0; i < MAX_UNIT; i++) {
                if (nfeth_dev[i]) {
-                       unregister_netdev(nfeth_dev[0]);
-                       free_netdev(nfeth_dev[0]);
+                       unregister_netdev(nfeth_dev[i]);
+                       free_netdev(nfeth_dev[i]);
                }
        }
        free_irq(nfEtherIRQ, nfeth_interrupt);
index 8637bf8..cfba83d 100644 (file)
@@ -48,7 +48,7 @@ static inline int arch_atomic_##op##_return(int i, atomic_t *v)               \
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
-                       : "g" (i), "2" (arch_atomic_read(v)));          \
+                       : "di" (i), "2" (arch_atomic_read(v)));         \
        return t;                                                       \
 }
 
@@ -63,7 +63,7 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v)          \
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
-                       : "g" (i), "2" (arch_atomic_read(v)));          \
+                       : "di" (i), "2" (arch_atomic_read(v)));         \
        return tmp;                                                     \
 }
 
index 4e942b7..653befc 100644 (file)
@@ -321,7 +321,7 @@ KBUILD_LDFLAGS              += -m $(ld-emul)
 
 ifdef CONFIG_MIPS
 CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
-       egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
+       egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
        sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
 endif
 
index 9dbed7b..76e43a7 100644 (file)
@@ -69,24 +69,24 @@ static void ar2315_misc_irq_handler(struct irq_desc *desc)
 {
        u32 pending = ar2315_rst_reg_read(AR2315_ISR) &
                      ar2315_rst_reg_read(AR2315_IMR);
-       unsigned nr, misc_irq = 0;
+       unsigned nr;
+       int ret = 0;
 
        if (pending) {
                struct irq_domain *domain = irq_desc_get_handler_data(desc);
 
                nr = __ffs(pending);
-               misc_irq = irq_find_mapping(domain, nr);
-       }
 
-       if (misc_irq) {
                if (nr == AR2315_MISC_IRQ_GPIO)
                        ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_GPIO);
                else if (nr == AR2315_MISC_IRQ_WATCHDOG)
                        ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_WD);
-               generic_handle_irq(misc_irq);
-       } else {
-               spurious_interrupt();
+
+               ret = generic_handle_domain_irq(domain, nr);
        }
+
+       if (!pending || ret)
+               spurious_interrupt();
 }
 
 static void ar2315_misc_irq_unmask(struct irq_data *d)
index 23c879f..822b639 100644 (file)
@@ -73,22 +73,21 @@ static void ar5312_misc_irq_handler(struct irq_desc *desc)
 {
        u32 pending = ar5312_rst_reg_read(AR5312_ISR) &
                      ar5312_rst_reg_read(AR5312_IMR);
-       unsigned nr, misc_irq = 0;
+       unsigned nr;
+       int ret = 0;
 
        if (pending) {
                struct irq_domain *domain = irq_desc_get_handler_data(desc);
 
                nr = __ffs(pending);
-               misc_irq = irq_find_mapping(domain, nr);
-       }
 
-       if (misc_irq) {
-               generic_handle_irq(misc_irq);
+               ret = generic_handle_domain_irq(domain, nr);
                if (nr == AR5312_MISC_IRQ_TIMER)
                        ar5312_rst_reg_read(AR5312_TIMER);
-       } else {
-               spurious_interrupt();
        }
+
+       if (!pending || ret)
+               spurious_interrupt();
 }
 
 /* Enable the specified AR5312_MISC_IRQ interrupt */
index d502673..34d179c 100644 (file)
@@ -7,8 +7,6 @@
 #ifndef __ASM_RC32434_RB_H
 #define __ASM_RC32434_RB_H
 
-#include <linux/genhd.h>
-
 #define REGBASE                0x18000000
 #define IDT434_REG_BASE ((volatile void *) KSEG1ADDR(REGBASE))
 #define UART0BASE      0x58000
index 4b2567d..c7925d0 100644 (file)
@@ -58,15 +58,20 @@ do {                                                        \
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       pmd_t *pmd = NULL;
+       pmd_t *pmd;
        struct page *pg;
 
-       pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
-       if (pg) {
-               pgtable_pmd_page_ctor(pg);
-               pmd = (pmd_t *)page_address(pg);
-               pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+       pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+       if (!pg)
+               return NULL;
+
+       if (!pgtable_pmd_page_ctor(pg)) {
+               __free_pages(pg, PMD_ORDER);
+               return NULL;
        }
+
+       pmd = (pmd_t *)page_address(pg);
+       pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
        return pmd;
 }
 
index acfbdc0..b732495 100644 (file)
@@ -300,7 +300,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
         */
        irq = __fls(irq);
        hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
-       generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
+       generic_handle_domain_irq(ltq_domain, hwirq);
 
        /* if this is a EBU irq, we need to ack it or get a deadlock */
        if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
index ee74719..4ffbcc5 100644 (file)
@@ -48,7 +48,8 @@ static struct plat_serial8250_port uart8250_data[] = {
                .mapbase        = 0x1f000900,   /* The CBUS UART */
                .irq            = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
                .uartclk        = 3686400,      /* Twice the usual clk! */
-               .iotype         = UPIO_MEM32,
+               .iotype         = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ?
+                                 UPIO_MEM32BE : UPIO_MEM32,
                .flags          = CBUS_UART_FLAGS,
                .regshift       = 3,
        },
index c1a655a..9a4bfb4 100644 (file)
@@ -337,14 +337,12 @@ static void ar2315_pci_irq_handler(struct irq_desc *desc)
        struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc);
        u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) &
                      ar2315_pci_reg_read(apc, AR2315_PCI_IMR);
-       unsigned pci_irq = 0;
+       int ret = 0;
 
        if (pending)
-               pci_irq = irq_find_mapping(apc->domain, __ffs(pending));
+               ret = generic_handle_domain_irq(apc->domain, __ffs(pending));
 
-       if (pci_irq)
-               generic_handle_irq(pci_irq);
-       else
+       if (!pending || ret)
                spurious_interrupt();
 }
 
index c48e23c..d3c947f 100644 (file)
@@ -140,10 +140,9 @@ static void rt3883_pci_irq_handler(struct irq_desc *desc)
        }
 
        while (pending) {
-               unsigned irq, bit = __ffs(pending);
+               unsigned bit = __ffs(pending);
 
-               irq = irq_find_mapping(rpc->irq_domain, bit);
-               generic_handle_irq(irq);
+               generic_handle_domain_irq(rpc->irq_domain, bit);
 
                pending &= ~BIT(bit);
        }
index 220ca0c..fa353bc 100644 (file)
@@ -100,7 +100,7 @@ static void ralink_intc_irq_handler(struct irq_desc *desc)
 
        if (pending) {
                struct irq_domain *domain = irq_desc_get_handler_data(desc);
-               generic_handle_irq(irq_find_mapping(domain, __ffs(pending)));
+               generic_handle_domain_irq(domain, __ffs(pending));
        } else {
                spurious_interrupt();
        }
index 95c1bff..a0dd3bd 100644 (file)
@@ -190,7 +190,7 @@ static void ip27_do_irq_mask0(struct irq_desc *desc)
        unsigned long *mask = per_cpu(irq_enable_mask, cpu);
        struct irq_domain *domain;
        u64 pend0;
-       int irq;
+       int ret;
 
        /* copied from Irix intpend0() */
        pend0 = LOCAL_HUB_L(PI_INT_PEND0);
@@ -216,10 +216,8 @@ static void ip27_do_irq_mask0(struct irq_desc *desc)
 #endif
        {
                domain = irq_desc_get_handler_data(desc);
-               irq = irq_linear_revmap(domain, __ffs(pend0));
-               if (irq)
-                       generic_handle_irq(irq);
-               else
+               ret = generic_handle_domain_irq(domain, __ffs(pend0));
+               if (ret)
                        spurious_interrupt();
        }
 
@@ -232,7 +230,7 @@ static void ip27_do_irq_mask1(struct irq_desc *desc)
        unsigned long *mask = per_cpu(irq_enable_mask, cpu);
        struct irq_domain *domain;
        u64 pend1;
-       int irq;
+       int ret;
 
        /* copied from Irix intpend0() */
        pend1 = LOCAL_HUB_L(PI_INT_PEND1);
@@ -242,10 +240,8 @@ static void ip27_do_irq_mask1(struct irq_desc *desc)
                return;
 
        domain = irq_desc_get_handler_data(desc);
-       irq = irq_linear_revmap(domain, __ffs(pend1) + 64);
-       if (irq)
-               generic_handle_irq(irq);
-       else
+       ret = generic_handle_domain_irq(domain, __ffs(pend1) + 64);
+       if (ret)
                spurious_interrupt();
 
        LOCAL_HUB_L(PI_INT_PEND1);
index ba87704..423c32c 100644 (file)
@@ -99,7 +99,7 @@ static void ip30_normal_irq(struct irq_desc *desc)
        int cpu = smp_processor_id();
        struct irq_domain *domain;
        u64 pend, mask;
-       int irq;
+       int ret;
 
        pend = heart_read(&heart_regs->isr);
        mask = (heart_read(&heart_regs->imr[cpu]) &
@@ -130,10 +130,8 @@ static void ip30_normal_irq(struct irq_desc *desc)
 #endif
        {
                domain = irq_desc_get_handler_data(desc);
-               irq = irq_linear_revmap(domain, __ffs(pend));
-               if (irq)
-                       generic_handle_irq(irq);
-               else
+               ret = generic_handle_domain_irq(domain, __ffs(pend));
+               if (ret)
                        spurious_interrupt();
        }
 }
index c6a1a9f..6b7890e 100644 (file)
@@ -19,11 +19,9 @@ static u32 ienable;
 asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
        struct pt_regs *oldregs = set_irq_regs(regs);
-       int irq;
 
        irq_enter();
-       irq = irq_find_mapping(NULL, hwirq);
-       generic_handle_irq(irq);
+       generic_handle_domain_irq(NULL, hwirq);
        irq_exit();
 
        set_irq_regs(oldregs);
index 4a0c9db..f6e1132 100644 (file)
@@ -8,19 +8,4 @@ extern void * memset(void *, int, size_t);
 #define __HAVE_ARCH_MEMCPY
 void * memcpy(void * dest,const void *src,size_t count);
 
-#define __HAVE_ARCH_STRLEN
-extern size_t strlen(const char *s);
-
-#define __HAVE_ARCH_STRCPY
-extern char *strcpy(char *dest, const char *src);
-
-#define __HAVE_ARCH_STRNCPY
-extern char *strncpy(char *dest, const char *src, size_t count);
-
-#define __HAVE_ARCH_STRCAT
-extern char *strcat(char *dest, const char *src);
-
-#define __HAVE_ARCH_MEMSET
-extern void *memset(void *, int, size_t);
-
 #endif
index 8ed409e..e8a6a75 100644 (file)
 
 #include <linux/string.h>
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strcat);
 
 #include <linux/atomic.h>
 EXPORT_SYMBOL(__xchg8);
index 2d7a997..7b19766 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for parisc-specific library files
 #
 
-lib-y  := lusercopy.o bitops.o checksum.o io.o memcpy.o \
-          ucmpdi2.o delay.o string.o
+lib-y  := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
+          ucmpdi2.o delay.o
 
 obj-y  := iomap.o
diff --git a/arch/parisc/lib/memset.c b/arch/parisc/lib/memset.c
new file mode 100644 (file)
index 0000000..133e480
--- /dev/null
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <linux/types.h>
+#include <asm/string.h>
+
+#define OPSIZ (BITS_PER_LONG/8)
+typedef unsigned long op_t;
+
+void *
+memset (void *dstpp, int sc, size_t len)
+{
+  unsigned int c = sc;
+  long int dstp = (long int) dstpp;
+
+  if (len >= 8)
+    {
+      size_t xlen;
+      op_t cccc;
+
+      cccc = (unsigned char) c;
+      cccc |= cccc << 8;
+      cccc |= cccc << 16;
+      if (OPSIZ > 4)
+       /* Do the shift in two steps to avoid warning if long has 32 bits.  */
+       cccc |= (cccc << 16) << 16;
+
+      /* There are at least some bytes to set.
+        No need to test for LEN == 0 in this alignment loop.  */
+      while (dstp % OPSIZ != 0)
+       {
+         ((unsigned char *) dstp)[0] = c;
+         dstp += 1;
+         len -= 1;
+       }
+
+      /* Write 8 `op_t' per iteration until less than 8 `op_t' remain.  */
+      xlen = len / (OPSIZ * 8);
+      while (xlen > 0)
+       {
+         ((op_t *) dstp)[0] = cccc;
+         ((op_t *) dstp)[1] = cccc;
+         ((op_t *) dstp)[2] = cccc;
+         ((op_t *) dstp)[3] = cccc;
+         ((op_t *) dstp)[4] = cccc;
+         ((op_t *) dstp)[5] = cccc;
+         ((op_t *) dstp)[6] = cccc;
+         ((op_t *) dstp)[7] = cccc;
+         dstp += 8 * OPSIZ;
+         xlen -= 1;
+       }
+      len %= OPSIZ * 8;
+
+      /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain.  */
+      xlen = len / OPSIZ;
+      while (xlen > 0)
+       {
+         ((op_t *) dstp)[0] = cccc;
+         dstp += OPSIZ;
+         xlen -= 1;
+       }
+      len %= OPSIZ;
+    }
+
+  /* Write the last few bytes.  */
+  while (len > 0)
+    {
+      ((unsigned char *) dstp)[0] = c;
+      dstp += 1;
+      len -= 1;
+    }
+
+  return dstpp;
+}
diff --git a/arch/parisc/lib/string.S b/arch/parisc/lib/string.S
deleted file mode 100644 (file)
index 4a64264..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- *    PA-RISC assembly string functions
- *
- *    Copyright (C) 2019 Helge Deller <deller@gmx.de>
- */
-
-#include <asm/assembly.h>
-#include <linux/linkage.h>
-
-       .section .text.hot
-       .level PA_ASM_LEVEL
-
-       t0 = r20
-       t1 = r21
-       t2 = r22
-
-ENTRY_CFI(strlen, frame=0,no_calls)
-       or,COND(<>) arg0,r0,ret0
-       b,l,n   .Lstrlen_null_ptr,r0
-       depwi   0,31,2,ret0
-       cmpb,COND(<>) arg0,ret0,.Lstrlen_not_aligned
-       ldw,ma  4(ret0),t0
-       cmpib,tr 0,r0,.Lstrlen_loop
-       uxor,nbz r0,t0,r0
-.Lstrlen_not_aligned:
-       uaddcm  arg0,ret0,t1
-       shladd  t1,3,r0,t1
-       mtsar   t1
-       depwi   -1,%sar,32,t0
-       uxor,nbz r0,t0,r0
-.Lstrlen_loop:
-       b,l,n   .Lstrlen_end_loop,r0
-       ldw,ma  4(ret0),t0
-       cmpib,tr 0,r0,.Lstrlen_loop
-       uxor,nbz r0,t0,r0
-.Lstrlen_end_loop:
-       extrw,u,<> t0,7,8,r0
-       addib,tr,n -3,ret0,.Lstrlen_out
-       extrw,u,<> t0,15,8,r0
-       addib,tr,n -2,ret0,.Lstrlen_out
-       extrw,u,<> t0,23,8,r0
-       addi    -1,ret0,ret0
-.Lstrlen_out:
-       bv r0(rp)
-       uaddcm ret0,arg0,ret0
-.Lstrlen_null_ptr:
-       bv,n r0(rp)
-ENDPROC_CFI(strlen)
-
-
-ENTRY_CFI(strcpy, frame=0,no_calls)
-       ldb     0(arg1),t0
-       stb     t0,0(arg0)
-       ldo     0(arg0),ret0
-       ldo     1(arg1),t1
-       cmpb,=  r0,t0,2f
-       ldo     1(arg0),t2
-1:     ldb     0(t1),arg1
-       stb     arg1,0(t2)
-       ldo     1(t1),t1
-       cmpb,<> r0,arg1,1b
-       ldo     1(t2),t2
-2:     bv,n    r0(rp)
-ENDPROC_CFI(strcpy)
-
-
-ENTRY_CFI(strncpy, frame=0,no_calls)
-       ldb     0(arg1),t0
-       stb     t0,0(arg0)
-       ldo     1(arg1),t1
-       ldo     0(arg0),ret0
-       cmpb,=  r0,t0,2f
-       ldo     1(arg0),arg1
-1:     ldo     -1(arg2),arg2
-       cmpb,COND(=),n r0,arg2,2f
-       ldb     0(t1),arg0
-       stb     arg0,0(arg1)
-       ldo     1(t1),t1
-       cmpb,<> r0,arg0,1b
-       ldo     1(arg1),arg1
-2:     bv,n    r0(rp)
-ENDPROC_CFI(strncpy)
-
-
-ENTRY_CFI(strcat, frame=0,no_calls)
-       ldb     0(arg0),t0
-       cmpb,=  t0,r0,2f
-       ldo     0(arg0),ret0
-       ldo     1(arg0),arg0
-1:     ldb     0(arg0),t1
-       cmpb,<>,n r0,t1,1b
-       ldo     1(arg0),arg0
-2:     ldb     0(arg1),t2
-       stb     t2,0(arg0)
-       ldo     1(arg0),arg0
-       ldb     0(arg1),t0
-       cmpb,<> r0,t0,2b
-       ldo     1(arg1),arg1
-       bv,n    r0(rp)
-ENDPROC_CFI(strcat)
-
-
-ENTRY_CFI(memset, frame=0,no_calls)
-       copy    arg0,ret0
-       cmpb,COND(=) r0,arg0,4f
-       copy    arg0,t2
-       cmpb,COND(=) r0,arg2,4f
-       ldo     -1(arg2),arg3
-       subi    -1,arg3,t0
-       subi    0,t0,t1
-       cmpiclr,COND(>=) 0,t1,arg2
-       ldo     -1(t1),arg2
-       extru arg2,31,2,arg0
-2:     stb     arg1,0(t2)
-       ldo     1(t2),t2
-       addib,>= -1,arg0,2b
-       ldo     -1(arg3),arg3
-       cmpiclr,COND(<=) 4,arg2,r0
-       b,l,n   4f,r0
-#ifdef CONFIG_64BIT
-       depd,*  r0,63,2,arg2
-#else
-       depw    r0,31,2,arg2
-#endif
-       ldo     1(t2),t2
-3:     stb     arg1,-1(t2)
-       stb     arg1,0(t2)
-       stb     arg1,1(t2)
-       stb     arg1,2(t2)
-       addib,COND(>) -4,arg2,3b
-       ldo     4(t2),t2
-4:     bv,n    r0(rp)
-ENDPROC_CFI(memset)
-
-       .end
index 6420112..d4b145b 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <asm/bug.h>
 #include <asm/book3s/32/mmu-hash.h>
+#include <asm/mmu.h>
+#include <asm/synch.h>
 
 #ifndef __ASSEMBLY__
 
@@ -28,6 +30,15 @@ static inline void kuep_lock(void)
                return;
 
        update_user_segments(mfsr(0) | SR_NX);
+       /*
+        * This isync() shouldn't be necessary as the kernel is not expected to
+        * run any instruction in userspace soon after the update of segments,
+        * but hash based cores (at least G3) seem to exhibit a random
+        * behaviour when the 'isync' is not there. 603 cores don't have this
+        * behaviour so don't do the 'isync' as it saves several CPU cycles.
+        */
+       if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               isync();        /* Context sync required after mtsr() */
 }
 
 static inline void kuep_unlock(void)
@@ -36,6 +47,15 @@ static inline void kuep_unlock(void)
                return;
 
        update_user_segments(mfsr(0) & ~SR_NX);
+       /*
+        * This isync() shouldn't be necessary as a 'rfi' will soon be executed
+        * to return to userspace, but hash based cores (at least G3) seem to
+        * exhibit a random behaviour when the 'isync' is not there. 603 cores
+        * don't have this behaviour so don't do the 'isync' as it saves several
+        * CPU cycles.
+        */
+       if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               isync();        /* Context sync required after mtsr() */
 }
 
 #ifdef CONFIG_PPC_KUAP
index d4bdf7d..6b800d3 100644 (file)
@@ -583,6 +583,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
 
 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
 
+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
+
 void __noreturn unrecoverable_exception(struct pt_regs *regs);
 
 void replay_system_reset(void);
index 4982f37..2b32785 100644 (file)
@@ -52,7 +52,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];
 
-extern void do_IRQ(struct pt_regs *regs);
+void __do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);
 
index 3e5d470..14422e8 100644 (file)
@@ -70,6 +70,22 @@ struct pt_regs
                unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
        };
 #endif
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+       struct { /* Must be a multiple of 16 bytes */
+               unsigned long mas0;
+               unsigned long mas1;
+               unsigned long mas2;
+               unsigned long mas3;
+               unsigned long mas6;
+               unsigned long mas7;
+               unsigned long srr0;
+               unsigned long srr1;
+               unsigned long csrr0;
+               unsigned long csrr1;
+               unsigned long dsrr0;
+               unsigned long dsrr1;
+       };
+#endif
 };
 #endif
 
index a47eefa..5bee245 100644 (file)
@@ -309,24 +309,21 @@ int main(void)
        STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
 #endif
 
-#if defined(CONFIG_PPC32)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-       DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
-       DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+       STACK_PT_REGS_OFFSET(MAS0, mas0);
        /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
-       DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
-       DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
-       DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
-       DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
-       DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
-       DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
-       DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
-       DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
-       DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
-       DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
-       DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
-       DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
-#endif
+       STACK_PT_REGS_OFFSET(MMUCR, mas0);
+       STACK_PT_REGS_OFFSET(MAS1, mas1);
+       STACK_PT_REGS_OFFSET(MAS2, mas2);
+       STACK_PT_REGS_OFFSET(MAS3, mas3);
+       STACK_PT_REGS_OFFSET(MAS6, mas6);
+       STACK_PT_REGS_OFFSET(MAS7, mas7);
+       STACK_PT_REGS_OFFSET(_SRR0, srr0);
+       STACK_PT_REGS_OFFSET(_SRR1, srr1);
+       STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
+       STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
+       STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
+       STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
 #endif
 
        /* About the CPU features table */
index 4aec59a..37859e6 100644 (file)
@@ -812,7 +812,6 @@ __start_interrupts:
  * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
  */
 EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
-1:
        /* SCV 0 */
        mr      r9,r13
        GET_PACA(r13)
@@ -842,10 +841,12 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
        b       system_call_vectored_sigill
 #endif
        .endr
-2:
 EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
 
-SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.
+// Treat scv vectors as soft-masked, see comment above.
+// Use absolute values rather than labels here, so they don't get relocated,
+// because this code runs unrelocated.
+SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)
 
 #ifdef CONFIG_RELOCATABLE
 TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
index 764edd8..68e5c0a 100644 (file)
@@ -300,7 +300,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
        EXCEPTION_PROLOG_1
        EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
        prepare_transfer_to_handler
-       lwz     r5, _DSISR(r11)
+       lwz     r5, _DSISR(r1)
        andis.  r0, r5, DSISR_DABRMATCH@h
        bne-    1f
        bl      do_page_fault
index 87b806e..e550342 100644 (file)
@@ -168,20 +168,18 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 /* only on e500mc */
 #define DBG_STACK_BASE         dbgirq_ctx
 
-#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
-
 #ifdef CONFIG_SMP
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)              \
        mfspr   r8,SPRN_PIR;                            \
        slwi    r8,r8,2;                                \
        addis   r8,r8,level##_STACK_BASE@ha;            \
        lwz     r8,level##_STACK_BASE@l(r8);            \
-       addi    r8,r8,EXC_LVL_FRAME_OVERHEAD;
+       addi    r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #else
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)              \
        lis     r8,level##_STACK_BASE@ha;               \
        lwz     r8,level##_STACK_BASE@l(r8);            \
-       addi    r8,r8,EXC_LVL_FRAME_OVERHEAD;
+       addi    r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #endif
 
 /*
@@ -208,7 +206,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
        mtmsr   r11;                                                    \
        mfspr   r11,SPRN_SPRG_THREAD;   /* if from user, start at top of   */\
        lwz     r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
-       addi    r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame    */\
+       addi    r11,r11,THREAD_SIZE - INT_FRAME_SIZE;   /* allocate stack frame    */\
        beq     1f;                                                          \
        /* COMING FROM USER MODE */                                          \
        stw     r9,_CCR(r11);           /* save CR                         */\
@@ -516,24 +514,5 @@ label:
        bl      kernel_fp_unavailable_exception;                              \
        b       interrupt_return
 
-#else /* __ASSEMBLY__ */
-struct exception_regs {
-       unsigned long mas0;
-       unsigned long mas1;
-       unsigned long mas2;
-       unsigned long mas3;
-       unsigned long mas6;
-       unsigned long mas7;
-       unsigned long srr0;
-       unsigned long srr1;
-       unsigned long csrr0;
-       unsigned long csrr1;
-       unsigned long dsrr0;
-       unsigned long dsrr1;
-};
-
-/* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE       ALIGN(sizeof (struct exception_regs), 16)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __HEAD_BOOKE_H__ */
index 91e63ea..551b653 100644 (file)
@@ -750,7 +750,7 @@ void __do_irq(struct pt_regs *regs)
        trace_irq_exit(regs);
 }
 
-DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+void __do_IRQ(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
        void *cursp, *irqsp, *sirqsp;
@@ -774,6 +774,11 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
        set_irq_regs(old_regs);
 }
 
+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+{
+       __do_IRQ(regs);
+}
+
 static void *__init alloc_vm_stack(void)
 {
        return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
index cbc28d1..7a7cd6b 100644 (file)
@@ -292,7 +292,8 @@ int kprobe_handler(struct pt_regs *regs)
        if (user_mode(regs))
                return 0;
 
-       if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+       if (!IS_ENABLED(CONFIG_BOOKE) &&
+           (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
                return 0;
 
        /*
index 5ff0e55..defecb3 100644 (file)
@@ -1167,7 +1167,7 @@ static int __init topology_init(void)
                 * CPU.  For instance, the boot cpu might never be valid
                 * for hotplugging.
                 */
-               if (smp_ops->cpu_offline_self)
+               if (smp_ops && smp_ops->cpu_offline_self)
                        c->hotpluggable = 1;
 #endif
 
index e45ce42..c487ba5 100644 (file)
@@ -586,7 +586,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 
 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
-               do_IRQ(regs);
+               __do_IRQ(regs);
 #endif
 
        old_regs = set_irq_regs(regs);
index dfbce52..d56254f 100644 (file)
@@ -1104,7 +1104,7 @@ DEFINE_INTERRUPT_HANDLER(RunModeException)
        _exception(SIGTRAP, regs, TRAP_UNK, 0);
 }
 
-DEFINE_INTERRUPT_HANDLER(single_step_exception)
+static void __single_step_exception(struct pt_regs *regs)
 {
        clear_single_step(regs);
        clear_br_trace(regs);
@@ -1121,6 +1121,11 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
 }
 
+DEFINE_INTERRUPT_HANDLER(single_step_exception)
+{
+       __single_step_exception(regs);
+}
+
 /*
  * After we have successfully emulated an instruction, we have to
  * check if the instruction was being single-stepped, and if so,
@@ -1130,7 +1135,7 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
 static void emulate_single_step(struct pt_regs *regs)
 {
        if (single_stepping(regs))
-               single_step_exception(regs);
+               __single_step_exception(regs);
 }
 
 static inline int __parse_fpscr(unsigned long fpscr)
index 0876216..edea388 100644 (file)
 /*
  * Updates the attributes of a page in three steps:
  *
- * 1. invalidate the page table entry
- * 2. flush the TLB
- * 3. install the new entry with the updated attributes
- *
- * Invalidating the pte means there are situations where this will not work
- * when in theory it should.
- * For example:
- * - removing write from page whilst it is being executed
- * - setting a page read-only whilst it is being read by another CPU
+ * 1. take the page_table_lock
+ * 2. install the new entry with the updated attributes
+ * 3. flush the TLB
  *
+ * This sequence is safe against concurrent updates, and also allows updating the
+ * attributes of a page currently being executed or accessed.
  */
 static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 {
@@ -36,9 +32,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 
        spin_lock(&init_mm.page_table_lock);
 
-       /* invalidate the PTE so it's safe to modify */
-       pte = ptep_get_and_clear(&init_mm, addr, ptep);
-       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+       pte = ptep_get(ptep);
 
        /* modify the PTE bits as desired, then apply */
        switch (action) {
@@ -59,11 +53,14 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
                break;
        }
 
-       set_pte_at(&init_mm, addr, ptep, pte);
+       pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);
 
        /* See ptesync comment in radix__set_pte_at() */
        if (radix_enabled())
                asm volatile("ptesync": : :"memory");
+
+       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
        spin_unlock(&init_mm.page_table_lock);
 
        return 0;
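The rewritten comment above describes the sequence the code now follows: serialise on init_mm.page_table_lock, install the updated PTE, then flush the TLB, so a page's attributes can be changed even while another CPU is executing or reading it. A condensed sketch of that ordering, using only helpers visible in the hunk (the demo_ wrapper name is illustrative):

	/* Sketch of the lock -> install -> flush ordering described above. */
	static void demo_update_kernel_pte(pte_t *ptep, unsigned long addr, pte_t pte)
	{
		spin_lock(&init_mm.page_table_lock);				/* 1. serialise updaters */
		pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);	/* 2. install new entry  */
		if (radix_enabled())		/* see ptesync comment in radix__set_pte_at() */
			asm volatile("ptesync": : :"memory");
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);			/* 3. flush the TLB      */
		spin_unlock(&init_mm.page_table_lock);
	}
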
index 36fb66c..89e2587 100644 (file)
@@ -198,7 +198,6 @@ static void uic_irq_cascade(struct irq_desc *desc)
        struct uic *uic = irq_desc_get_handler_data(desc);
        u32 msr;
        int src;
-       int subvirq;
 
        raw_spin_lock(&desc->lock);
        if (irqd_is_level_type(idata))
@@ -213,8 +212,7 @@ static void uic_irq_cascade(struct irq_desc *desc)
 
        src = 32 - ffs(msr);
 
-       subvirq = irq_linear_revmap(uic->irqhost, src);
-       generic_handle_irq(subvirq);
+       generic_handle_domain_irq(uic->irqhost, src);
 
 uic_irq_ret:
        raw_spin_lock(&desc->lock);
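The uic change above is the first of roughly a dozen conversions in this pull to generic_handle_domain_irq(): instead of translating the hardware interrupt number to a Linux virq with irq_linear_revmap() and then calling generic_handle_irq(), cascade handlers now hand the domain and the hwirq to the helper, which does the lookup internally and returns an error when no mapping exists (the fsl_msi and MPIC error handlers further down use that return value). A minimal sketch of the pattern; the demo_* names are placeholders, not code from the series:

	#include <linux/bitops.h>
	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	u32 demo_read_pending(void);	/* hypothetical status-register read */

	static void demo_pic_cascade(struct irq_desc *desc)
	{
		struct irq_domain *host = irq_desc_get_handler_data(desc);
		u32 pending = demo_read_pending();

		while (pending) {
			unsigned int hwirq = __ffs(pending);

			/* revmap lookup and dispatch in one call */
			generic_handle_domain_irq(host, hwirq);
			pending &= pending - 1;
		}
	}
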
index b298163..ea46870 100644 (file)
@@ -81,11 +81,10 @@ static struct irq_chip cpld_pic = {
        .irq_unmask = cpld_unmask_irq,
 };
 
-static int
+static unsigned int
 cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
                            u8 __iomem *maskp)
 {
-       int cpld_irq;
        u8 status = in_8(statusp);
        u8 mask = in_8(maskp);
 
@@ -93,28 +92,26 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
        status |= (ignore | mask);
 
        if (status == 0xff)
-               return 0;
-
-       cpld_irq = ffz(status) + offset;
+               return ~0;
 
-       return irq_linear_revmap(cpld_pic_host, cpld_irq);
+       return ffz(status) + offset;
 }
 
 static void cpld_pic_cascade(struct irq_desc *desc)
 {
-       unsigned int irq;
+       unsigned int hwirq;
 
-       irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
+       hwirq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
                &cpld_regs->pci_mask);
-       if (irq) {
-               generic_handle_irq(irq);
+       if (hwirq != ~0) {
+               generic_handle_domain_irq(cpld_pic_host, hwirq);
                return;
        }
 
-       irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status,
+       hwirq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status,
                &cpld_regs->misc_mask);
-       if (irq) {
-               generic_handle_irq(irq);
+       if (hwirq != ~0) {
+               generic_handle_domain_irq(cpld_pic_host, hwirq);
                return;
        }
 }
index efb8bde..110c444 100644 (file)
@@ -78,7 +78,7 @@ static struct irq_chip media5200_irq_chip = {
 static void media5200_irq_cascade(struct irq_desc *desc)
 {
        struct irq_chip *chip = irq_desc_get_chip(desc);
-       int sub_virq, val;
+       int val;
        u32 status, enable;
 
        /* Mask off the cascaded IRQ */
@@ -92,11 +92,10 @@ static void media5200_irq_cascade(struct irq_desc *desc)
        enable = in_be32(media5200_irq.regs + MEDIA5200_IRQ_STATUS);
        val = ffs((status & enable) >> MEDIA5200_IRQ_SHIFT);
        if (val) {
-               sub_virq = irq_linear_revmap(media5200_irq.irqhost, val - 1);
-               /* pr_debug("%s: virq=%i s=%.8x e=%.8x hwirq=%i subvirq=%i\n",
-                *          __func__, virq, status, enable, val - 1, sub_virq);
+               generic_handle_domain_irq(media5200_irq.irqhost, val - 1);
+               /* pr_debug("%s: virq=%i s=%.8x e=%.8x hwirq=%i\n",
+                *          __func__, virq, status, enable, val - 1);
                 */
-               generic_handle_irq(sub_virq);
        }
 
        /* Processing done; can reenable the cascade now */
index 3823df2..f862b48 100644 (file)
@@ -190,14 +190,11 @@ static struct irq_chip mpc52xx_gpt_irq_chip = {
 static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc)
 {
        struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc);
-       int sub_virq;
        u32 status;
 
        status = in_be32(&gpt->regs->status) & MPC52xx_GPT_STATUS_IRQMASK;
-       if (status) {
-               sub_virq = irq_linear_revmap(gpt->irqhost, 0);
-               generic_handle_irq(sub_virq);
-       }
+       if (status)
+               generic_handle_domain_irq(gpt->irqhost, 0);
 }
 
 static int mpc52xx_gpt_irq_map(struct irq_domain *h, unsigned int virq,
index f82f75a..285bfe1 100644 (file)
@@ -91,10 +91,8 @@ static void pq2ads_pci_irq_demux(struct irq_desc *desc)
                        break;
 
                for (bit = 0; pend != 0; ++bit, pend <<= 1) {
-                       if (pend & 0x80000000) {
-                               int virq = irq_linear_revmap(priv->host, bit);
-                               generic_handle_irq(virq);
-                       }
+                       if (pend & 0x80000000)
+                               generic_handle_domain_irq(priv->host, bit);
                }
        }
 }
index 6794145..a208997 100644 (file)
@@ -98,7 +98,7 @@ config PPC_BOOK3S_64
        select PPC_HAVE_PMU_SUPPORT
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
-       select ARCH_ENABLE_PMD_SPLIT_PTLOCK
+       select ARCH_ENABLE_SPLIT_PMD_PTLOCK
        select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
        select ARCH_SUPPORTS_HUGETLBFS
        select ARCH_SUPPORTS_NUMA_BALANCING
index c0ab62b..0873a7a 100644 (file)
@@ -106,13 +106,9 @@ static void iic_ioexc_cascade(struct irq_desc *desc)
                        out_be64(&node_iic->iic_is, ack);
                /* handle them */
                for (cascade = 63; cascade >= 0; cascade--)
-                       if (bits & (0x8000000000000000UL >> cascade)) {
-                               unsigned int cirq =
-                                       irq_linear_revmap(iic_host,
+                       if (bits & (0x8000000000000000UL >> cascade))
+                               generic_handle_domain_irq(iic_host,
                                                          base | cascade);
-                               if (cirq)
-                                       generic_handle_irq(cirq);
-                       }
                /* post-ack level interrupts */
                ack = bits & ~IIC_ISR_EDGE_MASK;
                if (ack)
index 210785f..8af7586 100644 (file)
@@ -190,16 +190,11 @@ static void spider_irq_cascade(struct irq_desc *desc)
 {
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct spider_pic *pic = irq_desc_get_handler_data(desc);
-       unsigned int cs, virq;
+       unsigned int cs;
 
        cs = in_be32(pic->regs + TIR_CS) >> 24;
-       if (cs == SPIDER_IRQ_INVALID)
-               virq = 0;
-       else
-               virq = irq_linear_revmap(pic->host, cs);
-
-       if (virq)
-               generic_handle_irq(virq);
+       if (cs != SPIDER_IRQ_INVALID)
+               generic_handle_domain_irq(pic->host, cs);
 
        chip->irq_eoi(&desc->irq_data);
 }
index a1b7f79..1539633 100644 (file)
@@ -108,7 +108,6 @@ static const struct irq_domain_ops hlwd_irq_domain_ops = {
 static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
 {
        void __iomem *io_base = h->host_data;
-       int irq;
        u32 irq_status;
 
        irq_status = in_be32(io_base + HW_BROADWAY_ICR) &
@@ -116,23 +115,22 @@ static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
        if (irq_status == 0)
                return 0;       /* no more IRQs pending */
 
-       irq = __ffs(irq_status);
-       return irq_linear_revmap(h, irq);
+       return __ffs(irq_status);
 }
 
 static void hlwd_pic_irq_cascade(struct irq_desc *desc)
 {
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irq_domain *irq_domain = irq_desc_get_handler_data(desc);
-       unsigned int virq;
+       unsigned int hwirq;
 
        raw_spin_lock(&desc->lock);
        chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */
        raw_spin_unlock(&desc->lock);
 
-       virq = __hlwd_pic_get_irq(irq_domain);
-       if (virq)
-               generic_handle_irq(virq);
+       hwirq = __hlwd_pic_get_irq(irq_domain);
+       if (hwirq)
+               generic_handle_domain_irq(irq_domain, hwirq);
        else
                pr_err("spurious interrupt!\n");
 
@@ -190,7 +188,8 @@ static struct irq_domain *hlwd_pic_init(struct device_node *np)
 
 unsigned int hlwd_pic_get_irq(void)
 {
-       return __hlwd_pic_get_irq(hlwd_irq_host);
+       unsigned int hwirq = __hlwd_pic_get_irq(hlwd_irq_host);
+       return hwirq ? irq_linear_revmap(hlwd_irq_host, hwirq) : 0;
 }
 
 /*
index c164419..d55652b 100644 (file)
@@ -46,18 +46,15 @@ void opal_handle_events(void)
        e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask;
 again:
        while (e) {
-               int virq, hwirq;
+               int hwirq;
 
                hwirq = fls64(e) - 1;
                e &= ~BIT_ULL(hwirq);
 
                local_irq_disable();
-               virq = irq_find_mapping(opal_event_irqchip.domain, hwirq);
-               if (virq) {
-                       irq_enter();
-                       generic_handle_irq(virq);
-                       irq_exit();
-               }
+               irq_enter();
+               generic_handle_domain_irq(opal_event_irqchip.domain, hwirq);
+               irq_exit();
                local_irq_enable();
 
                cond_resched();
index 6b08866..0dfaa6a 100644 (file)
@@ -539,9 +539,10 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
         * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
         * H_CPU_BEHAV_FAVOUR_SECURITY is.
         */
-       if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+       if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
                security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
-       else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+               pseries_security_flavor = 0;
+       } else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
                pseries_security_flavor = 1;
        else
                pseries_security_flavor = 2;
index 5fa5fa2..9a98bb2 100644 (file)
@@ -99,7 +99,6 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data)
        struct mpic *mpic = (struct mpic *) data;
        u32 eisr, eimr;
        int errint;
-       unsigned int cascade_irq;
 
        eisr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EISR);
        eimr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EIMR);
@@ -108,13 +107,11 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data)
                return IRQ_NONE;
 
        while (eisr) {
+               int ret;
                errint = __builtin_clz(eisr);
-               cascade_irq = irq_linear_revmap(mpic->irqhost,
-                                mpic->err_int_vecs[errint]);
-               WARN_ON(!cascade_irq);
-               if (cascade_irq) {
-                       generic_handle_irq(cascade_irq);
-               } else {
+               ret = generic_handle_domain_irq(mpic->irqhost,
+                                               mpic->err_int_vecs[errint]);
+               if (WARN_ON(ret)) {
                        eimr |=  1 << (31 - errint);
                        mpic_fsl_err_write(mpic->err_regs, eimr);
                }
index 808e711..e6b06c3 100644 (file)
@@ -266,7 +266,6 @@ out_free:
 
 static irqreturn_t fsl_msi_cascade(int irq, void *data)
 {
-       unsigned int cascade_irq;
        struct fsl_msi *msi_data;
        int msir_index = -1;
        u32 msir_value = 0;
@@ -279,9 +278,6 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
 
        msir_index = cascade_data->index;
 
-       if (msir_index >= NR_MSI_REG_MAX)
-               cascade_irq = 0;
-
        switch (msi_data->feature & FSL_PIC_IP_MASK) {
        case FSL_PIC_IP_MPIC:
                msir_value = fsl_msi_read(msi_data->msi_regs,
@@ -305,15 +301,15 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
        }
 
        while (msir_value) {
+               int err;
                intr_index = ffs(msir_value) - 1;
 
-               cascade_irq = irq_linear_revmap(msi_data->irqhost,
+               err = generic_handle_domain_irq(msi_data->irqhost,
                                msi_hwirq(msi_data, msir_index,
                                          intr_index + have_shift));
-               if (cascade_irq) {
-                       generic_handle_irq(cascade_irq);
+               if (!err)
                        ret = IRQ_HANDLED;
-               }
+
                have_shift += intr_index + 1;
                msir_value = msir_value >> (intr_index + 1);
        }
index dbdbbc2..8183ca3 100644 (file)
@@ -67,6 +67,7 @@ static struct irq_domain *xive_irq_domain;
 static struct xive_ipi_desc {
        unsigned int irq;
        char name[16];
+       atomic_t started;
 } *xive_ipis;
 
 /*
@@ -1120,7 +1121,7 @@ static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
        .alloc  = xive_ipi_irq_domain_alloc,
 };
 
-static int __init xive_request_ipi(void)
+static int __init xive_init_ipis(void)
 {
        struct fwnode_handle *fwnode;
        struct irq_domain *ipi_domain;
@@ -1144,10 +1145,6 @@ static int __init xive_request_ipi(void)
                struct xive_ipi_desc *xid = &xive_ipis[node];
                struct xive_ipi_alloc_info info = { node };
 
-               /* Skip nodes without CPUs */
-               if (cpumask_empty(cpumask_of_node(node)))
-                       continue;
-
                /*
                 * Map one IPI interrupt per node for all cpus of that node.
                 * Since the HW interrupt number doesn't have any meaning,
@@ -1159,11 +1156,6 @@ static int __init xive_request_ipi(void)
                xid->irq = ret;
 
                snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
-
-               ret = request_irq(xid->irq, xive_muxed_ipi_action,
-                                 IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);
-
-               WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
        }
 
        return ret;
@@ -1178,6 +1170,22 @@ out:
        return ret;
 }
 
+static int xive_request_ipi(unsigned int cpu)
+{
+       struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
+       int ret;
+
+       if (atomic_inc_return(&xid->started) > 1)
+               return 0;
+
+       ret = request_irq(xid->irq, xive_muxed_ipi_action,
+                         IRQF_PERCPU | IRQF_NO_THREAD,
+                         xid->name, NULL);
+
+       WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
+       return ret;
+}
+
 static int xive_setup_cpu_ipi(unsigned int cpu)
 {
        unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
@@ -1192,6 +1200,9 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
        if (xc->hw_ipi != XIVE_BAD_IRQ)
                return 0;
 
+       /* Register the IPI */
+       xive_request_ipi(cpu);
+
        /* Grab an IPI from the backend, this will populate xc->hw_ipi */
        if (xive_ops->get_ipi(cpu, xc))
                return -EIO;
@@ -1231,6 +1242,8 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
        if (xc->hw_ipi == XIVE_BAD_IRQ)
                return;
 
+       /* TODO: clear IPI mapping */
+
        /* Mask the IPI */
        xive_do_source_set_mask(&xc->ipi_data, true);
 
@@ -1253,7 +1266,7 @@ void __init xive_smp_probe(void)
        smp_ops->cause_ipi = xive_cause_ipi;
 
        /* Register the IPI */
-       xive_request_ipi();
+       xive_init_ipis();
 
        /* Allocate and setup IPI for the boot CPU */
        xive_setup_cpu_ipi(smp_processor_id());
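The xive rework above splits IPI setup in two: xive_init_ipis() only allocates the per-node IPI mappings at probe time, and the request_irq() is deferred to CPU bring-up, where an atomic per-node flag ensures the first CPU of each node to come online performs it exactly once. A reduced sketch of that once-per-node guard; the demo_* names are illustrative and demo_ipi_action is assumed to exist:

	struct demo_ipi_desc {
		unsigned int irq;
		char name[16];
		atomic_t started;
	};

	static irqreturn_t demo_ipi_action(int irq, void *dev_id);	/* assumed handler */

	static int demo_request_node_ipi(struct demo_ipi_desc *xid)
	{
		/* only the first caller on this node gets past the test */
		if (atomic_inc_return(&xid->started) > 1)
			return 0;

		return request_irq(xid->irq, demo_ipi_action,
				   IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);
	}
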
index 8fcceb8..4f7b70a 100644 (file)
@@ -492,10 +492,16 @@ config CC_HAVE_STACKPROTECTOR_TLS
 
 config STACKPROTECTOR_PER_TASK
        def_bool y
+       depends on !GCC_PLUGIN_RANDSTRUCT
        depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
 
+config PHYS_RAM_BASE_FIXED
+       bool "Explicitly specified physical RAM address"
+       default n
+
 config PHYS_RAM_BASE
        hex "Platform Physical RAM address"
+       depends on PHYS_RAM_BASE_FIXED
        default "0x80000000"
        help
          This is the physical address of RAM in the system. It has to be
@@ -508,6 +514,7 @@ config XIP_KERNEL
        # This prevents XIP from being enabled by all{yes,mod}config, which
        # fail to build since XIP doesn't support large kernels.
        depends on !COMPILE_TEST
+       select PHYS_RAM_BASE_FIXED
        help
          Execute-In-Place allows the kernel to run from non-volatile storage
          directly addressable by the CPU, such as NOR flash. This saves RAM
index ec79944..baea7d2 100644 (file)
        model = "Microchip PolarFire-SoC Icicle Kit";
        compatible = "microchip,mpfs-icicle-kit";
 
+       aliases {
+               ethernet0 = &emac1;
+       };
+
        chosen {
                stdout-path = &serial0;
        };
index b981957..9d2fbbc 100644 (file)
                        reg = <0x0 0x20112000 0x0 0x2000>;
                        interrupt-parent = <&plic>;
                        interrupts = <70 71 72 73>;
-                       mac-address = [00 00 00 00 00 00];
+                       local-mac-address = [00 00 00 00 00 00];
                        clocks = <&clkcfg 5>, <&clkcfg 2>;
                        status = "disabled";
                        clock-names = "pclk", "hclk";
index b1c3c59..2e4ea84 100644 (file)
@@ -24,7 +24,7 @@
 
        memory@80000000 {
                device_type = "memory";
-               reg = <0x0 0x80000000 0x2 0x00000000>;
+               reg = <0x0 0x80000000 0x4 0x00000000>;
        };
 
        soc {
index 1f2be23..bc68231 100644 (file)
@@ -132,7 +132,6 @@ CONFIG_DEBUG_PLIST=y
 CONFIG_DEBUG_SG=y
 # CONFIG_RCU_TRACE is not set
 CONFIG_RCU_EQS_DEBUG=y
-CONFIG_DEBUG_BLOCK_EXT_DEVT=y
 # CONFIG_FTRACE is not set
 # CONFIG_RUNTIME_TESTING_MENU is not set
 CONFIG_MEMTEST=y
index 8dd02b8..434ef5b 100644 (file)
@@ -127,7 +127,6 @@ CONFIG_DEBUG_PLIST=y
 CONFIG_DEBUG_SG=y
 # CONFIG_RCU_TRACE is not set
 CONFIG_RCU_EQS_DEBUG=y
-CONFIG_DEBUG_BLOCK_EXT_DEVT=y
 # CONFIG_FTRACE is not set
 # CONFIG_RUNTIME_TESTING_MENU is not set
 CONFIG_MEMTEST=y
index cca8764..b0ca505 100644 (file)
@@ -103,6 +103,7 @@ struct kernel_mapping {
 };
 
 extern struct kernel_mapping kernel_map;
+extern phys_addr_t phys_ram_base;
 
 #ifdef CONFIG_64BIT
 #define is_kernel_mapping(x)   \
@@ -113,9 +114,9 @@ extern struct kernel_mapping kernel_map;
 #define linear_mapping_pa_to_va(x)     ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
 #define kernel_mapping_pa_to_va(y)     ({                                              \
        unsigned long _y = y;                                                           \
-       (_y >= CONFIG_PHYS_RAM_BASE) ?                                                  \
-               (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET) :   \
-               (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset);             \
+       (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ?                                 \
+               (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) :            \
+               (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET);    \
        })
 #define __pa_to_va_nodebug(x)          linear_mapping_pa_to_va(x)
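With phys_ram_base now a runtime variable (exported in a later hunk below), the XIP special case in kernel_mapping_pa_to_va() keys off whether the physical address still lies in flash, i.e. below the start of RAM, rather than comparing against CONFIG_PHYS_RAM_BASE, which after the Kconfig change above is only user-visible when PHYS_RAM_BASE_FIXED is selected. The same decision written out as a plain helper, purely for illustration (demo_ name is not from the patch):

	/* Sketch of the selection the macro above encodes. */
	static inline void *demo_kernel_pa_to_va(unsigned long pa)
	{
		if (IS_ENABLED(CONFIG_XIP_KERNEL) && pa < phys_ram_base)
			/* still in flash: apply the XIP offset */
			return (void *)(pa + kernel_map.va_kernel_xip_pa_offset);

		return (void *)(pa + kernel_map.va_kernel_pa_offset + XIP_OFFSET);
	}
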
 
index d3081e4..3397dda 100644 (file)
@@ -11,7 +11,7 @@ endif
 CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
 
 ifdef CONFIG_KEXEC
-AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax
+AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
 endif
 
 extra-y += head.o
index 1a85305..9c05111 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/ptrace.h>
 #include <asm/syscall.h>
 #include <asm/thread_info.h>
+#include <asm/switch_to.h>
 #include <linux/audit.h>
 #include <linux/ptrace.h>
 #include <linux/elf.h>
@@ -56,6 +57,9 @@ static int riscv_fpr_get(struct task_struct *target,
 {
        struct __riscv_d_ext_state *fstate = &target->thread.fstate;
 
+       if (target == current)
+               fstate_save(current, task_pt_regs(current));
+
        membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr));
        membuf_store(&to, fstate->fcsr);
        return membuf_zero(&to, 4);     // explicitly pad
index 18bd0e4..120b2f6 100644 (file)
@@ -229,8 +229,8 @@ static void __init init_resources(void)
        }
 
        /* Clean-up any unused pre-allocated resources */
-       mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
-       memblock_free(__pa(mem_res), mem_res_sz);
+       if (res_idx >= 0)
+               memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res));
        return;
 
  error:
index ac75936..315db3d 100644 (file)
@@ -27,7 +27,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                fp = frame_pointer(regs);
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
-       } else if (task == current) {
+       } else if (task == NULL || task == current) {
                fp = (unsigned long)__builtin_frame_address(1);
                sp = (unsigned long)__builtin_frame_address(0);
                pc = (unsigned long)__builtin_return_address(0);
index a14bf39..7cb4f39 100644 (file)
@@ -36,6 +36,9 @@ EXPORT_SYMBOL(kernel_map);
 #define kernel_map     (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
 #endif
 
+phys_addr_t phys_ram_base __ro_after_init;
+EXPORT_SYMBOL(phys_ram_base);
+
 #ifdef CONFIG_XIP_KERNEL
 extern char _xiprom[], _exiprom[];
 #endif
@@ -160,7 +163,7 @@ static void __init setup_bootmem(void)
        phys_addr_t vmlinux_end = __pa_symbol(&_end);
        phys_addr_t vmlinux_start = __pa_symbol(&_start);
        phys_addr_t __maybe_unused max_mapped_addr;
-       phys_addr_t dram_end;
+       phys_addr_t phys_ram_end;
 
 #ifdef CONFIG_XIP_KERNEL
        vmlinux_start = __pa_symbol(&_sdata);
@@ -181,9 +184,12 @@ static void __init setup_bootmem(void)
 #endif
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
-       dram_end = memblock_end_of_DRAM();
 
+       phys_ram_end = memblock_end_of_DRAM();
 #ifndef CONFIG_64BIT
+#ifndef CONFIG_XIP_KERNEL
+       phys_ram_base = memblock_start_of_DRAM();
+#endif
        /*
         * memblock allocator is not aware of the fact that last 4K bytes of
         * the addressable memory can not be mapped because of IS_ERR_VALUE
@@ -191,15 +197,15 @@ static void __init setup_bootmem(void)
         * if end of dram is equal to maximum addressable memory.  For 64-bit
         * kernel, this problem can't happen here as the end of the virtual
         * address space is occupied by the kernel mapping then this check must
-        * be done in create_kernel_page_table.
+        * be done as soon as the kernel mapping base address is determined.
         */
        max_mapped_addr = __pa(~(ulong)0);
-       if (max_mapped_addr == (dram_end - 1))
+       if (max_mapped_addr == (phys_ram_end - 1))
                memblock_set_current_limit(max_mapped_addr - 4096);
 #endif
 
-       min_low_pfn = PFN_UP(memblock_start_of_DRAM());
-       max_low_pfn = max_pfn = PFN_DOWN(dram_end);
+       min_low_pfn = PFN_UP(phys_ram_base);
+       max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
 
        dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
        set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
@@ -558,6 +564,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
        kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
        kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
 
+       phys_ram_base = CONFIG_PHYS_RAM_BASE;
        kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
        kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
 
index a0e2130..92c0a1b 100644 (file)
@@ -138,6 +138,8 @@ config S390
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN
        select HAVE_ARCH_KASAN_VMALLOC
+       select HAVE_ARCH_KCSAN
+       select HAVE_ARCH_KFENCE
        select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_SOFT_DIRTY
index 1e31728..17dc4f1 100644 (file)
@@ -142,7 +142,8 @@ all: bzImage
 KBUILD_IMAGE   := $(boot)/bzImage
 
 install:
-       $(Q)$(MAKE) $(build)=$(boot) $@
+       sh -x $(srctree)/$(boot)/install.sh $(KERNELRELEASE) $(KBUILD_IMAGE) \
+             System.map "$(INSTALL_PATH)"
 
 bzImage: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
index 41a64b8..0ba6468 100644 (file)
@@ -7,6 +7,7 @@ KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
 KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
@@ -36,7 +37,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
 
 obj-y  := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
 obj-y  += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
-obj-y  += version.o pgm_check_info.o ctype.o text_dma.o
+obj-y  += version.o pgm_check_info.o ctype.o
 obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE))  += uv.o
 obj-$(CONFIG_RELOCATABLE)      += machine_kexec_reloc.o
 obj-$(CONFIG_RANDOMIZE_BASE)   += kaslr.o
@@ -69,7 +70,3 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
 
 $(obj)/startup.a: $(OBJECTS) FORCE
        $(call if_changed,ar)
-
-install:
-       sh -x  $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
-             System.map "$(INSTALL_PATH)"
index ae04e1c..641ce0f 100644 (file)
@@ -2,14 +2,9 @@
 #ifndef BOOT_BOOT_H
 #define BOOT_BOOT_H
 
+#include <asm/extable.h>
 #include <linux/types.h>
 
-#define BOOT_STACK_OFFSET 0x8000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/compiler.h>
-
 void startup_kernel(void);
 unsigned long detect_memory(void);
 bool is_ipl_block_dump(void);
@@ -18,17 +13,22 @@ void setup_boot_command_line(void);
 void parse_boot_command_line(void);
 void verify_facilities(void);
 void print_missing_facilities(void);
+void sclp_early_setup_buffer(void);
 void print_pgm_check_info(void);
 unsigned long get_random_base(unsigned long safe_addr);
 void __printf(1, 2) decompressor_printk(const char *fmt, ...);
 
+/* Symbols defined by linker scripts */
 extern const char kernel_version[];
 extern unsigned long memory_limit;
 extern unsigned long vmalloc_size;
 extern int vmalloc_size_set;
 extern int kaslr_enabled;
+extern char __boot_data_start[], __boot_data_end[];
+extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
+extern char _decompressor_syms_start[], _decompressor_syms_end[];
+extern char _stack_start[], _stack_end[];
 
 unsigned long read_ipl_report(unsigned long safe_offset);
 
-#endif /* __ASSEMBLY__ */
 #endif /* BOOT_BOOT_H */
index 660c799..3b86006 100644 (file)
@@ -9,8 +9,10 @@ KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 obj-y  := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
+obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
 obj-all := $(obj-y) piggy.o syms.o
 targets        := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
diff --git a/arch/s390/boot/compressed/clz_ctz.c b/arch/s390/boot/compressed/clz_ctz.c
new file mode 100644 (file)
index 0000000..c3ebf24
--- /dev/null
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../../lib/clz_ctz.c"
index 37a4a8d..e27c214 100644 (file)
 #define memmove memmove
 #define memzero(s, n) memset((s), 0, (n))
 
-/* Symbols defined by linker scripts */
-extern char _end[];
-extern unsigned char _compressed_start[];
-extern unsigned char _compressed_end[];
-
 #ifdef CONFIG_KERNEL_BZIP2
 #define BOOT_HEAP_SIZE 0x400000
 #elif CONFIG_KERNEL_ZSTD
index 41f0ad9..a59f75c 100644 (file)
@@ -26,7 +26,12 @@ struct vmlinux_info {
        unsigned long rela_dyn_end;
 };
 
+/* Symbols defined by linker scripts */
+extern char _end[];
+extern unsigned char _compressed_start[];
+extern unsigned char _compressed_end[];
 extern char _vmlinux_info[];
+
 #define vmlinux (*(struct vmlinux_info *)_vmlinux_info)
 
 #endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */
index 27a09c1..918e051 100644 (file)
@@ -1,6 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/vmlinux.lds.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/sclp.h>
 
 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
 OUTPUT_ARCH(s390:64-bit)
@@ -34,27 +37,6 @@ SECTIONS
                *(.data.*)
                _edata = . ;
        }
-       /*
-       * .dma section for code, data, ex_table that need to stay below 2 GB,
-       * even when the kernel is relocate: above 2 GB.
-       */
-       . = ALIGN(PAGE_SIZE);
-       _sdma = .;
-       .dma.text : {
-               _stext_dma = .;
-               *(.dma.text)
-               . = ALIGN(PAGE_SIZE);
-               _etext_dma = .;
-       }
-       . = ALIGN(16);
-       .dma.ex_table : {
-               _start_dma_ex_table = .;
-               KEEP(*(.dma.ex_table))
-               _stop_dma_ex_table = .;
-       }
-       .dma.data : { *(.dma.data) }
-       . = ALIGN(PAGE_SIZE);
-       _edma = .;
 
        BOOT_DATA
        BOOT_DATA_PRESERVED
@@ -69,6 +51,17 @@ SECTIONS
                *(.bss)
                *(.bss.*)
                *(COMMON)
+               /*
+                * Stacks for the decompressor
+                */
+               . = ALIGN(PAGE_SIZE);
+               _dump_info_stack_start = .;
+               . += PAGE_SIZE;
+               _dump_info_stack_end = .;
+               . = ALIGN(PAGE_SIZE);
+               _stack_start = .;
+               . += BOOT_STACK_SIZE;
+               _stack_end = .;
                _ebss = .;
        }
 
index 51693cf..40f4cff 100644 (file)
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include "boot.h"
+#include <asm/sclp.h>
 
 #define ARCH_OFFSET    4
 
+#define EP_OFFSET      0x10008
+#define EP_STRING      "S390EP"
+
 __HEAD
 
 #define IPL_BS 0x730
@@ -275,11 +277,11 @@ iplstart:
 .Lcpuid:.fill  8,1,0
 
 #
-# startup-code at 0x10000, running in absolute addressing mode
+# normal startup-code, running in absolute addressing mode
 # this is called either by the ipl loader or directly by PSW restart
 # or linload or SALIPL
 #
-       .org    0x10000
+       .org    STARTUP_NORMAL_OFFSET
 SYM_CODE_START(startup)
        j       startup_normal
        .org    EP_OFFSET
@@ -292,9 +294,9 @@ SYM_CODE_START(startup)
        .ascii  EP_STRING
        .byte   0x00,0x01
 #
-# kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
+# kdump startup-code, running in 64 bit absolute addressing mode
 #
-       .org    0x10010
+       .org    STARTUP_KDUMP_OFFSET
        j       startup_kdump
 SYM_CODE_END(startup)
 SYM_CODE_START_LOCAL(startup_normal)
@@ -315,18 +317,16 @@ SYM_CODE_START_LOCAL(startup_normal)
        xc      0x300(256),0x300
        xc      0xe00(256),0xe00
        xc      0xf00(256),0xf00
-       lctlg   %c0,%c15,.Lctl-.LPG0(%r13)      # load control registers
        stcke   __LC_BOOT_CLOCK
        mvc     __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
        spt     6f-.LPG0(%r13)
        mvc     __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
-       l       %r15,.Lstack-.LPG0(%r13)
+       larl    %r15,_stack_end-STACK_FRAME_OVERHEAD
+       brasl   %r14,sclp_early_setup_buffer
        brasl   %r14,verify_facilities
        brasl   %r14,startup_kernel
 SYM_CODE_END(startup_normal)
 
-.Lstack:
-       .long   BOOT_STACK_OFFSET + BOOT_STACK_SIZE - STACK_FRAME_OVERHEAD
        .align  8
 6:     .long   0x7fffffff,0xffffffff
 .Lext_new_psw:
@@ -335,35 +335,6 @@ SYM_CODE_END(startup_normal)
        .quad   0x0000000180000000,startup_pgm_check_handler
 .Lio_new_psw:
        .quad   0x0002000180000000,0x1f0        # disabled wait
-.Lctl: .quad   0x04040000              # cr0: AFP registers & secondary space
-       .quad   0                       # cr1: primary space segment table
-       .quad   .Lduct                  # cr2: dispatchable unit control table
-       .quad   0                       # cr3: instruction authorization
-       .quad   0xffff                  # cr4: instruction authorization
-       .quad   .Lduct                  # cr5: primary-aste origin
-       .quad   0                       # cr6:  I/O interrupts
-       .quad   0                       # cr7:  secondary space segment table
-       .quad   0x0000000000008000      # cr8:  access registers translation
-       .quad   0                       # cr9:  tracing off
-       .quad   0                       # cr10: tracing off
-       .quad   0                       # cr11: tracing off
-       .quad   0                       # cr12: tracing off
-       .quad   0                       # cr13: home space segment table
-       .quad   0xc0000000              # cr14: machine check handling off
-       .quad   .Llinkage_stack         # cr15: linkage stack operations
-
-       .section .dma.data,"aw",@progbits
-.Lduct: .long  0,.Laste,.Laste,0,.Lduald,0,0,0
-       .long   0,0,0,0,0,0,0,0
-.Llinkage_stack:
-       .long   0,0,0x89000000,0,0,0,0x8a000000,0
-       .align 64
-.Laste:        .quad   0,0xffffffffffffffff,0,0,0,0,0,0
-       .align  128
-.Lduald:.rept  8
-       .long   0x80000000,0,0,0        # invalid access-list entries
-       .endr
-       .previous
 
 #include "head_kdump.S"
 
@@ -386,15 +357,13 @@ SYM_CODE_START_LOCAL(startup_pgm_check_handler)
        oi      __LC_RETURN_PSW+1,0x2   # set wait state bit
        larl    %r9,.Lold_psw_disabled_wait
        stg     %r9,__LC_PGM_NEW_PSW+8
-       l       %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r9)
+       larl    %r15,_dump_info_stack_end-STACK_FRAME_OVERHEAD
        brasl   %r14,print_pgm_check_info
 .Lold_psw_disabled_wait:
        la      %r8,4095
        lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)
        lpswe   __LC_RETURN_PSW         # disabled wait
 SYM_CODE_END(startup_pgm_check_handler)
-.Ldump_info_stack:
-       .long   0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD
 
 #
 # params at 10400 (setup.h)
@@ -415,7 +384,4 @@ SYM_DATA_START(parmarea)
        .org    PARMAREA+__PARMAREA_SIZE
 SYM_DATA_END(parmarea)
 
-       .org    EARLY_SCCB_OFFSET
-       .fill   4096
-
        .org    HEAD_END
index 0b49655..9b14045 100644 (file)
@@ -54,9 +54,9 @@ static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
         * not overlap with any component or any certificate.
         */
 repeat:
-       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
-           intersects(INITRD_START, INITRD_SIZE, safe_addr, size))
-               safe_addr = INITRD_START + INITRD_SIZE;
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
+           intersects(initrd_data.start, initrd_data.size, safe_addr, size))
+               safe_addr = initrd_data.start + initrd_data.size;
        for_each_rb_entry(comp, comps)
                if (intersects(safe_addr, size, comp->addr, comp->len)) {
                        safe_addr = comp->addr + comp->len;
index 0dd48fb..d898446 100644 (file)
@@ -186,9 +186,9 @@ unsigned long get_random_base(unsigned long safe_addr)
         */
        memory_limit -= kasan_estimate_memory_needs(memory_limit);
 
-       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
-               if (safe_addr < INITRD_START + INITRD_SIZE)
-                       safe_addr = INITRD_START + INITRD_SIZE;
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size) {
+               if (safe_addr < initrd_data.start + initrd_data.size)
+                       safe_addr = initrd_data.start + initrd_data.size;
        }
        safe_addr = ALIGN(safe_addr, THREAD_SIZE);
 
index 4e17adb..2f949cd 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/processor.h>
 #include <asm/sclp.h>
 #include <asm/sections.h>
 #include <asm/mem_detect.h>
@@ -24,9 +26,9 @@ static void *mem_detect_alloc_extended(void)
 {
        unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
 
-       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
-           INITRD_START < offset + ENTRIES_EXTENDED_MAX)
-               offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
+           initrd_data.start < offset + ENTRIES_EXTENDED_MAX)
+               offset = ALIGN(initrd_data.start + initrd_data.size, sizeof(u64));
 
        return (void *)offset;
 }
index 3a46abe..209f6ae 100644 (file)
@@ -29,7 +29,6 @@ static char *symstart(char *p)
        return p + 1;
 }
 
-extern char _decompressor_syms_start[], _decompressor_syms_end[];
 static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
 {
        /* symbol entries are in a form "10000 c4 startup\0" */
@@ -126,8 +125,8 @@ out:
 
 static noinline void print_stacktrace(void)
 {
-       struct stack_info boot_stack = { STACK_TYPE_TASK, BOOT_STACK_OFFSET,
-                                        BOOT_STACK_OFFSET + BOOT_STACK_SIZE };
+       struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
+                                        (unsigned long)_stack_end };
        unsigned long sp = S390_lowcore.gpregs_save_area[15];
        bool first = true;
 
index 5a19fd7..6f30646 100644 (file)
@@ -1,2 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
+#include "boot.h"
 #include "../../../drivers/s390/char/sclp_early_core.c"
+
+/* SCLP early buffer must stay page-aligned and below 2GB */
+static char __sclp_early_sccb[EXT_SCCB_READ_SCP] __aligned(PAGE_SIZE);
+
+void sclp_early_setup_buffer(void)
+{
+       sclp_early_set_buffer(&__sclp_early_sccb);
+}
index d0cf216..6dc8d0a 100644 (file)
@@ -12,9 +12,8 @@
 #include <asm/uv.h>
 #include "compressed/decompressor.h"
 #include "boot.h"
+#include "uv.h"
 
-extern char __boot_data_start[], __boot_data_end[];
-extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
 unsigned long __bootdata_preserved(__kaslr_offset);
 unsigned long __bootdata_preserved(VMALLOC_START);
 unsigned long __bootdata_preserved(VMALLOC_END);
@@ -24,44 +23,11 @@ unsigned long __bootdata_preserved(MODULES_VADDR);
 unsigned long __bootdata_preserved(MODULES_END);
 unsigned long __bootdata(ident_map_size);
 int __bootdata(is_full_image) = 1;
+struct initrd_data __bootdata(initrd_data);
 
 u64 __bootdata_preserved(stfle_fac_list[16]);
 u64 __bootdata_preserved(alt_stfle_fac_list[16]);
-
-/*
- * Some code and data needs to stay below 2 GB, even when the kernel would be
- * relocated above 2 GB, because it has to use 31 bit addresses.
- * Such code and data is part of the .dma section, and its location is passed
- * over to the decompressed / relocated kernel via the .boot.preserved.data
- * section.
- */
-extern char _sdma[], _edma[];
-extern char _stext_dma[], _etext_dma[];
-extern struct exception_table_entry _start_dma_ex_table[];
-extern struct exception_table_entry _stop_dma_ex_table[];
-unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
-unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
-unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
-unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
-struct exception_table_entry *
-       __bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
-struct exception_table_entry *
-       __bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;
-
-int _diag210_dma(struct diag210 *addr);
-int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
-int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
-void _diag0c_dma(struct hypfs_diag0c_entry *entry);
-void _diag308_reset_dma(void);
-struct diag_ops __bootdata_preserved(diag_dma_ops) = {
-       .diag210 = _diag210_dma,
-       .diag26c = _diag26c_dma,
-       .diag14 = _diag14_dma,
-       .diag0c = _diag0c_dma,
-       .diag308_reset = _diag308_reset_dma
-};
-static struct diag210 _diag210_tmp_dma __section(".dma.data");
-struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
+struct oldmem_data __bootdata_preserved(oldmem_data);
 
 void error(char *x)
 {
@@ -91,12 +57,12 @@ static void rescue_initrd(unsigned long addr)
 {
        if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
                return;
-       if (!INITRD_START || !INITRD_SIZE)
+       if (!initrd_data.start || !initrd_data.size)
                return;
-       if (addr <= INITRD_START)
+       if (addr <= initrd_data.start)
                return;
-       memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
-       INITRD_START = addr;
+       memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
+       initrd_data.start = addr;
 }
 
 static void copy_bootdata(void)
@@ -169,9 +135,9 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
        ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);
 
 #ifdef CONFIG_CRASH_DUMP
-       if (OLDMEM_BASE) {
+       if (oldmem_data.start) {
                kaslr_enabled = 0;
-               ident_map_size = min(ident_map_size, OLDMEM_SIZE);
+               ident_map_size = min(ident_map_size, oldmem_data.size);
        } else if (ipl_block_valid && is_ipl_block_dump()) {
                kaslr_enabled = 0;
                if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
@@ -282,12 +248,28 @@ static void setup_vmalloc_size(void)
        vmalloc_size = max(size, vmalloc_size);
 }
 
+static void offset_vmlinux_info(unsigned long offset)
+{
+       vmlinux.default_lma += offset;
+       *(unsigned long *)(&vmlinux.entry) += offset;
+       vmlinux.bootdata_off += offset;
+       vmlinux.bootdata_preserved_off += offset;
+       vmlinux.rela_dyn_start += offset;
+       vmlinux.rela_dyn_end += offset;
+       vmlinux.dynsym_start += offset;
+}
+
 void startup_kernel(void)
 {
        unsigned long random_lma;
        unsigned long safe_addr;
        void *img;
 
+       initrd_data.start = parmarea.initrd_start;
+       initrd_data.size = parmarea.initrd_size;
+       oldmem_data.start = parmarea.oldmem_base;
+       oldmem_data.size = parmarea.oldmem_size;
+
        setup_lpp();
        store_ipl_parmblock();
        safe_addr = mem_safe_offset();
@@ -297,23 +279,17 @@ void startup_kernel(void)
        sclp_early_read_info();
        setup_boot_command_line();
        parse_boot_command_line();
+       sanitize_prot_virt_host();
        setup_ident_map_size(detect_memory());
        setup_vmalloc_size();
        setup_kernel_memory_layout();
 
-       random_lma = __kaslr_offset = 0;
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
                random_lma = get_random_base(safe_addr);
                if (random_lma) {
                        __kaslr_offset = random_lma - vmlinux.default_lma;
                        img = (void *)vmlinux.default_lma;
-                       vmlinux.default_lma += __kaslr_offset;
-                       vmlinux.entry += __kaslr_offset;
-                       vmlinux.bootdata_off += __kaslr_offset;
-                       vmlinux.bootdata_preserved_off += __kaslr_offset;
-                       vmlinux.rela_dyn_start += __kaslr_offset;
-                       vmlinux.rela_dyn_end += __kaslr_offset;
-                       vmlinux.dynsym_start += __kaslr_offset;
+                       offset_vmlinux_info(__kaslr_offset);
                }
        }
 
diff --git a/arch/s390/boot/text_dma.S b/arch/s390/boot/text_dma.S
deleted file mode 100644 (file)
index 5ff5fee..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Code that needs to run below 2 GB.
- *
- * Copyright IBM Corp. 2019
- */
-
-#include <linux/linkage.h>
-#include <asm/errno.h>
-#include <asm/sigp.h>
-
-       .section .dma.text,"ax"
-/*
- * Simplified version of expoline thunk. The normal thunks can not be used here,
- * because they might be more than 2 GB away, and not reachable by the relative
- * branch. No comdat, exrl, etc. optimizations used here, because it only
- * affects a few functions that are not performance-relevant.
- */
-       .macro BR_EX_DMA_r14
-       larl    %r1,0f
-       ex      0,0(%r1)
-       j       .
-0:     br      %r14
-       .endm
-
-/*
- * int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode)
- */
-ENTRY(_diag14_dma)
-       lgr     %r1,%r2
-       lgr     %r2,%r3
-       lgr     %r3,%r4
-       lhi     %r5,-EIO
-       sam31
-       diag    %r1,%r2,0x14
-.Ldiag14_ex:
-       ipm     %r5
-       srl     %r5,28
-.Ldiag14_fault:
-       sam64
-       lgfr    %r2,%r5
-       BR_EX_DMA_r14
-       EX_TABLE_DMA(.Ldiag14_ex, .Ldiag14_fault)
-ENDPROC(_diag14_dma)
-
-/*
- * int _diag210_dma(struct diag210 *addr)
- */
-ENTRY(_diag210_dma)
-       lgr     %r1,%r2
-       lhi     %r2,-1
-       sam31
-       diag    %r1,%r0,0x210
-.Ldiag210_ex:
-       ipm     %r2
-       srl     %r2,28
-.Ldiag210_fault:
-       sam64
-       lgfr    %r2,%r2
-       BR_EX_DMA_r14
-       EX_TABLE_DMA(.Ldiag210_ex, .Ldiag210_fault)
-ENDPROC(_diag210_dma)
-
-/*
- * int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode)
- */
-ENTRY(_diag26c_dma)
-       lghi    %r5,-EOPNOTSUPP
-       sam31
-       diag    %r2,%r4,0x26c
-.Ldiag26c_ex:
-       sam64
-       lgfr    %r2,%r5
-       BR_EX_DMA_r14
-       EX_TABLE_DMA(.Ldiag26c_ex, .Ldiag26c_ex)
-ENDPROC(_diag26c_dma)
-
-/*
- * void _diag0c_dma(struct hypfs_diag0c_entry *entry)
- */
-ENTRY(_diag0c_dma)
-       sam31
-       diag    %r2,%r2,0x0c
-       sam64
-       BR_EX_DMA_r14
-ENDPROC(_diag0c_dma)
-
-/*
- * void _diag308_reset_dma(void)
- *
- * Calls diag 308 subcode 1 and continues execution
- */
-ENTRY(_diag308_reset_dma)
-       larl    %r4,.Lctlregs           # Save control registers
-       stctg   %c0,%c15,0(%r4)
-       lg      %r2,0(%r4)              # Disable lowcore protection
-       nilh    %r2,0xefff
-       larl    %r4,.Lctlreg0
-       stg     %r2,0(%r4)
-       lctlg   %c0,%c0,0(%r4)
-       larl    %r4,.Lfpctl             # Floating point control register
-       stfpc   0(%r4)
-       larl    %r4,.Lprefix            # Save prefix register
-       stpx    0(%r4)
-       larl    %r4,.Lprefix_zero       # Set prefix register to 0
-       spx     0(%r4)
-       larl    %r4,.Lcontinue_psw      # Save PSW flags
-       epsw    %r2,%r3
-       stm     %r2,%r3,0(%r4)
-       larl    %r4,restart_part2       # Setup restart PSW at absolute 0
-       larl    %r3,.Lrestart_diag308_psw
-       og      %r4,0(%r3)              # Save PSW
-       lghi    %r3,0
-       sturg   %r4,%r3                 # Use sturg, because of large pages
-       lghi    %r1,1
-       lghi    %r0,0
-       diag    %r0,%r1,0x308
-restart_part2:
-       lhi     %r0,0                   # Load r0 with zero
-       lhi     %r1,2                   # Use mode 2 = ESAME (dump)
-       sigp    %r1,%r0,SIGP_SET_ARCHITECTURE   # Switch to ESAME mode
-       sam64                           # Switch to 64 bit addressing mode
-       larl    %r4,.Lctlregs           # Restore control registers
-       lctlg   %c0,%c15,0(%r4)
-       larl    %r4,.Lfpctl             # Restore floating point ctl register
-       lfpc    0(%r4)
-       larl    %r4,.Lprefix            # Restore prefix register
-       spx     0(%r4)
-       larl    %r4,.Lcontinue_psw      # Restore PSW flags
-       lpswe   0(%r4)
-.Lcontinue:
-       BR_EX_DMA_r14
-ENDPROC(_diag308_reset_dma)
-
-       .section .dma.data,"aw",@progbits
-.align 8
-.Lrestart_diag308_psw:
-       .long   0x00080000,0x80000000
-
-.align 8
-.Lcontinue_psw:
-       .quad   0,.Lcontinue
-
-.align 8
-.Lctlreg0:
-       .quad   0
-.Lctlregs:
-       .rept   16
-       .quad   0
-       .endr
-.Lfpctl:
-       .long   0
-.Lprefix:
-       .long   0
-.Lprefix_zero:
-       .long   0
index f6b0c4f..e6be155 100644 (file)
@@ -1,8 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <asm/uv.h>
+#include <asm/boot_data.h>
 #include <asm/facility.h>
 #include <asm/sections.h>
 
+#include "boot.h"
+#include "uv.h"
+
 /* will be used in arch/s390/kernel/uv.c */
 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
 int __bootdata_preserved(prot_virt_guest);
@@ -47,26 +51,34 @@ void uv_query_info(void)
 }
 
 #if IS_ENABLED(CONFIG_KVM)
-static bool has_uv_sec_stor_limit(void)
+void adjust_to_uv_max(unsigned long *vmax)
 {
-       /*
-        * keep these conditions in line with setup_uv()
-        */
-       if (!is_prot_virt_host())
-               return false;
+       if (is_prot_virt_host() && uv_info.max_sec_stor_addr)
+               *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+}
 
+static int is_prot_virt_host_capable(void)
+{
+       /* disable if no prot_virt=1 given on command-line */
+       if (!is_prot_virt_host())
+               return 0;
+       /* disable if protected guest virtualization is enabled */
        if (is_prot_virt_guest())
-               return false;
-
+               return 0;
+       /* disable if no hardware support */
        if (!test_facility(158))
-               return false;
-
-       return !!uv_info.max_sec_stor_addr;
+               return 0;
+       /* disable if kdump */
+       if (oldmem_data.start)
+               return 0;
+       /* disable if stand-alone dump */
+       if (ipl_block_valid && is_ipl_block_dump())
+               return 0;
+       return 1;
 }
 
-void adjust_to_uv_max(unsigned long *vmax)
+void sanitize_prot_virt_host(void)
 {
-       if (has_uv_sec_stor_limit())
-               *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+       prot_virt_host = is_prot_virt_host_capable();
 }
 #endif
diff --git a/arch/s390/boot/uv.h b/arch/s390/boot/uv.h
new file mode 100644 (file)
index 0000000..690ce01
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BOOT_UV_H
+#define BOOT_UV_H
+
+#if IS_ENABLED(CONFIG_KVM)
+void adjust_to_uv_max(unsigned long *vmax);
+void sanitize_prot_virt_host(void);
+#else
+static inline void adjust_to_uv_max(unsigned long *vmax) {}
+static inline void sanitize_prot_virt_host(void) {}
+#endif
+
+#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
+void uv_query_info(void);
+#else
+static inline void uv_query_info(void) {}
+#endif
+
+#endif /* BOOT_UV_H */
index 7de253f..11ffc7c 100644 (file)
@@ -10,7 +10,6 @@ CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
 CONFIG_PREEMPT=y
-CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -75,7 +74,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_MODULE_SIG_SHA256=y
-CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -335,7 +333,7 @@ CONFIG_L2TP_DEBUGFS=m
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
-CONFIG_BRIDGE=m
+CONFIG_BRIDGE=y
 CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
@@ -466,6 +464,7 @@ CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
 CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
 CONFIG_DM_SWITCH=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
index b671642..e1642d2 100644 (file)
@@ -8,7 +8,6 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
-CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -325,7 +324,7 @@ CONFIG_L2TP_DEBUGFS=m
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
-CONFIG_BRIDGE=m
+CONFIG_BRIDGE=y
 CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
index 6c43d2b..9a27860 100644 (file)
@@ -21,7 +21,7 @@
 static void diag0c_fn(void *data)
 {
        diag_stat_inc(DIAG_STAT_X00C);
-       diag_dma_ops.diag0c(((void **) data)[smp_processor_id()]);
+       diag_amode31_ops.diag0c(((void **)data)[smp_processor_id()]);
 }
 
 /*
@@ -33,12 +33,12 @@ static void *diag0c_store(unsigned int *count)
        unsigned int cpu_count, cpu, i;
        void **cpu_vec;
 
-       get_online_cpus();
+       cpus_read_lock();
        cpu_count = num_online_cpus();
        cpu_vec = kmalloc_array(num_possible_cpus(), sizeof(*cpu_vec),
                                GFP_KERNEL);
        if (!cpu_vec)
-               goto fail_put_online_cpus;
+               goto fail_unlock_cpus;
        /* Note: Diag 0c needs 8 byte alignment and real storage */
        diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count),
                              GFP_KERNEL | GFP_DMA);
@@ -54,13 +54,13 @@ static void *diag0c_store(unsigned int *count)
        on_each_cpu(diag0c_fn, cpu_vec, 1);
        *count = cpu_count;
        kfree(cpu_vec);
-       put_online_cpus();
+       cpus_read_unlock();
        return diag0c_data;
 
 fail_kfree_cpu_vec:
        kfree(cpu_vec);
-fail_put_online_cpus:
-       put_online_cpus();
+fail_unlock_cpus:
+       cpus_read_unlock();
        return ERR_PTR(-ENOMEM);
 }
 
index f58c92f..1effac6 100644 (file)
@@ -5,7 +5,6 @@
 #ifndef _ASM_S390_CIO_H_
 #define _ASM_S390_CIO_H_
 
-#include <linux/spinlock.h>
 #include <linux/bitops.h>
 #include <linux/genalloc.h>
 #include <asm/types.h>
index c0f3bfe..646b129 100644 (file)
@@ -173,17 +173,16 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
  */
 static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
 {
-       register unsigned long r0 asm("0") = 0; /* query function */
-       register unsigned long r1 asm("1") = (unsigned long) mask;
-
        asm volatile(
-               "       spm 0\n" /* pckmo doesn't change the cc */
+               "       lghi    0,0\n" /* query function */
+               "       lgr     1,%[mask]\n"
+               "       spm     0\n" /* pckmo doesn't change the cc */
                /* Parameter regs are ignored, but must be nonzero and unique */
                "0:     .insn   rrf,%[opc] << 16,2,4,6,0\n"
                "       brc     1,0b\n" /* handle partial completion */
                : "=m" (*mask)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (opcode)
-               : "cc");
+               : [mask] "d" ((unsigned long)mask), [opc] "i" (opcode)
+               : "cc", "0", "1");
 }
 
 static __always_inline int __cpacf_check_opcode(unsigned int opcode)
@@ -249,20 +248,22 @@ static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int fu
 static inline int cpacf_km(unsigned long func, void *param,
                           u8 *dest, const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) dest;
+       union register_pair d, s;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,%[dst],%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KM)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KM)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -279,20 +280,22 @@ static inline int cpacf_km(unsigned long func, void *param,
 static inline int cpacf_kmc(unsigned long func, void *param,
                            u8 *dest, const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) dest;
+       union register_pair d, s;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,%[dst],%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMC)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMC)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -306,17 +309,19 @@ static inline int cpacf_kmc(unsigned long func, void *param,
 static inline void cpacf_kimd(unsigned long func, void *param,
                              const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
+       union register_pair s;
 
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KIMD)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
+                 [opc] "i" (CPACF_KIMD)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -329,17 +334,19 @@ static inline void cpacf_kimd(unsigned long func, void *param,
 static inline void cpacf_klmd(unsigned long func, void *param,
                              const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
+       union register_pair s;
 
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KLMD)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KLMD)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -355,19 +362,21 @@ static inline void cpacf_klmd(unsigned long func, void *param,
 static inline int cpacf_kmac(unsigned long func, void *param,
                             const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
+       union register_pair s;
 
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMAC)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMAC)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -385,22 +394,24 @@ static inline int cpacf_kmac(unsigned long func, void *param,
 static inline int cpacf_kmctr(unsigned long func, void *param, u8 *dest,
                              const u8 *src, long src_len, u8 *counter)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) dest;
-       register unsigned long r6 asm("6") = (unsigned long) counter;
+       union register_pair d, s, c;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
+       c.even = (unsigned long)counter;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rrf,%[opc] << 16,%[dst],%[src],%[ctr],0\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3),
-                 [dst] "+a" (r4), [ctr] "+a" (r6)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMCTR)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair), [dst] "+&d" (d.pair),
+                 [ctr] "+&d" (c.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMCTR)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -417,20 +428,21 @@ static inline void cpacf_prno(unsigned long func, void *param,
                              u8 *dest, unsigned long dest_len,
                              const u8 *seed, unsigned long seed_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) dest;
-       register unsigned long r3 asm("3") = (unsigned long) dest_len;
-       register unsigned long r4 asm("4") = (unsigned long) seed;
-       register unsigned long r5 asm("5") = (unsigned long) seed_len;
+       union register_pair d, s;
 
+       d.even = (unsigned long)dest;
+       d.odd  = (unsigned long)dest_len;
+       s.even = (unsigned long)seed;
+       s.odd  = (unsigned long)seed_len;
        asm volatile (
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,%[dst],%[seed]\n"
                "       brc     1,0b\n"   /* handle partial completion */
-               : [dst] "+a" (r2), [dlen] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1),
-                 [seed] "a" (r4), [slen] "d" (r5), [opc] "i" (CPACF_PRNO)
-               : "cc", "memory");
+               : [dst] "+&d" (d.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [seed] "d" (s.pair), [opc] "i" (CPACF_PRNO)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -443,19 +455,19 @@ static inline void cpacf_prno(unsigned long func, void *param,
 static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
                              u8 *cbuf, unsigned long cbuf_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) CPACF_PRNO_TRNG;
-       register unsigned long r2 asm("2") = (unsigned long) ucbuf;
-       register unsigned long r3 asm("3") = (unsigned long) ucbuf_len;
-       register unsigned long r4 asm("4") = (unsigned long) cbuf;
-       register unsigned long r5 asm("5") = (unsigned long) cbuf_len;
+       union register_pair u, c;
 
+       u.even = (unsigned long)ucbuf;
+       u.odd  = (unsigned long)ucbuf_len;
+       c.even = (unsigned long)cbuf;
+       c.odd  = (unsigned long)cbuf_len;
        asm volatile (
+               "       lghi    0,%[fc]\n"
                "0:     .insn   rre,%[opc] << 16,%[ucbuf],%[cbuf]\n"
                "       brc     1,0b\n"   /* handle partial completion */
-               : [ucbuf] "+a" (r2), [ucbuflen] "+d" (r3),
-                 [cbuf] "+a" (r4), [cbuflen] "+d" (r5)
-               : [fc] "d" (r0), [opc] "i" (CPACF_PRNO)
-               : "cc", "memory");
+               : [ucbuf] "+&d" (u.pair), [cbuf] "+&d" (c.pair)
+               : [fc] "K" (CPACF_PRNO_TRNG), [opc] "i" (CPACF_PRNO)
+               : "cc", "memory", "0");
 }
 
 /**
@@ -466,15 +478,15 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
  */
 static inline void cpacf_pcc(unsigned long func, void *param)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,0\n" /* PCC opcode */
                "       brc     1,0b\n" /* handle partial completion */
                :
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCC)
-               : "cc", "memory");
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_PCC)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -487,14 +499,14 @@ static inline void cpacf_pcc(unsigned long func, void *param)
  */
 static inline void cpacf_pckmo(long func, void *param)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "       .insn   rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
                :
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCKMO)
-               : "cc", "memory");
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_PCKMO)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -512,21 +524,23 @@ static inline void cpacf_kma(unsigned long func, void *param, u8 *dest,
                             const u8 *src, unsigned long src_len,
                             const u8 *aad, unsigned long aad_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) aad;
-       register unsigned long r5 asm("5") = (unsigned long) aad_len;
-       register unsigned long r6 asm("6") = (unsigned long) dest;
+       union register_pair d, s, a;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
+       a.even = (unsigned long)aad;
+       a.odd  = (unsigned long)aad_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [dst] "+a" (r6), [src] "+a" (r2), [slen] "+d" (r3),
-                 [aad] "+a" (r4), [alen] "+d" (r5)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMA)
-               : "cc", "memory");
+               : [dst] "+&d" (d.pair), [src] "+&d" (s.pair),
+                 [aad] "+&d" (a.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMA)
+               : "cc", "memory", "0", "1");
 }
 
 #endif /* _ASM_S390_CPACF_H */
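The cpacf.h hunks above drop the fragile register-asm variables in favour of union register_pair operands plus explicit "0"/"1" clobbers, with the function code and parameter-block address loaded inside the asm body. A minimal sketch of the register-pair idea follows; the union is redeclared here from memory so the snippet builds on its own (the real definition lives in the s390 headers, not in this patch), and the addresses/lengths are invented:

/* Sketch only: on s390 the 128-bit 'pair' member is handed to inline
 * asm as a single operand that the compiler allocates to an even/odd
 * general-purpose register pair.                                      */
#include <stdio.h>

union register_pair {
	unsigned __int128 pair;		/* single asm operand           */
	struct {
		unsigned long even;	/* e.g. buffer address          */
		unsigned long odd;	/* e.g. remaining length        */
	};
};

int main(void)
{
	union register_pair s;

	s.even = 0x2000;		/* pretend source address      */
	s.odd  = 4096;			/* pretend source length       */

	/* A CPACF instruction updates the pair in place; the caller then
	 * derives the processed byte count as original_len - s.odd, just
	 * like the "return src_len - s.odd;" lines in the hunks above.  */
	printf("even=%#lx odd=%lu\n", s.even, s.odd);
	return 0;
}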
index 1d007c6..14cfd48 100644 (file)
@@ -23,7 +23,7 @@
 #define MAX_ELF_HWCAP_FEATURES (8 * sizeof(elf_hwcap))
 #define MAX_CPU_FEATURES       MAX_ELF_HWCAP_FEATURES
 
-#define cpu_feature(feat)      ilog2(HWCAP_S390_ ## feat)
+#define cpu_feature(feat)      ilog2(HWCAP_ ## feat)
 
 int cpu_have_feature(unsigned int nr);
 
index adc0179..04dc65f 100644 (file)
@@ -111,6 +111,23 @@ union ctlreg2 {
        };
 };
 
+union ctlreg5 {
+       unsigned long val;
+       struct {
+               unsigned long       : 33;
+               unsigned long pasteo: 25;
+               unsigned long       : 6;
+       };
+};
+
+union ctlreg15 {
+       unsigned long val;
+       struct {
+               unsigned long lsea  : 61;
+               unsigned long       : 3;
+       };
+};
+
 #define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
 #define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
 
index c1b82bc..19a55e1 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/time.h>
 #include <linux/refcount.h>
 #include <linux/fs.h>
+#include <linux/init.h>
 
 #define DEBUG_MAX_LEVEL                   6  /* debug levels range from 0 to 6 */
 #define DEBUG_OFF_LEVEL                   -1 /* level where debug is switched off */
@@ -391,38 +392,99 @@ int debug_register_view(debug_info_t *id, struct debug_view *view);
 
 int debug_unregister_view(debug_info_t *id, struct debug_view *view);
 
+#ifndef MODULE
+
+/*
+ * Note: Initial page and area numbers must be fixed to allow static
+ * initialization. This enables very early tracing. Changes to these values
+ * must be reflected in __DEFINE_STATIC_AREA.
+ */
+#define EARLY_PAGES            8
+#define EARLY_AREAS            1
+
+#define VNAME(var, suffix)     __##var##_##suffix
+
 /*
-   define the debug levels:
-   - 0 No debugging output to console or syslog
-   - 1 Log internal errors to syslog, ignore check conditions
-   - 2 Log internal errors and check conditions to syslog
-   - 3 Log internal errors to console, log check conditions to syslog
-   - 4 Log internal errors and check conditions to console
-   - 5 panic on internal errors, log check conditions to console
-   - 6 panic on both, internal errors and check conditions
+ * Define static areas for early trace data. During boot debug_register_static()
+ * will replace these with dynamically allocated areas to allow custom page and
+ * area sizes, and dynamic resizing.
  */
+#define __DEFINE_STATIC_AREA(var)                                      \
+static char VNAME(var, data)[EARLY_PAGES][PAGE_SIZE] __initdata;       \
+static debug_entry_t *VNAME(var, pages)[EARLY_PAGES] __initdata = {    \
+       (debug_entry_t *)VNAME(var, data)[0],                           \
+       (debug_entry_t *)VNAME(var, data)[1],                           \
+       (debug_entry_t *)VNAME(var, data)[2],                           \
+       (debug_entry_t *)VNAME(var, data)[3],                           \
+       (debug_entry_t *)VNAME(var, data)[4],                           \
+       (debug_entry_t *)VNAME(var, data)[5],                           \
+       (debug_entry_t *)VNAME(var, data)[6],                           \
+       (debug_entry_t *)VNAME(var, data)[7],                           \
+};                                                                     \
+static debug_entry_t **VNAME(var, areas)[EARLY_AREAS] __initdata = {   \
+       (debug_entry_t **)VNAME(var, pages),                            \
+};                                                                     \
+static int VNAME(var, active_pages)[EARLY_AREAS] __initdata;           \
+static int VNAME(var, active_entries)[EARLY_AREAS] __initdata
+
+#define __DEBUG_INFO_INIT(var, _name, _buf_size) {                     \
+       .next = NULL,                                                   \
+       .prev = NULL,                                                   \
+       .ref_count = REFCOUNT_INIT(1),                                  \
+       .lock = __SPIN_LOCK_UNLOCKED(var.lock),                         \
+       .level = DEBUG_DEFAULT_LEVEL,                                   \
+       .nr_areas = EARLY_AREAS,                                        \
+       .pages_per_area = EARLY_PAGES,                                  \
+       .buf_size = (_buf_size),                                        \
+       .entry_size = sizeof(debug_entry_t) + (_buf_size),              \
+       .areas = VNAME(var, areas),                                     \
+       .active_area = 0,                                               \
+       .active_pages = VNAME(var, active_pages),                       \
+       .active_entries = VNAME(var, active_entries),                   \
+       .debugfs_root_entry = NULL,                                     \
+       .debugfs_entries = { NULL },                                    \
+       .views = { NULL },                                              \
+       .name = (_name),                                                \
+       .mode = 0600,                                                   \
+}
+
+#define __REGISTER_STATIC_DEBUG_INFO(var, name, pages, areas, view)    \
+static int __init VNAME(var, reg)(void)                                        \
+{                                                                      \
+       debug_register_static(&var, (pages), (areas));                  \
+       debug_register_view(&var, (view));                              \
+       return 0;                                                       \
+}                                                                      \
+arch_initcall(VNAME(var, reg))
+
+/**
+ * DEFINE_STATIC_DEBUG_INFO - Define static debug_info_t
+ *
+ * @var: Name of debug_info_t variable
+ * @name: Name of debug log (e.g. used for debugfs entry)
+ * @pages_per_area: Number of pages per area
+ * @nr_areas: Number of debug areas
+ * @buf_size: Size of data area in each debug entry
+ * @view: Pointer to debug view struct
+ *
+ * Define a static debug_info_t for early tracing. The associated debugfs log
+ * is automatically registered with the specified debug view.
+ *
+ * Important: Users of this macro must not call any of the
+ * debug_register/_unregister() functions for this debug_info_t!
+ *
+ * Note: Tracing will start with a fixed number of initial pages and areas.
+ * The debug area will be changed to use the specified numbers during
+ * arch_initcall.
+ */
+#define DEFINE_STATIC_DEBUG_INFO(var, name, pages, nr_areas, buf_size, view) \
+__DEFINE_STATIC_AREA(var);                                             \
+static debug_info_t __refdata var =                                    \
+       __DEBUG_INFO_INIT(var, (name), (buf_size));                     \
+__REGISTER_STATIC_DEBUG_INFO(var, name, pages, nr_areas, view)
+
+void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);
 
-#ifndef DEBUG_LEVEL
-#define DEBUG_LEVEL 4
-#endif
-
-#define INTERNAL_ERRMSG(x,y...) "E" __FILE__ "%d: " x, __LINE__, y
-#define INTERNAL_WRNMSG(x,y...) "W" __FILE__ "%d: " x, __LINE__, y
-#define INTERNAL_INFMSG(x,y...) "I" __FILE__ "%d: " x, __LINE__, y
-#define INTERNAL_DEBMSG(x,y...) "D" __FILE__ "%d: " x, __LINE__, y
-
-#if DEBUG_LEVEL > 0
-#define PRINT_DEBUG(x...)      printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_INFO(x...)       printk(KERN_INFO PRINTK_HEADER x)
-#define PRINT_WARN(x...)       printk(KERN_WARNING PRINTK_HEADER x)
-#define PRINT_ERR(x...)                printk(KERN_ERR PRINTK_HEADER x)
-#define PRINT_FATAL(x...)      panic(PRINTK_HEADER x)
-#else
-#define PRINT_DEBUG(x...)      printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_INFO(x...)       printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_WARN(x...)       printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_ERR(x...)                printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_FATAL(x...)      printk(KERN_DEBUG PRINTK_HEADER x)
-#endif /* DASD_DEBUG */
+#endif /* MODULE */
 
 #endif /* DEBUG_H */
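The DEFINE_STATIC_DEBUG_INFO() documentation above lists the parameters but the hunk shows no caller. A minimal usage sketch, assuming a built-in (non-module) caller in kernel context; the log name, page/area counts, buffer size and the trace helper are invented for illustration, while debug_event() and debug_hex_ascii_view are the existing s390 debug-facility interfaces:

#include <asm/debug.h>

/* Hypothetical early-boot trace log: 4 pages per area, 1 area,
 * 16 bytes of payload per entry, exposed through the hex/ascii view. */
DEFINE_STATIC_DEBUG_INFO(my_early_dbf, "my_early", 4, 1, 16,
			 &debug_hex_ascii_view);

static void trace_value(unsigned long val)
{
	/* Events logged before the arch_initcall still land in the
	 * static EARLY_PAGES/EARLY_AREAS buffers set up by the macro. */
	debug_event(&my_early_dbf, 0, &val, sizeof(val));
}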
index ca8f85b..b3a8cb4 100644 (file)
@@ -309,6 +309,10 @@ int diag26c(void *req, void *resp, enum diag26c_sc subcode);
 
 struct hypfs_diag0c_entry;
 
+/*
+ * This structure must contain only pointers/references into
+ * the AMODE31 text section.
+ */
 struct diag_ops {
        int (*diag210)(struct diag210 *addr);
        int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
@@ -317,6 +321,13 @@ struct diag_ops {
        void (*diag308_reset)(void);
 };
 
-extern struct diag_ops diag_dma_ops;
-extern struct diag210 *__diag210_tmp_dma;
+extern struct diag_ops diag_amode31_ops;
+extern struct diag210 *__diag210_tmp_amode31;
+
+int _diag210_amode31(struct diag210 *addr);
+int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode);
+int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode);
+void _diag0c_amode31(struct hypfs_diag0c_entry *entry);
+void _diag308_reset_amode31(void);
+
 #endif /* _ASM_S390_DIAG_H */
index bd00c94..70a30ae 100644 (file)
 /* Keep this the last entry.  */
 #define R_390_NUM      61
 
-/* Bits present in AT_HWCAP. */
-#define HWCAP_S390_ESAN3       1
-#define HWCAP_S390_ZARCH       2
-#define HWCAP_S390_STFLE       4
-#define HWCAP_S390_MSA         8
-#define HWCAP_S390_LDISP       16
-#define HWCAP_S390_EIMM                32
-#define HWCAP_S390_DFP         64
-#define HWCAP_S390_HPAGE       128
-#define HWCAP_S390_ETF3EH      256
-#define HWCAP_S390_HIGH_GPRS   512
-#define HWCAP_S390_TE          1024
-#define HWCAP_S390_VXRS                2048
-#define HWCAP_S390_VXRS_BCD    4096
-#define HWCAP_S390_VXRS_EXT    8192
-#define HWCAP_S390_GS          16384
-#define HWCAP_S390_VXRS_EXT2   32768
-#define HWCAP_S390_VXRS_PDE    65536
-#define HWCAP_S390_SORT                131072
-#define HWCAP_S390_DFLT                262144
+enum {
+       HWCAP_NR_ESAN3          = 0,
+       HWCAP_NR_ZARCH          = 1,
+       HWCAP_NR_STFLE          = 2,
+       HWCAP_NR_MSA            = 3,
+       HWCAP_NR_LDISP          = 4,
+       HWCAP_NR_EIMM           = 5,
+       HWCAP_NR_DFP            = 6,
+       HWCAP_NR_HPAGE          = 7,
+       HWCAP_NR_ETF3EH         = 8,
+       HWCAP_NR_HIGH_GPRS      = 9,
+       HWCAP_NR_TE             = 10,
+       HWCAP_NR_VXRS           = 11,
+       HWCAP_NR_VXRS_BCD       = 12,
+       HWCAP_NR_VXRS_EXT       = 13,
+       HWCAP_NR_GS             = 14,
+       HWCAP_NR_VXRS_EXT2      = 15,
+       HWCAP_NR_VXRS_PDE       = 16,
+       HWCAP_NR_SORT           = 17,
+       HWCAP_NR_DFLT           = 18,
+       HWCAP_NR_VXRS_PDE2      = 19,
+       HWCAP_NR_NNPA           = 20,
+       HWCAP_NR_PCI_MIO        = 21,
+       HWCAP_NR_SIE            = 22,
+       HWCAP_NR_MAX
+};
 
-/* Internal bits, not exposed via elf */
-#define HWCAP_INT_SIE          1UL
+/* Bits present in AT_HWCAP. */
+#define HWCAP_ESAN3            BIT(HWCAP_NR_ESAN3)
+#define HWCAP_ZARCH            BIT(HWCAP_NR_ZARCH)
+#define HWCAP_STFLE            BIT(HWCAP_NR_STFLE)
+#define HWCAP_MSA              BIT(HWCAP_NR_MSA)
+#define HWCAP_LDISP            BIT(HWCAP_NR_LDISP)
+#define HWCAP_EIMM             BIT(HWCAP_NR_EIMM)
+#define HWCAP_DFP              BIT(HWCAP_NR_DFP)
+#define HWCAP_HPAGE            BIT(HWCAP_NR_HPAGE)
+#define HWCAP_ETF3EH           BIT(HWCAP_NR_ETF3EH)
+#define HWCAP_HIGH_GPRS                BIT(HWCAP_NR_HIGH_GPRS)
+#define HWCAP_TE               BIT(HWCAP_NR_TE)
+#define HWCAP_VXRS             BIT(HWCAP_NR_VXRS)
+#define HWCAP_VXRS_BCD         BIT(HWCAP_NR_VXRS_BCD)
+#define HWCAP_VXRS_EXT         BIT(HWCAP_NR_VXRS_EXT)
+#define HWCAP_GS               BIT(HWCAP_NR_GS)
+#define HWCAP_VXRS_EXT2                BIT(HWCAP_NR_VXRS_EXT2)
+#define HWCAP_VXRS_PDE         BIT(HWCAP_NR_VXRS_PDE)
+#define HWCAP_SORT             BIT(HWCAP_NR_SORT)
+#define HWCAP_DFLT             BIT(HWCAP_NR_DFLT)
+#define HWCAP_VXRS_PDE2                BIT(HWCAP_NR_VXRS_PDE2)
+#define HWCAP_NNPA             BIT(HWCAP_NR_NNPA)
+#define HWCAP_PCI_MIO          BIT(HWCAP_NR_PCI_MIO)
+#define HWCAP_SIE              BIT(HWCAP_NR_SIE)
 
 /*
  * These are used to set parameters in the core dumps.
@@ -209,10 +237,6 @@ struct arch_elf_state {
 extern unsigned long elf_hwcap;
 #define ELF_HWCAP (elf_hwcap)
 
-/* Internal hardware capabilities, not exposed via elf */
-
-extern unsigned long int_hwcap;
-
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
    intent than poking at uname or /proc/cpuinfo.
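The elf.h hunk replaces the hand-numbered HWCAP_S390_* constants with an HWCAP_NR_* enum plus BIT() masks, which is what lets cpu_feature() above reduce to ilog2(HWCAP_ ## feat). A standalone sketch of the same pattern; BIT() and ilog2() are re-implemented here only so the snippet compiles outside the kernel, and the enum is shortened to three entries:

/* Standalone illustration of the enum + BIT() hwcap pattern. */
#include <stdio.h>

#define BIT(nr)		(1UL << (nr))
#define ilog2(v)	(63 - __builtin_clzl(v))	/* v must be a power of two */

enum {
	HWCAP_NR_ESAN3 = 0,
	HWCAP_NR_ZARCH = 1,
	HWCAP_NR_STFLE = 2,
	HWCAP_NR_MAX,
};

#define HWCAP_ESAN3	BIT(HWCAP_NR_ESAN3)
#define HWCAP_ZARCH	BIT(HWCAP_NR_ZARCH)
#define HWCAP_STFLE	BIT(HWCAP_NR_STFLE)

/* Same trick as cpu_feature(): recover the bit number from the mask. */
#define cpu_feature(feat)	ilog2(HWCAP_ ## feat)

int main(void)
{
	unsigned long elf_hwcap = HWCAP_ZARCH | HWCAP_STFLE;

	printf("STFLE is bit %d, present: %d\n",
	       cpu_feature(STFLE), !!(elf_hwcap & HWCAP_STFLE));
	return 0;
}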
index 3beb294..16dc57d 100644 (file)
@@ -28,8 +28,8 @@ struct exception_table_entry
        long handler;
 };
 
-extern struct exception_table_entry *__start_dma_ex_table;
-extern struct exception_table_entry *__stop_dma_ex_table;
+extern struct exception_table_entry *__start_amode31_ex_table;
+extern struct exception_table_entry *__stop_amode31_ex_table;
 
 const struct exception_table_entry *s390_search_extables(unsigned long addr);
 
index 345cbe9..e8b460f 100644 (file)
@@ -18,7 +18,6 @@
 void ftrace_caller(void);
 
 extern char ftrace_graph_caller_end;
-extern unsigned long ftrace_plt;
 extern void *ftrace_func;
 
 struct dyn_arch_ftrace { };
@@ -31,10 +30,11 @@ struct dyn_arch_ftrace { };
 
 struct module;
 struct dyn_ftrace;
-/*
- * Either -mhotpatch or -mnop-mcount is used - no explicit init is required
- */
-static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { return 0; }
+
+bool ftrace_need_init_nop(void);
+#define ftrace_need_init_nop ftrace_need_init_nop
+
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
 #define ftrace_init_nop ftrace_init_nop
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
@@ -42,42 +42,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
        return addr;
 }
 
-struct ftrace_insn {
-       u16 opc;
-       s32 disp;
-} __packed;
-
-static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_FUNCTION_TRACER
-       /* brcl 0,0 */
-       insn->opc = 0xc004;
-       insn->disp = 0;
-#endif
-}
-
-static inline int is_ftrace_nop(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_FUNCTION_TRACER
-       if (insn->disp == 0)
-               return 1;
-#endif
-       return 0;
-}
-
-static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
-                                            unsigned long ip)
-{
-#ifdef CONFIG_FUNCTION_TRACER
-       unsigned long target;
-
-       /* brasl r0,ftrace_caller */
-       target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
-       insn->opc = 0xc005;
-       insn->disp = (target - ip) / 2;
-#endif
-}
-
 /*
  * Even though the system call numbers are identical for s390/s390x a
  * different system call table is used for compat tasks. This may lead
diff --git a/arch/s390/include/asm/ftrace.lds.h b/arch/s390/include/asm/ftrace.lds.h
new file mode 100644 (file)
index 0000000..968adfd
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+
+#define SIZEOF_MCOUNT_LOC_ENTRY 8
+#define SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE 24
+#define FTRACE_HOTPATCH_TRAMPOLINES_SIZE(n)                                   \
+       DIV_ROUND_UP(SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE * (n),                  \
+                    SIZEOF_MCOUNT_LOC_ENTRY)
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define FTRACE_HOTPATCH_TRAMPOLINES_TEXT                                      \
+       . = ALIGN(8);                                                          \
+       __ftrace_hotpatch_trampolines_start = .;                               \
+       . = . + FTRACE_HOTPATCH_TRAMPOLINES_SIZE(__stop_mcount_loc -           \
+                                                __start_mcount_loc);          \
+       __ftrace_hotpatch_trampolines_end = .;
+#else
+#define FTRACE_HOTPATCH_TRAMPOLINES_TEXT
+#endif
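The new ftrace.lds.h above sizes the hotpatch trampoline area from the byte span between __start_mcount_loc and __stop_mcount_loc: every 8-byte mcount_loc entry gets a 24-byte trampoline. A small standalone check of that arithmetic, assuming this byte-span reading of the macro argument:

/* Quick check of the sizing arithmetic: N traced functions mean 8*N
 * bytes of mcount_loc and therefore 24*N bytes of trampoline text.  */
#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define SIZEOF_MCOUNT_LOC_ENTRY 8
#define SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE 24
#define FTRACE_HOTPATCH_TRAMPOLINES_SIZE(n)				\
	DIV_ROUND_UP(SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE * (n),		\
		     SIZEOF_MCOUNT_LOC_ENTRY)

int main(void)
{
	/* 1000 traced functions -> 8000 bytes between the mcount_loc
	 * start/stop symbols -> 24000 bytes reserved for trampolines. */
	assert(FTRACE_HOTPATCH_TRAMPOLINES_SIZE(1000 * 8) == 24000);
	return 0;
}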
index a9e2c72..3f8ee25 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/types.h>
 #include <asm/cio.h>
 #include <asm/setup.h>
+#include <asm/page.h>
 #include <uapi/asm/ipl.h>
 
 struct ipl_parameter_block {
diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h
new file mode 100644 (file)
index 0000000..d55ba87
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_KFENCE_H
+#define _ASM_S390_KFENCE_H
+
+#include <linux/mm.h>
+#include <linux/kfence.h>
+#include <asm/set_memory.h>
+#include <asm/page.h>
+
+void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static __always_inline bool arch_kfence_init_pool(void)
+{
+       return true;
+}
+
+#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)
+
+/*
+ * Do not split kfence pool to 4k mapping with arch_kfence_init_pool(),
+ * but earlier where page table allocations still happen with memblock.
+ * Reason is that arch_kfence_init_pool() gets called when the system
+ * is still in a limbo state - disabling and enabling bottom halves is
+ * not yet allowed, but that is what our page_table_alloc() would do.
+ */
+static __always_inline void kfence_split_mapping(void)
+{
+#ifdef CONFIG_KFENCE
+       unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;
+
+       set_memory_4k((unsigned long)__kfence_pool, pool_pages);
+#endif
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+       __kernel_map_pages(virt_to_page(addr), 1, !protect);
+       return true;
+}
+
+#endif /* _ASM_S390_KFENCE_H */
index cbc7c3a..df73a05 100644 (file)
 #include <uapi/asm/kvm_para.h>
 #include <asm/diag.h>
 
-static inline long __kvm_hypercall0(unsigned long nr)
-{
-       register unsigned long __nr asm("1") = nr;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr): "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall0(unsigned long nr)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall0(nr);
-}
-
-static inline long __kvm_hypercall1(unsigned long nr, unsigned long p1)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall1(nr, p1);
-}
-
-static inline long __kvm_hypercall2(unsigned long nr, unsigned long p1,
-                              unsigned long p2)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
-                     : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
-                              unsigned long p2)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall2(nr, p1, p2);
-}
-
-static inline long __kvm_hypercall3(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3) : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall3(nr, p1, p2, p3);
-}
-
-static inline long __kvm_hypercall4(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register unsigned long __p4 asm("5") = p4;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3), "d" (__p4) : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall4(nr, p1, p2, p3, p4);
-}
-
-static inline long __kvm_hypercall5(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register unsigned long __p4 asm("5") = p4;
-       register unsigned long __p5 asm("6") = p5;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3), "d" (__p4), "d" (__p5)  : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall5(nr, p1, p2, p3, p4, p5);
-}
-
-static inline long __kvm_hypercall6(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5,
-                              unsigned long p6)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register unsigned long __p4 asm("5") = p4;
-       register unsigned long __p5 asm("6") = p5;
-       register unsigned long __p6 asm("7") = p6;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
-                     : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5,
-                              unsigned long p6)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall6(nr, p1, p2, p3, p4, p5, p6);
-}
+#define HYPERCALL_FMT_0
+#define HYPERCALL_FMT_1 , "0" (r2)
+#define HYPERCALL_FMT_2 , "d" (r3) HYPERCALL_FMT_1
+#define HYPERCALL_FMT_3 , "d" (r4) HYPERCALL_FMT_2
+#define HYPERCALL_FMT_4 , "d" (r5) HYPERCALL_FMT_3
+#define HYPERCALL_FMT_5 , "d" (r6) HYPERCALL_FMT_4
+#define HYPERCALL_FMT_6 , "d" (r7) HYPERCALL_FMT_5
+
+#define HYPERCALL_PARM_0
+#define HYPERCALL_PARM_1 , unsigned long arg1
+#define HYPERCALL_PARM_2 HYPERCALL_PARM_1, unsigned long arg2
+#define HYPERCALL_PARM_3 HYPERCALL_PARM_2, unsigned long arg3
+#define HYPERCALL_PARM_4 HYPERCALL_PARM_3, unsigned long arg4
+#define HYPERCALL_PARM_5 HYPERCALL_PARM_4, unsigned long arg5
+#define HYPERCALL_PARM_6 HYPERCALL_PARM_5, unsigned long arg6
+
+#define HYPERCALL_REGS_0
+#define HYPERCALL_REGS_1                                               \
+       register unsigned long r2 asm("2") = arg1
+#define HYPERCALL_REGS_2                                               \
+       HYPERCALL_REGS_1;                                               \
+       register unsigned long r3 asm("3") = arg2
+#define HYPERCALL_REGS_3                                               \
+       HYPERCALL_REGS_2;                                               \
+       register unsigned long r4 asm("4") = arg3
+#define HYPERCALL_REGS_4                                               \
+       HYPERCALL_REGS_3;                                               \
+       register unsigned long r5 asm("5") = arg4
+#define HYPERCALL_REGS_5                                               \
+       HYPERCALL_REGS_4;                                               \
+       register unsigned long r6 asm("6") = arg5
+#define HYPERCALL_REGS_6                                               \
+       HYPERCALL_REGS_5;                                               \
+       register unsigned long r7 asm("7") = arg6
+
+#define HYPERCALL_ARGS_0
+#define HYPERCALL_ARGS_1 , arg1
+#define HYPERCALL_ARGS_2 HYPERCALL_ARGS_1, arg2
+#define HYPERCALL_ARGS_3 HYPERCALL_ARGS_2, arg3
+#define HYPERCALL_ARGS_4 HYPERCALL_ARGS_3, arg4
+#define HYPERCALL_ARGS_5 HYPERCALL_ARGS_4, arg5
+#define HYPERCALL_ARGS_6 HYPERCALL_ARGS_5, arg6
+
+#define GENERATE_KVM_HYPERCALL_FUNC(args)                              \
+static inline                                                          \
+long __kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args)     \
+{                                                                      \
+       register unsigned long __nr asm("1") = nr;                      \
+       register long __rc asm("2");                                    \
+       HYPERCALL_REGS_##args;                                          \
+                                                                       \
+       asm volatile (                                                  \
+               "       diag    2,4,0x500\n"                            \
+               : "=d" (__rc)                                           \
+               : "d" (__nr) HYPERCALL_FMT_##args                       \
+               : "memory", "cc");                                      \
+       return __rc;                                                    \
+}                                                                      \
+                                                                       \
+static inline                                                          \
+long kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args)       \
+{                                                                      \
+       diag_stat_inc(DIAG_STAT_X500);                                  \
+       return __kvm_hypercall##args(nr HYPERCALL_ARGS_##args);         \
+}
+
+GENERATE_KVM_HYPERCALL_FUNC(0)
+GENERATE_KVM_HYPERCALL_FUNC(1)
+GENERATE_KVM_HYPERCALL_FUNC(2)
+GENERATE_KVM_HYPERCALL_FUNC(3)
+GENERATE_KVM_HYPERCALL_FUNC(4)
+GENERATE_KVM_HYPERCALL_FUNC(5)
+GENERATE_KVM_HYPERCALL_FUNC(6)
 
 /* kvm on s390 is always paravirtualization enabled */
 static inline int kvm_para_available(void)
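The macro block above generates __kvm_hypercall0()..__kvm_hypercall6() and their kvm_hypercallN() wrappers instead of seven open-coded bodies. A minimal caller sketch, assuming kernel context; the call number 42 and the argument meanings are made up for illustration:

#include <asm/kvm_para.h>

/* Hypothetical guest-side call: the generated kvm_hypercall2() loads
 * the number into r1 and the arguments into r2/r3, issues diag 0x500
 * and returns whatever the host left in r2.                          */
static long example_notify_host(unsigned long token, unsigned long index)
{
	return kvm_hypercall2(42, token, index);
}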
index 24e8fed..1ffea75 100644 (file)
@@ -22,7 +22,7 @@
 
 #define EX_TABLE(_fault, _target)                                      \
        __EX_TABLE(__ex_table, _fault, _target)
-#define EX_TABLE_DMA(_fault, _target)                                  \
-       __EX_TABLE(.dma.ex_table, _fault, _target)
+#define EX_TABLE_AMODE31(_fault, _target)                              \
+       __EX_TABLE(.amode31.ex_table, _fault, _target)
 
 #endif
index 47bde5a..11213c8 100644 (file)
@@ -124,7 +124,8 @@ struct lowcore {
        /* Restart function and parameter. */
        __u64   restart_fn;                     /* 0x0370 */
        __u64   restart_data;                   /* 0x0378 */
-       __u64   restart_source;                 /* 0x0380 */
+       __u32   restart_source;                 /* 0x0380 */
+       __u32   restart_flags;                  /* 0x0384 */
 
        /* Address space pointer. */
        __u64   kernel_asce;                    /* 0x0388 */
index e0a6d29..9f1eea1 100644 (file)
@@ -8,16 +8,14 @@
  * This file contains the s390 architecture specific module code.
  */
 
-struct mod_arch_syminfo
-{
+struct mod_arch_syminfo {
        unsigned long got_offset;
        unsigned long plt_offset;
        int got_initialized;
        int plt_initialized;
 };
 
-struct mod_arch_specific
-{
+struct mod_arch_specific {
        /* Starting offset of got in the module core memory. */
        unsigned long got_offset;
        /* Starting offset of plt in the module core memory. */
@@ -30,6 +28,14 @@ struct mod_arch_specific
        int nsyms;
        /* Additional symbol information (got and plt offsets). */
        struct mod_arch_syminfo *syminfo;
+#ifdef CONFIG_FUNCTION_TRACER
+       /* Start of memory reserved for ftrace hotpatch trampolines. */
+       struct ftrace_hotpatch_trampoline *trampolines_start;
+       /* End of memory reserved for ftrace hotpatch trampolines. */
+       struct ftrace_hotpatch_trampoline *trampolines_end;
+       /* Next unused ftrace hotpatch trampoline slot. */
+       struct ftrace_hotpatch_trampoline *next_trampoline;
+#endif /* CONFIG_FUNCTION_TRACER */
 };
 
 #endif /* _ASM_S390_MODULE_H */
index 3ba945c..d98d17a 100644 (file)
@@ -144,9 +144,6 @@ struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
 void arch_set_page_dat(struct page *page, int order);
-void arch_set_page_nodat(struct page *page, int order);
-int arch_test_page_nodat(struct page *page);
-void arch_set_page_states(int make_stable);
 
 static inline int devmem_is_allowed(unsigned long pfn)
 {
index 5509b22..e4803ec 100644 (file)
@@ -216,9 +216,10 @@ void zpci_remove_reserved_devices(void);
 int clp_setup_writeback_mio(void);
 int clp_scan_pci_devices(void);
 int clp_query_pci_fn(struct zpci_dev *zdev);
-int clp_enable_fh(struct zpci_dev *, u8);
-int clp_disable_fh(struct zpci_dev *);
+int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as);
+int clp_disable_fh(struct zpci_dev *zdev, u32 *fh);
 int clp_get_state(u32 fid, enum zpci_state *state);
+int clp_refresh_fh(u32 fid, u32 *fh);
 
 /* UID */
 void update_uid_checking(bool new);
@@ -271,6 +272,8 @@ struct zpci_dev *get_zdev_by_fid(u32);
 /* DMA */
 int zpci_dma_init(void);
 void zpci_dma_exit(void);
+int zpci_dma_init_device(struct zpci_dev *zdev);
+int zpci_dma_exit_device(struct zpci_dev *zdev);
 
 /* IRQ */
 int __init zpci_irq_init(void);
index f62cd3e..3b8e89d 100644 (file)
@@ -182,8 +182,6 @@ static inline unsigned long *get_st_pto(unsigned long entry)
 }
 
 /* Prototypes */
-int zpci_dma_init_device(struct zpci_dev *);
-void zpci_dma_exit_device(struct zpci_dev *);
 void dma_free_seg_table(unsigned long);
 unsigned long *dma_alloc_cpu_table(void);
 void dma_cleanup_tables(unsigned long *);
index dcac7b2..b61426c 100644 (file)
@@ -67,15 +67,15 @@ extern unsigned long zero_page_mask;
 /* TODO: s390 cannot support io_remap_pfn_range... */
 
 #define pte_ERROR(e) \
-       printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
+       pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
-       printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
+       pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pud_ERROR(e) \
-       printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
+       pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
 #define p4d_ERROR(e) \
-       printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
+       pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
 #define pgd_ERROR(e) \
-       printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
+       pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 /*
  * The vmalloc and module area will always be on the topmost area of the
index ddc7858..879b8e3 100644 (file)
@@ -26,6 +26,8 @@
 #define _CIF_MCCK_GUEST                BIT(CIF_MCCK_GUEST)
 #define _CIF_DEDICATED_CPU     BIT(CIF_DEDICATED_CPU)
 
+#define RESTART_FLAG_CTLREGS   _AC(1 << 0, U)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/cpumask.h>
index cb4f73c..25b5dc3 100644 (file)
@@ -291,16 +291,15 @@ struct qdio_ssqd_desc {
 typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
                            int, int, unsigned long);
 
-/* qdio errors reported to the upper-layer program */
+/* qdio errors reported through the queue handlers: */
 #define QDIO_ERROR_ACTIVATE                    0x0001
 #define QDIO_ERROR_GET_BUF_STATE               0x0002
 #define QDIO_ERROR_SET_BUF_STATE               0x0004
+
+/* extra info for completed SBALs: */
 #define QDIO_ERROR_SLSB_STATE                  0x0100
 #define QDIO_ERROR_SLSB_PENDING                        0x0200
 
-#define QDIO_ERROR_FATAL                       0x00ff
-#define QDIO_ERROR_TEMPORARY                   0xff00
-
 /* for qdio_cleanup */
 #define QDIO_FLAG_CLEANUP_USING_CLEAR          0x01
 #define QDIO_FLAG_CLEANUP_USING_HALT           0x02
@@ -312,8 +311,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
  * @qib_param_field_format: format for qib_parm_field
  * @qib_param_field: pointer to 128 bytes or NULL, if no param field
  * @qib_rflags: rflags to set
- * @input_slib_elements: pointer to no_input_qs * 128 words of data or NULL
- * @output_slib_elements: pointer to no_output_qs * 128 words of data or NULL
  * @no_input_qs: number of input queues
  * @no_output_qs: number of output queues
  * @input_handler: handler to be called for input queues
@@ -330,27 +327,18 @@ struct qdio_initialize {
        unsigned int qib_param_field_format;
        unsigned char *qib_param_field;
        unsigned char qib_rflags;
-       unsigned long *input_slib_elements;
-       unsigned long *output_slib_elements;
        unsigned int no_input_qs;
        unsigned int no_output_qs;
        qdio_handler_t *input_handler;
        qdio_handler_t *output_handler;
        void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
-       unsigned int scan_threshold;
        unsigned long int_parm;
        struct qdio_buffer ***input_sbal_addr_array;
        struct qdio_buffer ***output_sbal_addr_array;
 };
 
-#define QDIO_STATE_INACTIVE            0x00000002 /* after qdio_cleanup */
-#define QDIO_STATE_ESTABLISHED         0x00000004 /* after qdio_establish */
-#define QDIO_STATE_ACTIVE              0x00000008 /* after qdio_activate */
-#define QDIO_STATE_STOPPED             0x00000010 /* after queues went down */
-
 #define QDIO_FLAG_SYNC_INPUT           0x01
 #define QDIO_FLAG_SYNC_OUTPUT          0x02
-#define QDIO_FLAG_PCI_OUT              0x10
 
 int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count);
 void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count);
@@ -367,7 +355,6 @@ extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
                   unsigned int bufnr, unsigned int count, struct qaob *aob);
 extern int qdio_start_irq(struct ccw_device *cdev);
 extern int qdio_stop_irq(struct ccw_device *cdev);
-extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
 extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
                              bool is_input, unsigned int *bufnr,
                              unsigned int *error);
index 5763769..e3ae937 100644 (file)
@@ -8,8 +8,6 @@
 #define _ASM_S390_SCLP_H
 
 #include <linux/types.h>
-#include <asm/chpid.h>
-#include <asm/cpu.h>
 
 #define SCLP_CHP_INFO_MASK_SIZE                32
 #define EARLY_SCCB_SIZE                PAGE_SIZE
 /* 24 + 16 * SCLP_MAX_CORES */
 #define EXT_SCCB_READ_CPU      (3 * PAGE_SIZE)
 
+#ifndef __ASSEMBLY__
+#include <asm/chpid.h>
+#include <asm/cpu.h>
+
 struct sclp_chp_info {
        u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
        u8 standby[SCLP_CHP_INFO_MASK_SIZE];
@@ -113,6 +115,9 @@ struct zpci_report_error_header {
        u8 data[0];     /* Subsequent Data passed verbatim to SCLP ET 24 */
 } __packed;
 
+extern char *sclp_early_sccb;
+
+void sclp_early_set_buffer(void *sccb);
 int sclp_early_read_info(void);
 int sclp_early_read_storage_info(void);
 int sclp_early_get_core_info(struct sclp_core_info *info);
@@ -147,4 +152,5 @@ static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
        return _sclp_get_core_info(info);
 }
 
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_SCLP_H */
index 0c21514..85881dd 100644 (file)
@@ -35,7 +35,7 @@ static inline int arch_is_kernel_initmem_freed(unsigned long addr)
  */
 #define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var
 
-extern unsigned long __sdma, __edma;
-extern unsigned long __stext_dma, __etext_dma;
+extern unsigned long __samode31, __eamode31;
+extern unsigned long __stext_amode31, __etext_amode31;
 
 #endif
index a22a5a8..950d87b 100644 (file)
@@ -10,6 +10,7 @@ extern struct mutex cpa_mutex;
 #define SET_MEMORY_RW  2UL
 #define SET_MEMORY_NX  4UL
 #define SET_MEMORY_X   8UL
+#define SET_MEMORY_4K  16UL
 
 int __set_memory(unsigned long addr, int numpages, unsigned long flags);
 
@@ -33,4 +34,9 @@ static inline int set_memory_x(unsigned long addr, int numpages)
        return __set_memory(addr, numpages, SET_MEMORY_X);
 }
 
+static inline int set_memory_4k(unsigned long addr, int numpages)
+{
+       return __set_memory(addr, numpages, SET_MEMORY_4K);
+}
+
 #endif
index 3a77aa9..b6606ff 100644 (file)
 #include <uapi/asm/setup.h>
 #include <linux/build_bug.h>
 
-#define EP_OFFSET              0x10008
-#define EP_STRING              "S390EP"
 #define PARMAREA               0x10400
-#define EARLY_SCCB_OFFSET      0x11000
-#define HEAD_END               0x12000
+#define HEAD_END               0x11000
 
 /*
  * Machine features detected in early.c
@@ -36,6 +33,7 @@
 #define MACHINE_FLAG_NX                BIT(15)
 #define MACHINE_FLAG_GS                BIT(16)
 #define MACHINE_FLAG_SCC       BIT(17)
+#define MACHINE_FLAG_PCI_MIO   BIT(18)
 
 #define LPP_MAGIC              BIT(31)
 #define LPP_PID_MASK           _AC(0xffffffff, UL)
 #define STARTUP_NORMAL_OFFSET  0x10000
 #define STARTUP_KDUMP_OFFSET   0x10010
 
-/* Offsets to parameters in kernel/head.S  */
-
-#define IPL_DEVICE_OFFSET      0x10400
-#define INITRD_START_OFFSET    0x10408
-#define INITRD_SIZE_OFFSET     0x10410
-#define OLDMEM_BASE_OFFSET     0x10418
-#define OLDMEM_SIZE_OFFSET     0x10420
-#define KERNEL_VERSION_OFFSET  0x10428
-#define COMMAND_LINE_OFFSET    0x10480
-
 #ifndef __ASSEMBLY__
 
 #include <asm/lowcore.h>
 #include <asm/types.h>
 
-#define IPL_DEVICE     (*(unsigned long *)  (IPL_DEVICE_OFFSET))
-#define INITRD_START   (*(unsigned long *)  (INITRD_START_OFFSET))
-#define INITRD_SIZE    (*(unsigned long *)  (INITRD_SIZE_OFFSET))
-#define OLDMEM_BASE    (*(unsigned long *)  (OLDMEM_BASE_OFFSET))
-#define OLDMEM_SIZE    (*(unsigned long *)  (OLDMEM_SIZE_OFFSET))
-#define COMMAND_LINE   ((char *)            (COMMAND_LINE_OFFSET))
-
 struct parmarea {
        unsigned long ipl_device;                       /* 0x10400 */
        unsigned long initrd_start;                     /* 0x10408 */
@@ -110,6 +91,7 @@ extern unsigned long mio_wb_bit_mask;
 #define MACHINE_HAS_NX         (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
 #define MACHINE_HAS_GS         (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
 #define MACHINE_HAS_SCC                (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
+#define MACHINE_HAS_PCI_MIO    (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
 
 /*
  * Console mode. Override with conmode=
@@ -161,20 +143,22 @@ static inline unsigned long kaslr_offset(void)
 
 extern int is_full_image;
 
+struct initrd_data {
+       unsigned long start;
+       unsigned long size;
+};
+extern struct initrd_data initrd_data;
+
+struct oldmem_data {
+       unsigned long start;
+       unsigned long size;
+};
+extern struct oldmem_data oldmem_data;
+
 static inline u32 gen_lpswe(unsigned long addr)
 {
        BUILD_BUG_ON(addr > 0xfff);
        return 0xb2b20000 | addr;
 }
-
-#else /* __ASSEMBLY__ */
-
-#define IPL_DEVICE     (IPL_DEVICE_OFFSET)
-#define INITRD_START   (INITRD_START_OFFSET)
-#define INITRD_SIZE    (INITRD_SIZE_OFFSET)
-#define OLDMEM_BASE    (OLDMEM_BASE_OFFSET)
-#define OLDMEM_SIZE    (OLDMEM_SIZE_OFFSET)
-#define COMMAND_LINE   (COMMAND_LINE_OFFSET)
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_SETUP_H */
index 9107e3d..b3dd883 100644 (file)
@@ -104,4 +104,63 @@ static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
        return false;
 }
 
+#define SYSCALL_FMT_0
+#define SYSCALL_FMT_1 , "0" (r2)
+#define SYSCALL_FMT_2 , "d" (r3) SYSCALL_FMT_1
+#define SYSCALL_FMT_3 , "d" (r4) SYSCALL_FMT_2
+#define SYSCALL_FMT_4 , "d" (r5) SYSCALL_FMT_3
+#define SYSCALL_FMT_5 , "d" (r6) SYSCALL_FMT_4
+#define SYSCALL_FMT_6 , "d" (r7) SYSCALL_FMT_5
+
+#define SYSCALL_PARM_0
+#define SYSCALL_PARM_1 , long arg1
+#define SYSCALL_PARM_2 SYSCALL_PARM_1, long arg2
+#define SYSCALL_PARM_3 SYSCALL_PARM_2, long arg3
+#define SYSCALL_PARM_4 SYSCALL_PARM_3, long arg4
+#define SYSCALL_PARM_5 SYSCALL_PARM_4, long arg5
+#define SYSCALL_PARM_6 SYSCALL_PARM_5, long arg6
+
+#define SYSCALL_REGS_0
+#define SYSCALL_REGS_1                                                 \
+       register long r2 asm("2") = arg1
+#define SYSCALL_REGS_2                                                 \
+       SYSCALL_REGS_1;                                                 \
+       register long r3 asm("3") = arg2
+#define SYSCALL_REGS_3                                                 \
+       SYSCALL_REGS_2;                                                 \
+       register long r4 asm("4") = arg3
+#define SYSCALL_REGS_4                                                 \
+       SYSCALL_REGS_3;                                                 \
+       register long r5 asm("5") = arg4
+#define SYSCALL_REGS_5                                                 \
+       SYSCALL_REGS_4;                                                 \
+       register long r6 asm("6") = arg5
+#define SYSCALL_REGS_6                                                 \
+       SYSCALL_REGS_5;                                                 \
+       register long r7 asm("7") = arg6
+
+#define GENERATE_SYSCALL_FUNC(nr)                                      \
+static __always_inline                                                 \
+long syscall##nr(unsigned long syscall SYSCALL_PARM_##nr)              \
+{                                                                      \
+       register unsigned long r1 asm ("1") = syscall;                  \
+       register long rc asm ("2");                                     \
+       SYSCALL_REGS_##nr;                                              \
+                                                                       \
+       asm volatile (                                                  \
+               "       svc     0\n"                                    \
+               : "=d" (rc)                                             \
+               : "d" (r1) SYSCALL_FMT_##nr                             \
+               : "memory");                                            \
+       return rc;                                                      \
+}
+
+GENERATE_SYSCALL_FUNC(0)
+GENERATE_SYSCALL_FUNC(1)
+GENERATE_SYSCALL_FUNC(2)
+GENERATE_SYSCALL_FUNC(3)
+GENERATE_SYSCALL_FUNC(4)
+GENERATE_SYSCALL_FUNC(5)
+GENERATE_SYSCALL_FUNC(6)
+
 #endif /* _ASM_SYSCALL_H */
index 12c5f00..fe92a4c 100644 (file)
@@ -356,11 +356,9 @@ int uv_convert_from_secure(unsigned long paddr);
 int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
 
 void setup_uv(void);
-void adjust_to_uv_max(unsigned long *vmax);
 #else
 #define is_prot_virt_host() 0
 static inline void setup_uv(void) {}
-static inline void adjust_to_uv_max(unsigned long *vmax) {}
 
 static inline int uv_destroy_page(unsigned long paddr)
 {
@@ -373,10 +371,4 @@ static inline int uv_convert_from_secure(unsigned long paddr)
 }
 #endif
 
-#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
-void uv_query_info(void);
-#else
-static inline void uv_query_info(void) {}
-#endif
-
 #endif /* _ASM_S390_UV_H */
index d6465b2..db84942 100644 (file)
@@ -6,6 +6,7 @@
 
 #define VDSO_HAS_CLOCK_GETRES 1
 
+#include <asm/syscall.h>
 #include <asm/timex.h>
 #include <asm/unistd.h>
 #include <linux/compiler.h>
@@ -35,35 +36,20 @@ static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *
 static __always_inline
 long clock_gettime_fallback(clockid_t clkid, struct __kernel_timespec *ts)
 {
-       register unsigned long r1 __asm__("r1") = __NR_clock_gettime;
-       register unsigned long r2 __asm__("r2") = (unsigned long)clkid;
-       register void *r3 __asm__("r3") = ts;
-
-       asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
-       return r2;
+       return syscall2(__NR_clock_gettime, (long)clkid, (long)ts);
 }
 
 static __always_inline
 long gettimeofday_fallback(register struct __kernel_old_timeval *tv,
                           register struct timezone *tz)
 {
-       register unsigned long r1 __asm__("r1") = __NR_gettimeofday;
-       register unsigned long r2 __asm__("r2") = (unsigned long)tv;
-       register void *r3 __asm__("r3") = tz;
-
-       asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
-       return r2;
+       return syscall2(__NR_gettimeofday, (long)tv, (long)tz);
 }
 
 static __always_inline
 long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts)
 {
-       register unsigned long r1 __asm__("r1") = __NR_clock_getres;
-       register unsigned long r2 __asm__("r2") = (unsigned long)clkid;
-       register void *r3 __asm__("r3") = ts;
-
-       asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
-       return r2;
+       return syscall2(__NR_clock_getres, (long)clkid, (long)ts);
 }
 
 #ifdef CONFIG_TIME_NS
index 4a44ba5..80f500f 100644 (file)
@@ -40,7 +40,7 @@ obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o
 obj-y  += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y  += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
 obj-y  += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
-obj-y  += smp.o
+obj-y  += smp.o text_amode31.o
 
 extra-y                                += head64.o vmlinux.lds
 
index 77ff213..b57da93 100644 (file)
@@ -116,6 +116,7 @@ int main(void)
        OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
        OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
        OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
+       OFFSET(__LC_RESTART_FLAGS, lowcore, restart_flags);
        OFFSET(__LC_KERNEL_ASCE, lowcore, kernel_asce);
        OFFSET(__LC_USER_ASCE, lowcore, user_asce);
        OFFSET(__LC_LPP, lowcore, lpp);
@@ -152,5 +153,12 @@ int main(void)
        DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region));
        /* sizeof kernel parameter area */
        DEFINE(__PARMAREA_SIZE, sizeof(struct parmarea));
+       /* kernel parameter area offsets */
+       DEFINE(IPL_DEVICE, PARMAREA + offsetof(struct parmarea, ipl_device));
+       DEFINE(INITRD_START, PARMAREA + offsetof(struct parmarea, initrd_start));
+       DEFINE(INITRD_SIZE, PARMAREA + offsetof(struct parmarea, initrd_size));
+       DEFINE(OLDMEM_BASE, PARMAREA + offsetof(struct parmarea, oldmem_base));
+       DEFINE(OLDMEM_SIZE, PARMAREA + offsetof(struct parmarea, oldmem_size));
+       DEFINE(COMMAND_LINE, PARMAREA + offsetof(struct parmarea, command_line));
        return 0;
 }
index 0e36dfc..d72a6df 100644 (file)
@@ -140,7 +140,7 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
 
        while (count) {
                from = __pa(src);
-               if (!OLDMEM_BASE && from < sclp.hsa_size) {
+               if (!oldmem_data.start && from < sclp.hsa_size) {
                        /* Copy from zfcp/nvme dump HSA area */
                        len = min(count, sclp.hsa_size - from);
                        rc = memcpy_hsa_kernel(dst, from, len);
@@ -148,12 +148,12 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
                                return rc;
                } else {
                        /* Check for swapped kdump oldmem areas */
-                       if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
-                               from -= OLDMEM_BASE;
-                               len = min(count, OLDMEM_SIZE - from);
-                       } else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
-                               len = min(count, OLDMEM_SIZE - from);
-                               from += OLDMEM_BASE;
+                       if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
+                               from -= oldmem_data.start;
+                               len = min(count, oldmem_data.size - from);
+                       } else if (oldmem_data.start && from < oldmem_data.size) {
+                               len = min(count, oldmem_data.size - from);
+                               from += oldmem_data.start;
                        } else {
                                len = count;
                        }
@@ -183,7 +183,7 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
 
        while (count) {
                from = __pa(src);
-               if (!OLDMEM_BASE && from < sclp.hsa_size) {
+               if (!oldmem_data.start && from < sclp.hsa_size) {
                        /* Copy from zfcp/nvme dump HSA area */
                        len = min(count, sclp.hsa_size - from);
                        rc = memcpy_hsa_user(dst, from, len);
@@ -191,12 +191,12 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
                                return rc;
                } else {
                        /* Check for swapped kdump oldmem areas */
-                       if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
-                               from -= OLDMEM_BASE;
-                               len = min(count, OLDMEM_SIZE - from);
-                       } else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
-                               len = min(count, OLDMEM_SIZE - from);
-                               from += OLDMEM_BASE;
+                       if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
+                               from -= oldmem_data.start;
+                               len = min(count, oldmem_data.size - from);
+                       } else if (oldmem_data.start && from < oldmem_data.size) {
+                               len = min(count, oldmem_data.size - from);
+                               from += oldmem_data.start;
                        } else {
                                len = count;
                        }
@@ -243,10 +243,10 @@ static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
        unsigned long size_old;
        int rc;
 
-       if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
-               size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
+       if (pfn < oldmem_data.size >> PAGE_SHIFT) {
+               size_old = min(size, oldmem_data.size - (pfn << PAGE_SHIFT));
                rc = remap_pfn_range(vma, from,
-                                    pfn + (OLDMEM_BASE >> PAGE_SHIFT),
+                                    pfn + (oldmem_data.start >> PAGE_SHIFT),
                                     size_old, prot);
                if (rc || size == size_old)
                        return rc;
@@ -288,7 +288,7 @@ static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
 int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
                           unsigned long pfn, unsigned long size, pgprot_t prot)
 {
-       if (OLDMEM_BASE)
+       if (oldmem_data.start)
                return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
        else
                return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
@@ -633,17 +633,17 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
        u64 hdr_off;
 
        /* If we are not in kdump or zfcp/nvme dump mode return */
-       if (!OLDMEM_BASE && !is_ipl_type_dump())
+       if (!oldmem_data.start && !is_ipl_type_dump())
                return 0;
        /* If we cannot get HSA size for zfcp/nvme dump return error */
        if (is_ipl_type_dump() && !sclp.hsa_size)
                return -ENODEV;
 
        /* For kdump, exclude previous crashkernel memory */
-       if (OLDMEM_BASE) {
-               oldmem_region.base = OLDMEM_BASE;
-               oldmem_region.size = OLDMEM_SIZE;
-               oldmem_type.total_size = OLDMEM_SIZE;
+       if (oldmem_data.start) {
+               oldmem_region.base = oldmem_data.start;
+               oldmem_region.size = oldmem_data.size;
+               oldmem_type.total_size = oldmem_data.size;
        }
 
        mem_chunk_cnt = get_mem_chunk_cnt();
index 09b6c64..4331c7e 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/fs.h>
+#include <linux/minmax.h>
 #include <linux/debugfs.h>
 
 #include <asm/debug.h>
@@ -92,6 +93,8 @@ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
                                     char *out_buf, const char *in_buf);
 static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
                                   char *out_buf, debug_sprintf_entry_t *curr_event);
+static void debug_areas_swap(debug_info_t *a, debug_info_t *b);
+static void debug_events_append(debug_info_t *dest, debug_info_t *src);
 
 /* globals */
 
@@ -311,24 +314,6 @@ static debug_info_t *debug_info_create(const char *name, int pages_per_area,
                goto out;
 
        rc->mode = mode & ~S_IFMT;
-
-       /* create root directory */
-       rc->debugfs_root_entry = debugfs_create_dir(rc->name,
-                                                   debug_debugfs_root_entry);
-
-       /* append new element to linked list */
-       if (!debug_area_first) {
-               /* first element in list */
-               debug_area_first = rc;
-               rc->prev = NULL;
-       } else {
-               /* append element to end of list */
-               debug_area_last->next = rc;
-               rc->prev = debug_area_last;
-       }
-       debug_area_last = rc;
-       rc->next = NULL;
-
        refcount_set(&rc->ref_count, 1);
 out:
        return rc;
@@ -388,27 +373,10 @@ static void debug_info_get(debug_info_t *db_info)
  */
 static void debug_info_put(debug_info_t *db_info)
 {
-       int i;
-
        if (!db_info)
                return;
-       if (refcount_dec_and_test(&db_info->ref_count)) {
-               for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
-                       if (!db_info->views[i])
-                               continue;
-                       debugfs_remove(db_info->debugfs_entries[i]);
-               }
-               debugfs_remove(db_info->debugfs_root_entry);
-               if (db_info == debug_area_first)
-                       debug_area_first = db_info->next;
-               if (db_info == debug_area_last)
-                       debug_area_last = db_info->prev;
-               if (db_info->prev)
-                       db_info->prev->next = db_info->next;
-               if (db_info->next)
-                       db_info->next->prev = db_info->prev;
+       if (refcount_dec_and_test(&db_info->ref_count))
                debug_info_free(db_info);
-       }
 }
 
 /*
@@ -632,6 +600,31 @@ static int debug_close(struct inode *inode, struct file *file)
        return 0; /* success */
 }
 
+/* Create debugfs entries and add to internal list. */
+static void _debug_register(debug_info_t *id)
+{
+       /* create root directory */
+       id->debugfs_root_entry = debugfs_create_dir(id->name,
+                                                   debug_debugfs_root_entry);
+
+       /* append new element to linked list */
+       if (!debug_area_first) {
+               /* first element in list */
+               debug_area_first = id;
+               id->prev = NULL;
+       } else {
+               /* append element to end of list */
+               debug_area_last->next = id;
+               id->prev = debug_area_last;
+       }
+       debug_area_last = id;
+       id->next = NULL;
+
+       debug_register_view(id, &debug_level_view);
+       debug_register_view(id, &debug_flush_view);
+       debug_register_view(id, &debug_pages_view);
+}
+
 /**
  * debug_register_mode() - creates and initializes debug area.
  *
@@ -661,19 +654,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
        if ((uid != 0) || (gid != 0))
                pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
        BUG_ON(!initialized);
-       mutex_lock(&debug_mutex);
 
        /* create new debug_info */
        rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
-       if (!rc)
-               goto out;
-       debug_register_view(rc, &debug_level_view);
-       debug_register_view(rc, &debug_flush_view);
-       debug_register_view(rc, &debug_pages_view);
-out:
-       if (!rc)
+       if (rc) {
+               mutex_lock(&debug_mutex);
+               _debug_register(rc);
+               mutex_unlock(&debug_mutex);
+       } else {
                pr_err("Registering debug feature %s failed\n", name);
-       mutex_unlock(&debug_mutex);
+       }
        return rc;
 }
 EXPORT_SYMBOL(debug_register_mode);
@@ -702,6 +692,82 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
 }
 EXPORT_SYMBOL(debug_register);
 
+/**
+ * debug_register_static() - registers a static debug area
+ *
+ * @id: Handle for static debug area
+ * @pages_per_area: Number of pages per area
+ * @nr_areas: Number of debug areas
+ *
+ * Register debug_info_t defined using DEFINE_STATIC_DEBUG_INFO.
+ *
+ * Note: This function is called automatically via an initcall generated by
+ *      DEFINE_STATIC_DEBUG_INFO.
+ */
+void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas)
+{
+       unsigned long flags;
+       debug_info_t *copy;
+
+       if (!initialized) {
+               pr_err("Tried to register debug feature %s too early\n",
+                      id->name);
+               return;
+       }
+
+       copy = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
+                               id->level, ALL_AREAS);
+       if (!copy) {
+               pr_err("Registering debug feature %s failed\n", id->name);
+
+               /* Clear pointers to prevent tracing into released initdata. */
+               spin_lock_irqsave(&id->lock, flags);
+               id->areas = NULL;
+               id->active_pages = NULL;
+               id->active_entries = NULL;
+               spin_unlock_irqrestore(&id->lock, flags);
+
+               return;
+       }
+
+       /* Replace static trace area with dynamic copy. */
+       spin_lock_irqsave(&id->lock, flags);
+       debug_events_append(copy, id);
+       debug_areas_swap(id, copy);
+       spin_unlock_irqrestore(&id->lock, flags);
+
+       /* Clear pointers to initdata and discard copy. */
+       copy->areas = NULL;
+       copy->active_pages = NULL;
+       copy->active_entries = NULL;
+       debug_info_free(copy);
+
+       mutex_lock(&debug_mutex);
+       _debug_register(id);
+       mutex_unlock(&debug_mutex);
+}
+
+/* Remove debugfs entries and remove from internal list. */
+static void _debug_unregister(debug_info_t *id)
+{
+       int i;
+
+       for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+               if (!id->views[i])
+                       continue;
+               debugfs_remove(id->debugfs_entries[i]);
+       }
+       debugfs_remove(id->debugfs_root_entry);
+       if (id == debug_area_first)
+               debug_area_first = id->next;
+       if (id == debug_area_last)
+               debug_area_last = id->prev;
+       if (id->prev)
+               id->prev->next = id->next;
+       if (id->next)
+               id->next->prev = id->prev;
+}
+
 /**
  * debug_unregister() - give back debug area.
  *
@@ -715,8 +781,10 @@ void debug_unregister(debug_info_t *id)
        if (!id)
                return;
        mutex_lock(&debug_mutex);
-       debug_info_put(id);
+       _debug_unregister(id);
        mutex_unlock(&debug_mutex);
+
+       debug_info_put(id);
 }
 EXPORT_SYMBOL(debug_unregister);
 
@@ -726,35 +794,28 @@ EXPORT_SYMBOL(debug_unregister);
  */
 static int debug_set_size(debug_info_t *id, int nr_areas, int pages_per_area)
 {
-       debug_entry_t ***new_areas;
+       debug_info_t *new_id;
        unsigned long flags;
-       int rc = 0;
 
        if (!id || (nr_areas <= 0) || (pages_per_area < 0))
                return -EINVAL;
-       if (pages_per_area > 0) {
-               new_areas = debug_areas_alloc(pages_per_area, nr_areas);
-               if (!new_areas) {
-                       pr_info("Allocating memory for %i pages failed\n",
-                               pages_per_area);
-                       rc = -ENOMEM;
-                       goto out;
-               }
-       } else {
-               new_areas = NULL;
+
+       new_id = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
+                                 id->level, ALL_AREAS);
+       if (!new_id) {
+               pr_info("Allocating memory for %i pages failed\n",
+                       pages_per_area);
+               return -ENOMEM;
        }
+
        spin_lock_irqsave(&id->lock, flags);
-       debug_areas_free(id);
-       id->areas = new_areas;
-       id->nr_areas = nr_areas;
-       id->pages_per_area = pages_per_area;
-       id->active_area = 0;
-       memset(id->active_entries, 0, sizeof(int)*id->nr_areas);
-       memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
+       debug_events_append(new_id, id);
+       debug_areas_swap(new_id, id);
+       debug_info_free(new_id);
        spin_unlock_irqrestore(&id->lock, flags);
        pr_info("%s: set new size (%i pages)\n", id->name, pages_per_area);
-out:
-       return rc;
+
+       return 0;
 }
 
 /**
@@ -772,16 +833,17 @@ void debug_set_level(debug_info_t *id, int new_level)
 
        if (!id)
                return;
-       spin_lock_irqsave(&id->lock, flags);
+
        if (new_level == DEBUG_OFF_LEVEL) {
-               id->level = DEBUG_OFF_LEVEL;
                pr_info("%s: switched off\n", id->name);
        } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
                pr_info("%s: level %i is out of range (%i - %i)\n",
                        id->name, new_level, 0, DEBUG_MAX_LEVEL);
-       } else {
-               id->level = new_level;
+               return;
        }
+
+       spin_lock_irqsave(&id->lock, flags);
+       id->level = new_level;
        spin_unlock_irqrestore(&id->lock, flags);
 }
 EXPORT_SYMBOL(debug_set_level);
@@ -821,6 +883,42 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id)
                                  id->active_entries[id->active_area]);
 }
 
+/* Swap debug areas of a and b. */
+static void debug_areas_swap(debug_info_t *a, debug_info_t *b)
+{
+       swap(a->nr_areas, b->nr_areas);
+       swap(a->pages_per_area, b->pages_per_area);
+       swap(a->areas, b->areas);
+       swap(a->active_area, b->active_area);
+       swap(a->active_pages, b->active_pages);
+       swap(a->active_entries, b->active_entries);
+}
+
+/* Append all debug events in active area from source to destination log. */
+static void debug_events_append(debug_info_t *dest, debug_info_t *src)
+{
+       debug_entry_t *from, *to, *last;
+
+       if (!src->areas || !dest->areas)
+               return;
+
+       /* Loop over all entries in src, starting with oldest. */
+       from = get_active_entry(src);
+       last = from;
+       do {
+               if (from->clock != 0LL) {
+                       to = get_active_entry(dest);
+                       memset(to, 0, dest->entry_size);
+                       memcpy(to, from, min(src->entry_size,
+                                            dest->entry_size));
+                       proceed_active_entry(dest);
+               }
+
+               proceed_active_entry(src);
+               from = get_active_entry(src);
+       } while (from != last);
+}
+
 /*
  * debug_finish_entry:
  * - set timestamp, caller address, cpu number etc.
@@ -1111,16 +1209,17 @@ int debug_register_view(debug_info_t *id, struct debug_view *view)
                        break;
        }
        if (i == DEBUG_MAX_VIEWS) {
-               pr_err("Registering view %s/%s would exceed the maximum "
-                      "number of views %i\n", id->name, view->name, i);
                rc = -1;
        } else {
                id->views[i] = view;
                id->debugfs_entries[i] = pde;
        }
        spin_unlock_irqrestore(&id->lock, flags);
-       if (rc)
+       if (rc) {
+               pr_err("Registering view %s/%s would exceed the maximum "
+                      "number of views %i\n", id->name, view->name, i);
                debugfs_remove(pde);
+       }
 out:
        return rc;
 }
index a3f4746..76a656b 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/diag.h>
 #include <asm/trace/diag.h>
 #include <asm/sections.h>
+#include "entry.h"
 
 struct diag_stat {
        unsigned int counter[NR_DIAG_STAT];
@@ -50,8 +51,16 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
        [DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
 };
 
-struct diag_ops __bootdata_preserved(diag_dma_ops);
-struct diag210 *__bootdata_preserved(__diag210_tmp_dma);
+struct diag_ops __amode31_ref diag_amode31_ops = {
+       .diag210 = _diag210_amode31,
+       .diag26c = _diag26c_amode31,
+       .diag14 = _diag14_amode31,
+       .diag0c = _diag0c_amode31,
+       .diag308_reset = _diag308_reset_amode31
+};
+
+static struct diag210 _diag210_tmp_amode31 __section(".amode31.data");
+struct diag210 __amode31_ref *__diag210_tmp_amode31 = &_diag210_tmp_amode31;
 
 static int show_diag_stat(struct seq_file *m, void *v)
 {
@@ -59,7 +68,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
        unsigned long n = (unsigned long) v - 1;
        int cpu, prec, tmp;
 
-       get_online_cpus();
+       cpus_read_lock();
        if (n == 0) {
                seq_puts(m, "         ");
 
@@ -78,7 +87,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
                }
                seq_printf(m, "    %s\n", diag_map[n-1].name);
        }
-       put_online_cpus();
+       cpus_read_unlock();
        return 0;
 }
 
@@ -135,7 +144,7 @@ EXPORT_SYMBOL(diag_stat_inc_norecursion);
 int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
 {
        diag_stat_inc(DIAG_STAT_X014);
-       return diag_dma_ops.diag14(rx, ry1, subcode);
+       return diag_amode31_ops.diag14(rx, ry1, subcode);
 }
 EXPORT_SYMBOL(diag14);
 
@@ -172,12 +181,12 @@ int diag210(struct diag210 *addr)
        int ccode;
 
        spin_lock_irqsave(&diag210_lock, flags);
-       *__diag210_tmp_dma = *addr;
+       *__diag210_tmp_amode31 = *addr;
 
        diag_stat_inc(DIAG_STAT_X210);
-       ccode = diag_dma_ops.diag210(__diag210_tmp_dma);
+       ccode = diag_amode31_ops.diag210(__diag210_tmp_amode31);
 
-       *addr = *__diag210_tmp_dma;
+       *addr = *__diag210_tmp_amode31;
        spin_unlock_irqrestore(&diag210_lock, flags);
 
        return ccode;
@@ -205,6 +214,6 @@ EXPORT_SYMBOL(diag224);
 int diag26c(void *req, void *resp, enum diag26c_sc subcode)
 {
        diag_stat_inc(DIAG_STAT_X26C);
-       return diag_dma_ops.diag26c(req, resp, subcode);
+       return diag_amode31_ops.diag26c(req, resp, subcode);
 }
 EXPORT_SYMBOL(diag26c);
index 5412efe..ec55154 100644 (file)
@@ -312,10 +312,12 @@ static const unsigned char formats[][6] = {
        [INSTR_VRR_VV]       = { V_8, V_12, 0, 0, 0, 0 },
        [INSTR_VRR_VV0U]     = { V_8, V_12, U4_32, 0, 0, 0 },
        [INSTR_VRR_VV0U0U]   = { V_8, V_12, U4_32, U4_24, 0, 0 },
+       [INSTR_VRR_VV0U2]    = { V_8, V_12, U4_24, 0, 0, 0 },
        [INSTR_VRR_VV0UU2]   = { V_8, V_12, U4_32, U4_28, 0, 0 },
        [INSTR_VRR_VV0UUU]   = { V_8, V_12, U4_32, U4_28, U4_24, 0 },
        [INSTR_VRR_VVV]      = { V_8, V_12, V_16, 0, 0, 0 },
        [INSTR_VRR_VVV0U]    = { V_8, V_12, V_16, U4_32, 0, 0 },
+       [INSTR_VRR_VVV0U0]   = { V_8, V_12, V_16, U4_24, 0, 0 },
        [INSTR_VRR_VVV0U0U]  = { V_8, V_12, V_16, U4_32, U4_24, 0 },
        [INSTR_VRR_VVV0UU]   = { V_8, V_12, V_16, U4_32, U4_28, 0 },
        [INSTR_VRR_VVV0UUU]  = { V_8, V_12, V_16, U4_32, U4_28, U4_24 },
index fb84e3f..9857cb0 100644 (file)
@@ -236,6 +236,10 @@ static __init void detect_machine_facilities(void)
                clock_comparator_max = -1ULL >> 1;
                __ctl_set_bit(0, 53);
        }
+       if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
+               S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
+               /* the control bit is set during PCI initialization */
+       }
 }
 
 static inline void save_vector_registers(void)
index 5a2f70c..b9716a7 100644 (file)
@@ -624,12 +624,15 @@ ENTRY(mcck_int_handler)
 4:     j       4b
 ENDPROC(mcck_int_handler)
 
-#
-# PSW restart interrupt handler
-#
 ENTRY(restart_int_handler)
        ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
        stg     %r15,__LC_SAVE_AREA_RESTART
+       TSTMSK  __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
+       jz      0f
+       la      %r15,4095
+       lctlg   %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
+0:     larl    %r15,.Lstosm_tmp
+       stosm   0(%r15),0x04                    # turn dat on, keep irqs off
        lg      %r15,__LC_RESTART_STACK
        xc      STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
@@ -638,7 +641,7 @@ ENTRY(restart_int_handler)
        xc      0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
        lg      %r1,__LC_RESTART_FN             # load fn, parm & source cpu
        lg      %r2,__LC_RESTART_DATA
-       lg      %r3,__LC_RESTART_SOURCE
+       lgf     %r3,__LC_RESTART_SOURCE
        ltgr    %r3,%r3                         # test source cpu address
        jm      1f                              # negative -> skip source stop
 0:     sigp    %r4,%r3,SIGP_SENSE              # sigp sense to source cpu
index 1ab3346..7f2696e 100644 (file)
@@ -28,10 +28,8 @@ void do_non_secure_storage_access(struct pt_regs *regs);
 void do_secure_storage_violation(struct pt_regs *regs);
 void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
 void kernel_stack_overflow(struct pt_regs * regs);
-void do_signal(struct pt_regs *regs);
 void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
                     struct pt_regs *regs);
-void do_notify_resume(struct pt_regs *regs);
 
 void __init init_IRQ(void);
 void do_io_irq(struct pt_regs *regs);
@@ -64,4 +62,13 @@ void stack_free(unsigned long stack);
 
 extern char kprobes_insn_page[];
 
+extern char _samode31[], _eamode31[];
+extern char _stext_amode31[], _etext_amode31[];
+extern struct exception_table_entry _start_amode31_ex_table[];
+extern struct exception_table_entry _stop_amode31_ex_table[];
+
+#define __amode31_data __section(".amode31.data")
+#define __amode31_ref __section(".amode31.refs")
+extern long _start_amode31_refs[], _end_amode31_refs[];
+
 #endif /* _ENTRY_H */
index 2d8f595..0a464d3 100644 (file)
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
 #include <asm/cacheflush.h>
+#include <asm/ftrace.lds.h>
+#include <asm/nospec-branch.h>
 #include <asm/set_memory.h>
 #include "entry.h"
+#include "ftrace.h"
 
 /*
  * To generate function prologue either gcc's hotpatch feature (since gcc 4.8)
  */
 
 void *ftrace_func __read_mostly = ftrace_stub;
-unsigned long ftrace_plt;
+struct ftrace_insn {
+       u16 opc;
+       s32 disp;
+} __packed;
+
+asm(
+       "       .align 16\n"
+       "ftrace_shared_hotpatch_trampoline_br:\n"
+       "       lmg     %r0,%r1,2(%r1)\n"
+       "       br      %r1\n"
+       "ftrace_shared_hotpatch_trampoline_br_end:\n"
+);
+
+#ifdef CONFIG_EXPOLINE
+asm(
+       "       .align 16\n"
+       "ftrace_shared_hotpatch_trampoline_ex:\n"
+       "       lmg     %r0,%r1,2(%r1)\n"
+       "       ex      %r0," __stringify(__LC_BR_R1) "(%r0)\n"
+       "       j       .\n"
+       "ftrace_shared_hotpatch_trampoline_ex_end:\n"
+);
+
+asm(
+       "       .align 16\n"
+       "ftrace_shared_hotpatch_trampoline_exrl:\n"
+       "       lmg     %r0,%r1,2(%r1)\n"
+       "       .insn   ril,0xc60000000000,%r0,0f\n" /* exrl */
+       "       j       .\n"
+       "0:     br      %r1\n"
+       "ftrace_shared_hotpatch_trampoline_exrl_end:\n"
+);
+#endif /* CONFIG_EXPOLINE */
+
+#ifdef CONFIG_MODULES
+static char *ftrace_plt;
+
+asm(
+       "       .data\n"
+       "ftrace_plt_template:\n"
+       "       basr    %r1,%r0\n"
+       "       lg      %r1,0f-.(%r1)\n"
+       "       br      %r1\n"
+       "0:     .quad   ftrace_caller\n"
+       "ftrace_plt_template_end:\n"
+       "       .previous\n"
+);
+#endif /* CONFIG_MODULES */
+
+static const char *ftrace_shared_hotpatch_trampoline(const char **end)
+{
+       const char *tstart, *tend;
+
+       tstart = ftrace_shared_hotpatch_trampoline_br;
+       tend = ftrace_shared_hotpatch_trampoline_br_end;
+#ifdef CONFIG_EXPOLINE
+       if (!nospec_disable) {
+               tstart = ftrace_shared_hotpatch_trampoline_ex;
+               tend = ftrace_shared_hotpatch_trampoline_ex_end;
+               if (test_facility(35)) { /* exrl */
+                       tstart = ftrace_shared_hotpatch_trampoline_exrl;
+                       tend = ftrace_shared_hotpatch_trampoline_exrl_end;
+               }
+       }
+#endif /* CONFIG_EXPOLINE */
+       if (end)
+               *end = tend;
+       return tstart;
+}
+
+bool ftrace_need_init_nop(void)
+{
+       return ftrace_shared_hotpatch_trampoline(NULL);
+}
+
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+       static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
+               __ftrace_hotpatch_trampolines_start;
+       static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
+       static struct ftrace_hotpatch_trampoline *trampoline;
+       struct ftrace_hotpatch_trampoline **next_trampoline;
+       struct ftrace_hotpatch_trampoline *trampolines_end;
+       struct ftrace_hotpatch_trampoline tmp;
+       struct ftrace_insn *insn;
+       const char *shared;
+       s32 disp;
+
+       BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
+                    SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);
+
+       next_trampoline = &next_vmlinux_trampoline;
+       trampolines_end = __ftrace_hotpatch_trampolines_end;
+       shared = ftrace_shared_hotpatch_trampoline(NULL);
+#ifdef CONFIG_MODULES
+       if (mod) {
+               next_trampoline = &mod->arch.next_trampoline;
+               trampolines_end = mod->arch.trampolines_end;
+               shared = ftrace_plt;
+       }
+#endif
+
+       if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
+               return -ENOMEM;
+       trampoline = (*next_trampoline)++;
+
+       /* Check for the compiler-generated fentry nop (brcl 0, .). */
+       if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
+               return -EINVAL;
+
+       /* Generate the trampoline. */
+       tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
+       tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
+       tmp.interceptor = FTRACE_ADDR;
+       tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
+       s390_kernel_write(trampoline, &tmp, sizeof(tmp));
+
+       /* Generate a jump to the trampoline. */
+       disp = ((char *)trampoline - (char *)rec->ip) / 2;
+       insn = (struct ftrace_insn *)rec->ip;
+       s390_kernel_write(&insn->disp, &disp, sizeof(disp));
+
+       return 0;
+}
 
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
@@ -49,11 +175,45 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
        return 0;
 }
 
+static void ftrace_generate_nop_insn(struct ftrace_insn *insn)
+{
+       /* brcl 0,0 */
+       insn->opc = 0xc004;
+       insn->disp = 0;
+}
+
+static void ftrace_generate_call_insn(struct ftrace_insn *insn,
+                                     unsigned long ip)
+{
+       unsigned long target;
+
+       /* brasl r0,ftrace_caller */
+       target = FTRACE_ADDR;
+#ifdef CONFIG_MODULES
+       if (is_module_addr((void *)ip))
+               target = (unsigned long)ftrace_plt;
+#endif /* CONFIG_MODULES */
+       insn->opc = 0xc005;
+       insn->disp = (target - ip) / 2;
+}
+
+static void brcl_disable(void *brcl)
+{
+       u8 op = 0x04; /* set mask field to zero */
+
+       s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
+}
+
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
 {
        struct ftrace_insn orig, new, old;
 
+       if (ftrace_shared_hotpatch_trampoline(NULL)) {
+               brcl_disable((void *)rec->ip);
+               return 0;
+       }
+
        if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        /* Replace ftrace call with a nop. */
@@ -67,10 +227,22 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
        return 0;
 }
 
+static void brcl_enable(void *brcl)
+{
+       u8 op = 0xf4; /* set mask field to all ones */
+
+       s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
+}
+
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
        struct ftrace_insn orig, new, old;
 
+       if (ftrace_shared_hotpatch_trampoline(NULL)) {
+               brcl_enable((void *)rec->ip);
+               return 0;
+       }
+
        if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        /* Replace nop with an ftrace call. */
@@ -95,22 +267,44 @@ int __init ftrace_dyn_arch_init(void)
        return 0;
 }
 
+void arch_ftrace_update_code(int command)
+{
+       if (ftrace_shared_hotpatch_trampoline(NULL))
+               ftrace_modify_all_code(command);
+       else
+               ftrace_run_stop_machine(command);
+}
+
+static void __ftrace_sync(void *dummy)
+{
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       if (ftrace_shared_hotpatch_trampoline(NULL)) {
+               /* Send SIGP to the other CPUs, so they see the new code. */
+               smp_call_function(__ftrace_sync, NULL, 1);
+       }
+       return 0;
+}
+
 #ifdef CONFIG_MODULES
 
 static int __init ftrace_plt_init(void)
 {
-       unsigned int *ip;
+       const char *start, *end;
 
-       ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
+       ftrace_plt = module_alloc(PAGE_SIZE);
        if (!ftrace_plt)
                panic("cannot allocate ftrace plt\n");
-       ip = (unsigned int *) ftrace_plt;
-       ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
-       ip[1] = 0x100a0004;
-       ip[2] = 0x07f10000;
-       ip[3] = FTRACE_ADDR >> 32;
-       ip[4] = FTRACE_ADDR & 0xffffffff;
-       set_memory_ro(ftrace_plt, 1);
+
+       start = ftrace_shared_hotpatch_trampoline(&end);
+       if (!start) {
+               start = ftrace_plt_template;
+               end = ftrace_plt_template_end;
+       }
+       memcpy(ftrace_plt, start, end - start);
+       set_memory_ro((unsigned long)ftrace_plt, 1);
        return 0;
 }
 device_initcall(ftrace_plt_init);
@@ -147,17 +341,13 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
  */
 int ftrace_enable_ftrace_graph_caller(void)
 {
-       u8 op = 0x04; /* set mask field to zero */
-
-       s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+       brcl_disable(__va(ftrace_graph_caller));
        return 0;
 }
 
 int ftrace_disable_ftrace_graph_caller(void)
 {
-       u8 op = 0xf4; /* set mask field to all ones */
-
-       s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+       brcl_enable(__va(ftrace_graph_caller));
        return 0;
 }
 
diff --git a/arch/s390/kernel/ftrace.h b/arch/s390/kernel/ftrace.h
new file mode 100644 (file)
index 0000000..69e416f
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FTRACE_H
+#define _FTRACE_H
+
+#include <asm/types.h>
+
+struct ftrace_hotpatch_trampoline {
+       u16 brasl_opc;
+       s32 brasl_disp;
+       s16: 16;
+       u64 rest_of_intercepted_function;
+       u64 interceptor;
+} __packed;
+
+extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_start[];
+extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_end[];
+extern const char ftrace_shared_hotpatch_trampoline_br[];
+extern const char ftrace_shared_hotpatch_trampoline_br_end[];
+extern const char ftrace_shared_hotpatch_trampoline_ex[];
+extern const char ftrace_shared_hotpatch_trampoline_ex_end[];
+extern const char ftrace_shared_hotpatch_trampoline_exrl[];
+extern const char ftrace_shared_hotpatch_trampoline_exrl_end[];
+extern const char ftrace_plt_template[];
+extern const char ftrace_plt_template_end[];
+
+#endif /* _FTRACE_H */
index 0c25388..114b549 100644 (file)
@@ -21,6 +21,7 @@ ENTRY(startup_continue)
        larl    %r1,tod_clock_base
        mvc     0(16,%r1),__LC_BOOT_CLOCK
        larl    %r13,.LPG1              # get base
+       lctlg   %c0,%c15,.Lctl-.LPG1(%r13)      # load control registers
 #
 # Setup stack
 #
@@ -41,3 +42,19 @@ ENTRY(startup_continue)
        .align  16
 .LPG1:
 .Ldw:  .quad   0x0002000180000000,0x0000000000000000
+.Lctl: .quad   0x04040000              # cr0: AFP registers & secondary space
+       .quad   0                       # cr1: primary space segment table
+       .quad   0                       # cr2: dispatchable unit control table
+       .quad   0                       # cr3: instruction authorization
+       .quad   0xffff                  # cr4: instruction authorization
+       .quad   0                       # cr5: primary-aste origin
+       .quad   0                       # cr6: I/O interrupts
+       .quad   0                       # cr7: secondary space segment table
+       .quad   0x0000000000008000      # cr8: access registers translation
+       .quad   0                       # cr9: tracing off
+       .quad   0                       # cr10: tracing off
+       .quad   0                       # cr11: tracing off
+       .quad   0                       # cr12: tracing off
+       .quad   0                       # cr13: home space segment table
+       .quad   0xc0000000              # cr14: machine check handling off
+       .quad   0                       # cr15: linkage stack operations
index 50e2c21..e2cc357 100644 (file)
@@ -179,8 +179,6 @@ static inline int __diag308(unsigned long subcode, void *addr)
 
 int diag308(unsigned long subcode, void *addr)
 {
-       if (IS_ENABLED(CONFIG_KASAN))
-               __arch_local_irq_stosm(0x04); /* enable DAT */
        diag_stat_inc(DIAG_STAT_X308);
        return __diag308(subcode, addr);
 }
@@ -1843,7 +1841,6 @@ static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
 
 static void __do_restart(void *ignore)
 {
-       __arch_local_irq_stosm(0x04); /* enable DAT */
        smp_send_stop();
 #ifdef CONFIG_CRASH_DUMP
        crash_kexec(NULL);
@@ -2082,7 +2079,7 @@ void s390_reset_system(void)
 
        /* Disable lowcore protection */
        __ctl_clear_bit(0, 28);
-       diag_dma_ops.diag308_reset();
+       diag_amode31_ops.diag308_reset();
 }
 
 #ifdef CONFIG_KEXEC_FILE
index af43535..b5245fa 100644 (file)
@@ -1,4 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/minmax.h>
+#include <linux/string.h>
 #include <asm/ebcdic.h>
 #include <asm/ipl.h>
 
index 234d085..3a3145c 100644 (file)
@@ -228,7 +228,7 @@ int show_interrupts(struct seq_file *p, void *v)
        int index = *(loff_t *) v;
        int cpu, irq;
 
-       get_online_cpus();
+       cpus_read_lock();
        if (index == 0) {
                seq_puts(p, "           ");
                for_each_online_cpu(cpu)
@@ -258,7 +258,7 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_putc(p, '\n');
        }
 out:
-       put_online_cpus();
+       cpus_read_unlock();
        return 0;
 }
 
index ab584e8..9156653 100644 (file)
@@ -36,7 +36,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
        unsigned char *ipe = (unsigned char *)expected;
        unsigned char *ipn = (unsigned char *)new;
 
-       pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
+       pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc);
        pr_emerg("Found:    %6ph\n", ipc);
        pr_emerg("Expected: %6ph\n", ipe);
        pr_emerg("New:      %6ph\n", ipn);
index 1005a69..0505e55 100644 (file)
@@ -224,8 +224,8 @@ void arch_crash_save_vmcoreinfo(void)
        VMCOREINFO_SYMBOL(lowcore_ptr);
        VMCOREINFO_SYMBOL(high_memory);
        VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
-       vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
-       vmcoreinfo_append_str("EDMA=%lx\n", __edma);
+       vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
+       vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
        vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
        mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
 }
@@ -263,7 +263,6 @@ static void __do_machine_kexec(void *data)
  */
 static void __machine_kexec(void *data)
 {
-       __arch_local_irq_stosm(0x04); /* enable DAT */
        pfault_fini();
        tracing_off();
        debug_locks_off();
index 4055f1c..b01ba46 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/elf.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <linux/ftrace.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/kasan.h>
@@ -23,6 +24,8 @@
 #include <asm/alternative.h>
 #include <asm/nospec-branch.h>
 #include <asm/facility.h>
+#include <asm/ftrace.lds.h>
+#include <asm/set_memory.h>
 
 #if 0
 #define DEBUGP printk
@@ -48,6 +51,13 @@ void *module_alloc(unsigned long size)
        return p;
 }
 
+#ifdef CONFIG_FUNCTION_TRACER
+void module_arch_cleanup(struct module *mod)
+{
+       module_memfree(mod->arch.trampolines_start);
+}
+#endif
+
 void module_arch_freeing_init(struct module *mod)
 {
        if (is_livepatch_module(mod) &&
@@ -466,6 +476,30 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                                    write);
 }
 
+#ifdef CONFIG_FUNCTION_TRACER
+static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
+                                                   const Elf_Shdr *s)
+{
+       char *start, *end;
+       int numpages;
+       size_t size;
+
+       size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size);
+       numpages = DIV_ROUND_UP(size, PAGE_SIZE);
+       start = module_alloc(numpages * PAGE_SIZE);
+       if (!start)
+               return -ENOMEM;
+       set_memory_ro((unsigned long)start, numpages);
+       end = start + size;
+
+       me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start;
+       me->arch.trampolines_end = (struct ftrace_hotpatch_trampoline *)end;
+       me->arch.next_trampoline = me->arch.trampolines_start;
+
+       return 0;
+}
+#endif /* CONFIG_FUNCTION_TRACER */
+
 int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
@@ -473,6 +507,9 @@ int module_finalize(const Elf_Ehdr *hdr,
        const Elf_Shdr *s;
        char *secstrings, *secname;
        void *aseg;
+#ifdef CONFIG_FUNCTION_TRACER
+       int ret;
+#endif
 
        if (IS_ENABLED(CONFIG_EXPOLINE) &&
            !nospec_disable && me->arch.plt_size) {
@@ -507,6 +544,14 @@ int module_finalize(const Elf_Ehdr *hdr,
                if (IS_ENABLED(CONFIG_EXPOLINE) &&
                    (str_has_prefix(secname, ".s390_return")))
                        nospec_revert(aseg, aseg + s->sh_size);
+
+#ifdef CONFIG_FUNCTION_TRACER
+               if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
+                       ret = module_alloc_ftrace_hotpatch_trampolines(me, s);
+                       if (ret < 0)
+                               return ret;
+               }
+#endif /* CONFIG_FUNCTION_TRACER */
        }
 
        jump_label_apply_nops(me);
index 5a7420b..4bef35b 100644 (file)
@@ -121,7 +121,7 @@ static void os_info_old_init(void)
 
        if (os_info_init)
                return;
-       if (!OLDMEM_BASE)
+       if (!oldmem_data.start)
                goto fail;
        if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr)))
                goto fail;
index d7dc36e..2e3bb63 100644 (file)
@@ -1138,7 +1138,7 @@ static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        int ret;
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&cfset_ctrset_mutex);
        switch (cmd) {
        case S390_HWCTR_START:
@@ -1155,7 +1155,7 @@ static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                break;
        }
        mutex_unlock(&cfset_ctrset_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return ret;
 }
 
index 82df39b..d9d4a80 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/cpufeature.h>
 #include <linux/bitops.h>
 #include <linux/kernel.h>
+#include <linux/random.h>
 #include <linux/sched/mm.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <asm/elf.h>
 #include <asm/lowcore.h>
 #include <asm/param.h>
+#include <asm/sclp.h>
 #include <asm/smp.h>
 
+unsigned long __read_mostly elf_hwcap;
+char elf_platform[ELF_PLATFORM_SIZE];
+
 struct cpu_info {
        unsigned int cpu_mhz_dynamic;
        unsigned int cpu_mhz_static;
@@ -113,15 +118,33 @@ static void show_facilities(struct seq_file *m)
 static void show_cpu_summary(struct seq_file *m, void *v)
 {
        static const char *hwcap_str[] = {
-               "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
-               "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs",
-               "vxe2", "vxp", "sort", "dflt"
-       };
-       static const char * const int_hwcap_str[] = {
-               "sie"
+               [HWCAP_NR_ESAN3]        = "esan3",
+               [HWCAP_NR_ZARCH]        = "zarch",
+               [HWCAP_NR_STFLE]        = "stfle",
+               [HWCAP_NR_MSA]          = "msa",
+               [HWCAP_NR_LDISP]        = "ldisp",
+               [HWCAP_NR_EIMM]         = "eimm",
+               [HWCAP_NR_DFP]          = "dfp",
+               [HWCAP_NR_HPAGE]        = "edat",
+               [HWCAP_NR_ETF3EH]       = "etf3eh",
+               [HWCAP_NR_HIGH_GPRS]    = "highgprs",
+               [HWCAP_NR_TE]           = "te",
+               [HWCAP_NR_VXRS]         = "vx",
+               [HWCAP_NR_VXRS_BCD]     = "vxd",
+               [HWCAP_NR_VXRS_EXT]     = "vxe",
+               [HWCAP_NR_GS]           = "gs",
+               [HWCAP_NR_VXRS_EXT2]    = "vxe2",
+               [HWCAP_NR_VXRS_PDE]     = "vxp",
+               [HWCAP_NR_SORT]         = "sort",
+               [HWCAP_NR_DFLT]         = "dflt",
+               [HWCAP_NR_VXRS_PDE2]    = "vxp2",
+               [HWCAP_NR_NNPA]         = "nnpa",
+               [HWCAP_NR_PCI_MIO]      = "pcimio",
+               [HWCAP_NR_SIE]          = "sie",
        };
        int i, cpu;
 
+       BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
        seq_printf(m, "vendor_id       : IBM/S390\n"
                   "# processors    : %i\n"
                   "bogomips per cpu: %lu.%02lu\n",
@@ -132,9 +155,6 @@ static void show_cpu_summary(struct seq_file *m, void *v)
        for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
                if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
                        seq_printf(m, "%s ", hwcap_str[i]);
-       for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
-               if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
-                       seq_printf(m, "%s ", int_hwcap_str[i]);
        seq_puts(m, "\n");
        show_facilities(m);
        show_cacheinfo(m);
@@ -149,6 +169,141 @@ static void show_cpu_summary(struct seq_file *m, void *v)
        }
 }
 
+static int __init setup_hwcaps(void)
+{
+       /* instructions named N3, "backported" to esa-mode */
+       if (test_facility(0))
+               elf_hwcap |= HWCAP_ESAN3;
+
+       /* z/Architecture mode active */
+       elf_hwcap |= HWCAP_ZARCH;
+
+       /* store-facility-list-extended */
+       if (test_facility(7))
+               elf_hwcap |= HWCAP_STFLE;
+
+       /* message-security assist */
+       if (test_facility(17))
+               elf_hwcap |= HWCAP_MSA;
+
+       /* long-displacement */
+       if (test_facility(19))
+               elf_hwcap |= HWCAP_LDISP;
+
+       /* extended-immediate */
+       if (test_facility(21))
+               elf_hwcap |= HWCAP_EIMM;
+
+       /* extended-translation facility 3 enhancement */
+       if (test_facility(22) && test_facility(30))
+               elf_hwcap |= HWCAP_ETF3EH;
+
+       /* decimal floating point & perform floating point operation */
+       if (test_facility(42) && test_facility(44))
+               elf_hwcap |= HWCAP_DFP;
+
+       /* huge page support */
+       if (MACHINE_HAS_EDAT1)
+               elf_hwcap |= HWCAP_HPAGE;
+
+       /* 64-bit register support for 31-bit processes */
+       elf_hwcap |= HWCAP_HIGH_GPRS;
+
+       /* transactional execution */
+       if (MACHINE_HAS_TE)
+               elf_hwcap |= HWCAP_TE;
+
+       /*
+        * Vector extension can be disabled with the "novx" parameter.
+        * Use MACHINE_HAS_VX instead of facility bit 129.
+        */
+       if (MACHINE_HAS_VX) {
+               elf_hwcap |= HWCAP_VXRS;
+               if (test_facility(134))
+                       elf_hwcap |= HWCAP_VXRS_BCD;
+               if (test_facility(135))
+                       elf_hwcap |= HWCAP_VXRS_EXT;
+               if (test_facility(148))
+                       elf_hwcap |= HWCAP_VXRS_EXT2;
+               if (test_facility(152))
+                       elf_hwcap |= HWCAP_VXRS_PDE;
+               if (test_facility(192))
+                       elf_hwcap |= HWCAP_VXRS_PDE2;
+       }
+
+       if (test_facility(150))
+               elf_hwcap |= HWCAP_SORT;
+
+       if (test_facility(151))
+               elf_hwcap |= HWCAP_DFLT;
+
+       if (test_facility(165))
+               elf_hwcap |= HWCAP_NNPA;
+
+       /* guarded storage */
+       if (MACHINE_HAS_GS)
+               elf_hwcap |= HWCAP_GS;
+
+       if (MACHINE_HAS_PCI_MIO)
+               elf_hwcap |= HWCAP_PCI_MIO;
+
+       /* virtualization support */
+       if (sclp.has_sief2)
+               elf_hwcap |= HWCAP_SIE;
+
+       return 0;
+}
+arch_initcall(setup_hwcaps);
+
+static int __init setup_elf_platform(void)
+{
+       struct cpuid cpu_id;
+
+       get_cpu_id(&cpu_id);
+       add_device_randomness(&cpu_id, sizeof(cpu_id));
+       switch (cpu_id.machine) {
+       case 0x2064:
+       case 0x2066:
+       default:        /* Use "z900" as default for 64 bit kernels. */
+               strcpy(elf_platform, "z900");
+               break;
+       case 0x2084:
+       case 0x2086:
+               strcpy(elf_platform, "z990");
+               break;
+       case 0x2094:
+       case 0x2096:
+               strcpy(elf_platform, "z9-109");
+               break;
+       case 0x2097:
+       case 0x2098:
+               strcpy(elf_platform, "z10");
+               break;
+       case 0x2817:
+       case 0x2818:
+               strcpy(elf_platform, "z196");
+               break;
+       case 0x2827:
+       case 0x2828:
+               strcpy(elf_platform, "zEC12");
+               break;
+       case 0x2964:
+       case 0x2965:
+               strcpy(elf_platform, "z13");
+               break;
+       case 0x3906:
+       case 0x3907:
+               strcpy(elf_platform, "z14");
+               break;
+       case 0x8561:
+       case 0x8562:
+               strcpy(elf_platform, "z15");
+               break;
+       }
+       return 0;
+}
+arch_initcall(setup_elf_platform);
+
 static void show_cpu_topology(struct seq_file *m, unsigned long n)
 {
 #ifdef CONFIG_SCHED_TOPOLOGY
@@ -210,7 +365,7 @@ static inline void *c_update(loff_t *pos)
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-       get_online_cpus();
+       cpus_read_lock();
        return c_update(pos);
 }
 
@@ -222,7 +377,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void c_stop(struct seq_file *m, void *v)
 {
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 const struct seq_operations cpuinfo_op = {
index ff0f9e8..fe14beb 100644 (file)
@@ -89,27 +89,71 @@ EXPORT_SYMBOL(console_devno);
 unsigned int console_irq = -1;
 EXPORT_SYMBOL(console_irq);
 
-unsigned long elf_hwcap __read_mostly = 0;
-char elf_platform[ELF_PLATFORM_SIZE];
+/*
+ * Some code and data needs to stay below 2 GB, even when the kernel would be
+ * relocated above 2 GB, because it has to use 31 bit addresses.
+ * Such code and data is part of the .amode31 section.
+ */
+unsigned long __amode31_ref __samode31 = __pa(&_samode31);
+unsigned long __amode31_ref __eamode31 = __pa(&_eamode31);
+unsigned long __amode31_ref __stext_amode31 = __pa(&_stext_amode31);
+unsigned long __amode31_ref __etext_amode31 = __pa(&_etext_amode31);
+struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
+struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
+
+/*
+ * Control registers CR2, CR5 and CR15 are initialized with addresses
+ * of tables that must be placed below 2G which is handled by the AMODE31
+ * sections.
+ * Because the AMODE31 sections are relocated below 2G at startup,
+ * the content of control registers CR2, CR5 and CR15 must be updated
+ * with new addresses after the relocation. The initial initialization of
+ * control registers occurs in head64.S and then gets updated again after AMODE31
+ * relocation. We must access the relevant AMODE31 tables indirectly via
+ * pointers placed in the .amode31.refs linker section. Those pointers get
+ * updated automatically during AMODE31 relocation and always contain a valid
+ * address within AMODE31 sections.
+ */
+
+static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);
+
+static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
+       [1] = 0xffffffffffffffff
+};
+
+static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0
+};
+
+static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
+       0, 0, 0x89000000, 0,
+       0, 0, 0x8a000000, 0
+};
 
-unsigned long int_hwcap = 0;
+static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
+static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
+static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
+static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
 
 int __bootdata(noexec_disabled);
 unsigned long __bootdata(ident_map_size);
 struct mem_detect_info __bootdata(mem_detect);
+struct initrd_data __bootdata(initrd_data);
 
-struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
-struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
-unsigned long __bootdata_preserved(__stext_dma);
-unsigned long __bootdata_preserved(__etext_dma);
-unsigned long __bootdata_preserved(__sdma);
-unsigned long __bootdata_preserved(__edma);
 unsigned long __bootdata_preserved(__kaslr_offset);
 unsigned int __bootdata_preserved(zlib_dfltcc_support);
 EXPORT_SYMBOL(zlib_dfltcc_support);
 u64 __bootdata_preserved(stfle_fac_list[16]);
 EXPORT_SYMBOL(stfle_fac_list);
 u64 __bootdata_preserved(alt_stfle_fac_list[16]);
+struct oldmem_data __bootdata_preserved(oldmem_data);
 
 unsigned long VMALLOC_START;
 EXPORT_SYMBOL(VMALLOC_START);
@@ -254,7 +298,7 @@ static void __init setup_zfcpdump(void)
 {
        if (!is_ipl_type_dump())
                return;
-       if (OLDMEM_BASE)
+       if (oldmem_data.start)
                return;
        strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
        console_loglevel = 2;
@@ -421,7 +465,7 @@ static void __init setup_lowcore_dat_off(void)
        lc->restart_stack = (unsigned long) restart_stack;
        lc->restart_fn = (unsigned long) do_restart;
        lc->restart_data = 0;
-       lc->restart_source = -1UL;
+       lc->restart_source = -1U;
 
        mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
        if (!mcck_stack)
@@ -450,12 +494,19 @@ static void __init setup_lowcore_dat_off(void)
 
 static void __init setup_lowcore_dat_on(void)
 {
+       struct lowcore *lc = lowcore_ptr[0];
+
        __ctl_clear_bit(0, 28);
        S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
+       __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
        __ctl_set_bit(0, 28);
+       mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
+       mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
+       memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
+                       sizeof(S390_lowcore.cregs_save_area));
 }
 
 static struct resource code_resource = {
@@ -610,9 +661,9 @@ static void __init reserve_crashkernel(void)
                return;
        }
 
-       low = crash_base ?: OLDMEM_BASE;
+       low = crash_base ?: oldmem_data.start;
        high = low + crash_size;
-       if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
+       if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
                /* The crashkernel fits into OLDMEM, reuse OLDMEM */
                crash_base = low;
        } else {
@@ -639,7 +690,7 @@ static void __init reserve_crashkernel(void)
        if (register_memory_notifier(&kdump_mem_nb))
                return;
 
-       if (!OLDMEM_BASE && MACHINE_IS_VM)
+       if (!oldmem_data.start && MACHINE_IS_VM)
                diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
@@ -658,11 +709,11 @@ static void __init reserve_crashkernel(void)
 static void __init reserve_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
-       if (!INITRD_START || !INITRD_SIZE)
+       if (!initrd_data.start || !initrd_data.size)
                return;
-       initrd_start = INITRD_START;
-       initrd_end = initrd_start + INITRD_SIZE;
-       memblock_reserve(INITRD_START, INITRD_SIZE);
+       initrd_start = initrd_data.start;
+       initrd_end = initrd_start + initrd_data.size;
+       memblock_reserve(initrd_data.start, initrd_data.size);
 #endif
 }
 
@@ -732,10 +783,10 @@ static void __init memblock_add_mem_detect_info(void)
 static void __init check_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
-       if (INITRD_START && INITRD_SIZE &&
-           !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
+       if (initrd_data.start && initrd_data.size &&
+           !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
                pr_err("The initial RAM disk does not fit into the memory\n");
-               memblock_free(INITRD_START, INITRD_SIZE);
+               memblock_free(initrd_data.start, initrd_data.size);
                initrd_start = initrd_end = 0;
        }
 #endif
@@ -748,10 +799,10 @@ static void __init reserve_kernel(void)
 {
        unsigned long start_pfn = PFN_UP(__pa(_end));
 
-       memblock_reserve(0, HEAD_END);
+       memblock_reserve(0, STARTUP_NORMAL_OFFSET);
+       memblock_reserve((unsigned long)sclp_early_sccb, EXT_SCCB_READ_SCP);
        memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
                         - (unsigned long)_stext);
-       memblock_reserve(__sdma, __edma - __sdma);
 }
 
 static void __init setup_memory(void)
@@ -771,152 +822,52 @@ static void __init setup_memory(void)
        memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }
 
-/*
- * Setup hardware capabilities.
- */
-static int __init setup_hwcaps(void)
+static void __init relocate_amode31_section(void)
 {
-       static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
-       struct cpuid cpu_id;
-       int i;
-
-       /*
-        * The store facility list bits numbers as found in the principles
-        * of operation are numbered with bit 1UL<<31 as number 0 to
-        * bit 1UL<<0 as number 31.
-        *   Bit 0: instructions named N3, "backported" to esa-mode
-        *   Bit 2: z/Architecture mode is active
-        *   Bit 7: the store-facility-list-extended facility is installed
-        *   Bit 17: the message-security assist is installed
-        *   Bit 19: the long-displacement facility is installed
-        *   Bit 21: the extended-immediate facility is installed
-        *   Bit 22: extended-translation facility 3 is installed
-        *   Bit 30: extended-translation facility 3 enhancement facility
-        * These get translated to:
-        *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
-        *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
-        *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
-        *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
-        */
-       for (i = 0; i < 6; i++)
-               if (test_facility(stfl_bits[i]))
-                       elf_hwcap |= 1UL << i;
-
-       if (test_facility(22) && test_facility(30))
-               elf_hwcap |= HWCAP_S390_ETF3EH;
-
-       /*
-        * Check for additional facilities with store-facility-list-extended.
-        * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
-        * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
-        * as stored by stfl, bits 32-xxx contain additional facilities.
-        * How many facility words are stored depends on the number of
-        * doublewords passed to the instruction. The additional facilities
-        * are:
-        *   Bit 42: decimal floating point facility is installed
-        *   Bit 44: perform floating point operation facility is installed
-        * translated to:
-        *   HWCAP_S390_DFP bit 6 (42 && 44).
-        */
-       if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
-               elf_hwcap |= HWCAP_S390_DFP;
-
-       /*
-        * Huge page support HWCAP_S390_HPAGE is bit 7.
-        */
-       if (MACHINE_HAS_EDAT1)
-               elf_hwcap |= HWCAP_S390_HPAGE;
-
-       /*
-        * 64-bit register support for 31-bit processes
-        * HWCAP_S390_HIGH_GPRS is bit 9.
-        */
-       elf_hwcap |= HWCAP_S390_HIGH_GPRS;
-
-       /*
-        * Transactional execution support HWCAP_S390_TE is bit 10.
-        */
-       if (MACHINE_HAS_TE)
-               elf_hwcap |= HWCAP_S390_TE;
-
-       /*
-        * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
-        * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
-        * instead of facility bit 129.
-        */
-       if (MACHINE_HAS_VX) {
-               elf_hwcap |= HWCAP_S390_VXRS;
-               if (test_facility(134))
-                       elf_hwcap |= HWCAP_S390_VXRS_BCD;
-               if (test_facility(135))
-                       elf_hwcap |= HWCAP_S390_VXRS_EXT;
-               if (test_facility(148))
-                       elf_hwcap |= HWCAP_S390_VXRS_EXT2;
-               if (test_facility(152))
-                       elf_hwcap |= HWCAP_S390_VXRS_PDE;
-       }
-       if (test_facility(150))
-               elf_hwcap |= HWCAP_S390_SORT;
-       if (test_facility(151))
-               elf_hwcap |= HWCAP_S390_DFLT;
-
-       /*
-        * Guarded storage support HWCAP_S390_GS is bit 12.
-        */
-       if (MACHINE_HAS_GS)
-               elf_hwcap |= HWCAP_S390_GS;
-
-       get_cpu_id(&cpu_id);
-       add_device_randomness(&cpu_id, sizeof(cpu_id));
-       switch (cpu_id.machine) {
-       case 0x2064:
-       case 0x2066:
-       default:        /* Use "z900" as default for 64 bit kernels. */
-               strcpy(elf_platform, "z900");
-               break;
-       case 0x2084:
-       case 0x2086:
-               strcpy(elf_platform, "z990");
-               break;
-       case 0x2094:
-       case 0x2096:
-               strcpy(elf_platform, "z9-109");
-               break;
-       case 0x2097:
-       case 0x2098:
-               strcpy(elf_platform, "z10");
-               break;
-       case 0x2817:
-       case 0x2818:
-               strcpy(elf_platform, "z196");
-               break;
-       case 0x2827:
-       case 0x2828:
-               strcpy(elf_platform, "zEC12");
-               break;
-       case 0x2964:
-       case 0x2965:
-               strcpy(elf_platform, "z13");
-               break;
-       case 0x3906:
-       case 0x3907:
-               strcpy(elf_platform, "z14");
-               break;
-       case 0x8561:
-       case 0x8562:
-               strcpy(elf_platform, "z15");
-               break;
-       }
-
-       /*
-        * Virtualization support HWCAP_INT_SIE is bit 0.
-        */
-       if (sclp.has_sief2)
-               int_hwcap |= HWCAP_INT_SIE;
+       unsigned long amode31_addr, amode31_size;
+       long amode31_offset;
+       long *ptr;
+
+       /* Allocate a new AMODE31 capable memory region */
+       amode31_size = __eamode31 - __samode31;
+       pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
+       amode31_addr = (unsigned long)memblock_alloc_low(amode31_size, PAGE_SIZE);
+       if (!amode31_addr)
+               panic("Failed to allocate memory for AMODE31 section\n");
+       amode31_offset = amode31_addr - __samode31;
+
+       /* Move original AMODE31 section to the new one */
+       memmove((void *)amode31_addr, (void *)__samode31, amode31_size);
+       /* Zero out the old AMODE31 section to catch invalid accesses within it */
+       memset((void *)__samode31, 0, amode31_size);
+
+       /* Update all AMODE31 region references */
+       for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
+               *ptr += amode31_offset;
+}
 
-       return 0;
+/* This must be called after AMODE31 relocation */
+static void __init setup_cr(void)
+{
+       union ctlreg2 cr2;
+       union ctlreg5 cr5;
+       union ctlreg15 cr15;
+
+       __ctl_duct[1] = (unsigned long)__ctl_aste;
+       __ctl_duct[2] = (unsigned long)__ctl_aste;
+       __ctl_duct[4] = (unsigned long)__ctl_duald;
+
+       /* Update control registers CR2, CR5 and CR15 */
+       __ctl_store(cr2.val, 2, 2);
+       __ctl_store(cr5.val, 5, 5);
+       __ctl_store(cr15.val, 15, 15);
+       cr2.ducto = (unsigned long)__ctl_duct >> 6;
+       cr5.pasteo = (unsigned long)__ctl_duct >> 6;
+       cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
+       __ctl_load(cr2.val, 2, 2);
+       __ctl_load(cr5.val, 5, 5);
+       __ctl_load(cr15.val, 15, 15);
 }
-arch_initcall(setup_hwcaps);
 
 /*
  * Add system information as device randomness
@@ -1059,6 +1010,9 @@ void __init setup_arch(char **cmdline_p)
 
        free_mem_detect_info();
 
+       relocate_amode31_section();
+       setup_cr();
+
        setup_uv();
        setup_memory_end();
        setup_memory();
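
The relocate_amode31_section() added above moves the AMODE31 code and data into
a freshly allocated region below 2 GB and then adjusts every recorded reference
by the relocation offset. A minimal user-space C sketch of that fixup idea
follows; it is an editorial illustration only, and the names old_block, refs[],
move_block() and user_ptr are made up rather than taken from the patch:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    static char old_block[64] = "data that must stay reachable";
    static char *user_ptr = old_block;      /* a reference into the block */
    static char **refs[] = { &user_ptr };   /* table of references to fix up */

    /* Move the block and patch every registered reference by the offset. */
    static char *move_block(void)
    {
            char *new_block = malloc(sizeof(old_block));
            intptr_t offset;
            size_t i;

            if (!new_block)
                    return NULL;
            memcpy(new_block, old_block, sizeof(old_block));
            memset(old_block, 0, sizeof(old_block)); /* catch stale accesses */
            offset = (intptr_t)new_block - (intptr_t)old_block;
            for (i = 0; i < sizeof(refs) / sizeof(refs[0]); i++)
                    *refs[i] = (char *)((intptr_t)*refs[i] + offset);
            return new_block;
    }

    int main(void)
    {
            move_block();
            printf("%s\n", user_ptr);        /* still valid after the move */
            return 0;
    }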
index 78ef53b..307f5d9 100644 (file)
@@ -533,9 +533,3 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
         */
        restore_saved_sigmask();
 }
-
-void do_notify_resume(struct pt_regs *regs)
-{
-       tracehook_notify_resume(regs);
-       rseq_handle_notify_resume(NULL, regs);
-}
index 8984711..2a991e4 100644 (file)
@@ -252,6 +252,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
        cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
        lc->cpu_nr = cpu;
+       lc->restart_flags = RESTART_FLAG_CTLREGS;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
        lc->percpu_offset = __per_cpu_offset[cpu];
@@ -294,10 +295,10 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 
        cpu = pcpu - pcpu_devices;
        lc = lowcore_ptr[cpu];
-       lc->restart_stack = lc->nodat_stack;
+       lc->restart_stack = lc->kernel_stack;
        lc->restart_fn = (unsigned long) func;
        lc->restart_data = (unsigned long) data;
-       lc->restart_source = -1UL;
+       lc->restart_source = -1U;
        pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
 }
 
@@ -311,12 +312,12 @@ static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
        func(data);     /* should not return */
 }
 
-static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
-                                               pcpu_delegate_fn *func,
-                                               void *data, unsigned long stack)
+static void pcpu_delegate(struct pcpu *pcpu,
+                         pcpu_delegate_fn *func,
+                         void *data, unsigned long stack)
 {
        struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
-       unsigned long source_cpu = stap();
+       unsigned int source_cpu = stap();
 
        __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        if (pcpu->address == source_cpu) {
@@ -569,6 +570,9 @@ static void smp_ctl_bit_callback(void *info)
        __ctl_load(cregs, 0, 15);
 }
 
+static DEFINE_SPINLOCK(ctl_lock);
+static unsigned long ctlreg;
+
 /*
  * Set a bit in a control register of all cpus
  */
@@ -576,6 +580,11 @@ void smp_ctl_set_bit(int cr, int bit)
 {
        struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
 
+       spin_lock(&ctl_lock);
+       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
+       __set_bit(bit, &ctlreg);
+       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
+       spin_unlock(&ctl_lock);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -587,6 +596,11 @@ void smp_ctl_clear_bit(int cr, int bit)
 {
        struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
 
+       spin_lock(&ctl_lock);
+       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
+       __clear_bit(bit, &ctlreg);
+       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
+       spin_unlock(&ctl_lock);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
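
The ctl_lock/ctlreg pair introduced above makes smp_ctl_set_bit() and
smp_ctl_clear_bit() perform the read-modify-write of the saved control-register
copy under a lock before the change is broadcast with on_each_cpu(), so
concurrent callers cannot lose each other's bit updates. A rough user-space
sketch of the same locking pattern follows; cached_creg, creg_lock and
apply_everywhere() are hypothetical stand-ins, not kernel APIs:

    #include <pthread.h>
    #include <stdio.h>

    static unsigned long cached_creg;
    static pthread_mutex_t creg_lock = PTHREAD_MUTEX_INITIALIZER;

    /* stands in for on_each_cpu(): apply 'val' to the live register copies */
    static void apply_everywhere(unsigned long val)
    {
            (void)val;
    }

    static void ctl_set_bit_sketch(int bit)
    {
            unsigned long val;

            pthread_mutex_lock(&creg_lock);
            cached_creg |= 1UL << bit;  /* read-modify-write of the saved copy */
            val = cached_creg;
            pthread_mutex_unlock(&creg_lock);
            apply_everywhere(val);
    }

    static void ctl_clear_bit_sketch(int bit)
    {
            unsigned long val;

            pthread_mutex_lock(&creg_lock);
            cached_creg &= ~(1UL << bit);
            val = cached_creg;
            pthread_mutex_unlock(&creg_lock);
            apply_everywhere(val);
    }

    static void *worker(void *arg)
    {
            int bit = (int)(long)arg;

            ctl_set_bit_sketch(bit);
            ctl_clear_bit_sketch(bit);
            return NULL;
    }

    int main(void)
    {
            pthread_t t1, t2;

            pthread_create(&t1, NULL, worker, (void *)2L);
            pthread_create(&t2, NULL, worker, (void *)5L);
            pthread_join(t1, NULL);
            pthread_join(t2, NULL);
            printf("final mask: %#lx\n", cached_creg);
            return 0;
    }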
@@ -673,7 +687,7 @@ void __init smp_save_dump_cpus(void)
        unsigned long page;
        bool is_boot_cpu;
 
-       if (!(OLDMEM_BASE || is_ipl_type_dump()))
+       if (!(oldmem_data.start || is_ipl_type_dump()))
                /* No previous system present, normal boot. */
                return;
        /* Allocate a page as dumping area for the store status sigps */
@@ -704,12 +718,12 @@ void __init smp_save_dump_cpus(void)
                 * these registers an SCLP request is required which is
                 * done by drivers/s390/char/zcore.c:init_cpu_info()
                 */
-               if (!is_boot_cpu || OLDMEM_BASE)
+               if (!is_boot_cpu || oldmem_data.start)
                        /* Get the CPU registers */
                        smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
        }
        memblock_free(page, PAGE_SIZE);
-       diag_dma_ops.diag308_reset();
+       diag_amode31_ops.diag308_reset();
        pcpu_set_smt(0);
 }
 #endif /* CONFIG_CRASH_DUMP */
@@ -793,7 +807,7 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
        u16 core_id;
        int nr, i;
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        nr = 0;
        cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
@@ -816,7 +830,7 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
                nr += smp_add_core(&info->core[i], &avail, configured, early);
        }
        mutex_unlock(&smp_cpu_state_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return nr;
 }
 
@@ -868,11 +882,19 @@ void __init smp_detect_cpus(void)
        memblock_free_early((unsigned long)info, sizeof(*info));
 }
 
-static void smp_init_secondary(void)
+/*
+ *     Activate a secondary processor.
+ */
+static void smp_start_secondary(void *cpuvoid)
 {
        int cpu = raw_smp_processor_id();
 
        S390_lowcore.last_update_clock = get_tod_clock();
+       S390_lowcore.restart_stack = (unsigned long)restart_stack;
+       S390_lowcore.restart_fn = (unsigned long)do_restart;
+       S390_lowcore.restart_data = 0;
+       S390_lowcore.restart_source = -1U;
+       S390_lowcore.restart_flags = 0;
        restore_access_regs(S390_lowcore.access_regs_save_area);
        cpu_init();
        rcu_cpu_starting(cpu);
@@ -892,20 +914,6 @@ static void smp_init_secondary(void)
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
-/*
- *     Activate a secondary processor.
- */
-static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
-{
-       S390_lowcore.restart_stack = (unsigned long) restart_stack;
-       S390_lowcore.restart_fn = (unsigned long) do_restart;
-       S390_lowcore.restart_data = 0;
-       S390_lowcore.restart_source = -1UL;
-       __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
-       __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
-       call_on_stack_noreturn(smp_init_secondary, S390_lowcore.kernel_stack);
-}
-
 /* Upping and downing of CPUs */
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
@@ -1055,7 +1063,7 @@ static ssize_t cpu_configure_store(struct device *dev,
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        /* disallow configuration changes of online cpus and cpu 0 */
@@ -1104,7 +1112,7 @@ static ssize_t cpu_configure_store(struct device *dev,
        }
 out:
        mutex_unlock(&smp_cpu_state_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return rc ? rc : count;
 }
 static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
diff --git a/arch/s390/kernel/text_amode31.S b/arch/s390/kernel/text_amode31.S
new file mode 100644 (file)
index 0000000..868e4a6
--- /dev/null
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Code that needs to run below 2 GB.
+ *
+ * Copyright IBM Corp. 2019
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+#include <asm/sigp.h>
+
+       .section .amode31.text,"ax"
+/*
+ * Simplified version of an expoline thunk. The normal thunks cannot be used
+ * here, because they might be more than 2 GB away and therefore not reachable
+ * by a relative branch. No comdat, exrl, etc. optimizations are used here,
+ * because this only affects a few functions that are not performance-relevant.
+ */
+       .macro BR_EX_AMODE31_r14
+       larl    %r1,0f
+       ex      0,0(%r1)
+       j       .
+0:     br      %r14
+       .endm
+
+/*
+ * int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode)
+ */
+ENTRY(_diag14_amode31)
+       lgr     %r1,%r2
+       lgr     %r2,%r3
+       lgr     %r3,%r4
+       lhi     %r5,-EIO
+       sam31
+       diag    %r1,%r2,0x14
+.Ldiag14_ex:
+       ipm     %r5
+       srl     %r5,28
+.Ldiag14_fault:
+       sam64
+       lgfr    %r2,%r5
+       BR_EX_AMODE31_r14
+       EX_TABLE_AMODE31(.Ldiag14_ex, .Ldiag14_fault)
+ENDPROC(_diag14_amode31)
+
+/*
+ * int _diag210_amode31(struct diag210 *addr)
+ */
+ENTRY(_diag210_amode31)
+       lgr     %r1,%r2
+       lhi     %r2,-1
+       sam31
+       diag    %r1,%r0,0x210
+.Ldiag210_ex:
+       ipm     %r2
+       srl     %r2,28
+.Ldiag210_fault:
+       sam64
+       lgfr    %r2,%r2
+       BR_EX_AMODE31_r14
+       EX_TABLE_AMODE31(.Ldiag210_ex, .Ldiag210_fault)
+ENDPROC(_diag210_amode31)
+
+/*
+ * int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode)
+ */
+ENTRY(_diag26c_amode31)
+       lghi    %r5,-EOPNOTSUPP
+       sam31
+       diag    %r2,%r4,0x26c
+.Ldiag26c_ex:
+       sam64
+       lgfr    %r2,%r5
+       BR_EX_AMODE31_r14
+       EX_TABLE_AMODE31(.Ldiag26c_ex, .Ldiag26c_ex)
+ENDPROC(_diag26c_amode31)
+
+/*
+ * void _diag0c_amode31(struct hypfs_diag0c_entry *entry)
+ */
+ENTRY(_diag0c_amode31)
+       sam31
+       diag    %r2,%r2,0x0c
+       sam64
+       BR_EX_AMODE31_r14
+ENDPROC(_diag0c_amode31)
+
+/*
+ * void _diag308_reset_amode31(void)
+ *
+ * Calls diag 308 subcode 1 and continues execution
+ */
+ENTRY(_diag308_reset_amode31)
+       larl    %r4,.Lctlregs           # Save control registers
+       stctg   %c0,%c15,0(%r4)
+       lg      %r2,0(%r4)              # Disable lowcore protection
+       nilh    %r2,0xefff
+       larl    %r4,.Lctlreg0
+       stg     %r2,0(%r4)
+       lctlg   %c0,%c0,0(%r4)
+       larl    %r4,.Lfpctl             # Floating point control register
+       stfpc   0(%r4)
+       larl    %r4,.Lprefix            # Save prefix register
+       stpx    0(%r4)
+       larl    %r4,.Lprefix_zero       # Set prefix register to 0
+       spx     0(%r4)
+       larl    %r4,.Lcontinue_psw      # Save PSW flags
+       epsw    %r2,%r3
+       stm     %r2,%r3,0(%r4)
+       larl    %r4,.Lrestart_part2     # Setup restart PSW at absolute 0
+       larl    %r3,.Lrestart_diag308_psw
+       og      %r4,0(%r3)              # Save PSW
+       lghi    %r3,0
+       sturg   %r4,%r3                 # Use sturg, because of large pages
+       lghi    %r1,1
+       lghi    %r0,0
+       diag    %r0,%r1,0x308
+.Lrestart_part2:
+       lhi     %r0,0                   # Load r0 with zero
+       lhi     %r1,2                   # Use mode 2 = ESAME (dump)
+       sigp    %r1,%r0,SIGP_SET_ARCHITECTURE   # Switch to ESAME mode
+       sam64                           # Switch to 64 bit addressing mode
+       larl    %r4,.Lctlregs           # Restore control registers
+       lctlg   %c0,%c15,0(%r4)
+       larl    %r4,.Lfpctl             # Restore floating point ctl register
+       lfpc    0(%r4)
+       larl    %r4,.Lprefix            # Restore prefix register
+       spx     0(%r4)
+       larl    %r4,.Lcontinue_psw      # Restore PSW flags
+       larl    %r2,.Lcontinue
+       stg     %r2,8(%r4)
+       lpswe   0(%r4)
+.Lcontinue:
+       BR_EX_AMODE31_r14
+ENDPROC(_diag308_reset_amode31)
+
+       .section .amode31.data,"aw",@progbits
+.align 8
+.Lrestart_diag308_psw:
+       .long   0x00080000,0x80000000
+
+.align 8
+.Lcontinue_psw:
+       .quad   0,0
+
+.align 8
+.Lctlreg0:
+       .quad   0
+.Lctlregs:
+       .rept   16
+       .quad   0
+       .endr
+.Lfpctl:
+       .long   0
+.Lprefix:
+       .long   0
+.Lprefix_zero:
+       .long   0
index 26aa261..d2458a2 100644 (file)
@@ -406,7 +406,7 @@ static ssize_t dispatching_store(struct device *dev,
        if (val != 0 && val != 1)
                return -EINVAL;
        rc = 0;
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        if (cpu_management == val)
                goto out;
@@ -417,7 +417,7 @@ static ssize_t dispatching_store(struct device *dev,
        topology_expect_change();
 out:
        mutex_unlock(&smp_cpu_state_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return rc ? rc : count;
 }
 static DEVICE_ATTR_RW(dispatching);
index 7694727..bcefc21 100644 (file)
@@ -291,7 +291,7 @@ static void __init test_monitor_call(void)
 
 void __init trap_init(void)
 {
-       sort_extable(__start_dma_ex_table, __stop_dma_ex_table);
+       sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
        local_mcck_enable();
        test_monitor_call();
 }
index aeb0a15..5a656c7 100644 (file)
@@ -51,24 +51,9 @@ void __init setup_uv(void)
 {
        unsigned long uv_stor_base;
 
-       /*
-        * keep these conditions in line with has_uv_sec_stor_limit()
-        */
        if (!is_prot_virt_host())
                return;
 
-       if (is_prot_virt_guest()) {
-               prot_virt_host = 0;
-               pr_warn("Protected virtualization not available in protected guests.");
-               return;
-       }
-
-       if (!test_facility(158)) {
-               prot_virt_host = 0;
-               pr_warn("Protected virtualization not supported by the hardware.");
-               return;
-       }
-
        uv_stor_base = (unsigned long)memblock_alloc_try_nid(
                uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
                MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
index 3457dcf..e3e6ac5 100644 (file)
@@ -36,6 +36,7 @@ CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 # Force dependency (incbin is bad)
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
index bff50b6..edf5ff1 100644 (file)
@@ -51,6 +51,7 @@ SECTIONS
 
        .rela.dyn ALIGN(8) : { *(.rela.dyn) }
        .got ALIGN(8)   : { *(.got .toc) }
+       .got.plt ALIGN(8) : { *(.got.plt) }
 
        _end = .;
        PROVIDE(end = .);
index 2a2092c..6568de2 100644 (file)
@@ -39,6 +39,7 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 # Force dependency (incbin is bad)
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
index d4fb336..4461ea1 100644 (file)
@@ -51,6 +51,7 @@ SECTIONS
 
        .rela.dyn ALIGN(8) : { *(.rela.dyn) }
        .got ALIGN(8)   : { *(.got .toc) }
+       .got.plt ALIGN(8) : { *(.got.plt) }
 
        _end = .;
        PROVIDE(end = .);
index 4c0e191..63bdb9e 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <asm/thread_info.h>
 #include <asm/page.h>
+#include <asm/ftrace.lds.h>
 
 /*
  * Put .bss..swapper_pg_dir as the first thing in .bss. This will
@@ -46,6 +47,7 @@ SECTIONS
                KPROBES_TEXT
                IRQENTRY_TEXT
                SOFTIRQENTRY_TEXT
+               FTRACE_HOTPATCH_TRAMPOLINES_TEXT
                *(.text.*_indirect_*)
                *(.fixup)
                *(.gnu.warning)
@@ -71,6 +73,13 @@ SECTIONS
        RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
        BOOT_DATA_PRESERVED
 
+       . = ALIGN(8);
+       .amode31.refs : {
+               _start_amode31_refs = .;
+               *(.amode31.refs)
+               _end_amode31_refs = .;
+       }
+
        _edata = .;             /* End of data section */
 
        /* will be freed after init */
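
The .amode31.refs output section added above collects all __amode31_ref
pointers between the _start_amode31_refs and _end_amode31_refs symbols so that
relocate_amode31_section() can walk and fix them up. Below is a small
stand-alone sketch of the underlying technique of grouping objects in a named
section and iterating over it via linker-provided bounds; the section name
"myrefs" and the __myref macro are invented for the example, relying on GNU
ld generating __start_<name>/__stop_<name> symbols for section names that are
valid C identifiers:

    #include <stdio.h>

    #define __myref __attribute__((section("myrefs"), used))

    static long first __myref = 1;
    static long second __myref = 2;

    /* Bounds provided by the linker for the "myrefs" section. */
    extern long __start_myrefs[];
    extern long __stop_myrefs[];

    int main(void)
    {
            long *p;

            for (p = __start_myrefs; p < __stop_myrefs; p++)
                    printf("%ld\n", *p);
            return 0;
    }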
@@ -136,6 +145,32 @@ SECTIONS
 
        BOOT_DATA
 
+       /*
+        * .amode31 section for code, data, ex_table that need to stay
+        * below 2 GB, even when the kernel is relocated above 2 GB.
+        */
+       . = ALIGN(PAGE_SIZE);
+       _samode31 = .;
+       .amode31.text : {
+               _stext_amode31 = .;
+               *(.amode31.text)
+               *(.amode31.text.*_indirect_*)
+               . = ALIGN(PAGE_SIZE);
+               _etext_amode31 = .;
+       }
+       . = ALIGN(16);
+       .amode31.ex_table : {
+               _start_amode31_ex_table = .;
+               KEEP(*(.amode31.ex_table))
+               _stop_amode31_ex_table = .;
+       }
+       . = ALIGN(PAGE_SIZE);
+       .amode31.data : {
+               *(.amode31.data)
+       }
+       . = ALIGN(PAGE_SIZE);
+       _eamode31 = .;
+
        /* early.c uses stsi, which requires page aligned data. */
        . = ALIGN(PAGE_SIZE);
        INIT_DATA_SECTION(0x100)
index f289afe..bccbf39 100644 (file)
@@ -7,17 +7,10 @@
  *              Heiko Carstens <heiko.carstens@de.ibm.com>,
  */
 
-#include <linux/sched.h>
+#include <linux/processor.h>
 #include <linux/delay.h>
-#include <linux/timex.h>
-#include <linux/export.h>
-#include <linux/irqflags.h>
-#include <linux/interrupt.h>
-#include <linux/jump_label.h>
-#include <linux/irq.h>
-#include <asm/vtimer.h>
 #include <asm/div64.h>
-#include <asm/idle.h>
+#include <asm/timex.h>
 
 void __delay(unsigned long loops)
 {
index e40a306..0b0c8c2 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/mm.h>
+#include <linux/kfence.h>
 #include <linux/kasan.h>
 #include <asm/ptdump.h>
 #include <asm/kasan.h>
@@ -21,6 +22,10 @@ enum address_markers_idx {
        IDENTITY_BEFORE_END_NR,
        KERNEL_START_NR,
        KERNEL_END_NR,
+#ifdef CONFIG_KFENCE
+       KFENCE_START_NR,
+       KFENCE_END_NR,
+#endif
        IDENTITY_AFTER_NR,
        IDENTITY_AFTER_END_NR,
 #ifdef CONFIG_KASAN
@@ -40,6 +45,10 @@ static struct addr_marker address_markers[] = {
        [IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
        [KERNEL_START_NR]       = {(unsigned long)_stext, "Kernel Image Start"},
        [KERNEL_END_NR]         = {(unsigned long)_end, "Kernel Image End"},
+#ifdef CONFIG_KFENCE
+       [KFENCE_START_NR]       = {0, "KFence Pool Start"},
+       [KFENCE_END_NR]         = {0, "KFence Pool End"},
+#endif
        [IDENTITY_AFTER_NR]     = {(unsigned long)_end, "Identity Mapping Start"},
        [IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
 #ifdef CONFIG_KASAN
@@ -248,6 +257,9 @@ static void sort_address_markers(void)
 
 static int pt_dump_init(void)
 {
+#ifdef CONFIG_KFENCE
+       unsigned long kfence_start = (unsigned long)__kfence_pool;
+#endif
        /*
         * Figure out the maximum virtual address being accessible with the
         * kernel ASCE. We need this to keep the page table walker functions
@@ -262,6 +274,10 @@ static int pt_dump_init(void)
        address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
        address_markers[VMALLOC_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
+#ifdef CONFIG_KFENCE
+       address_markers[KFENCE_START_NR].start_address = kfence_start;
+       address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
+#endif
        sort_address_markers();
 #ifdef CONFIG_PTDUMP_DEBUGFS
        debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
index e33c43b..212632d 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/hugetlb.h>
+#include <linux/kfence.h>
 #include <asm/asm-offsets.h>
 #include <asm/diag.h>
 #include <asm/gmap.h>
@@ -230,8 +231,8 @@ const struct exception_table_entry *s390_search_extables(unsigned long addr)
 {
        const struct exception_table_entry *fixup;
 
-       fixup = search_extable(__start_dma_ex_table,
-                              __stop_dma_ex_table - __start_dma_ex_table,
+       fixup = search_extable(__start_amode31_ex_table,
+                              __stop_amode31_ex_table - __start_amode31_ex_table,
                               addr);
        if (!fixup)
                fixup = search_exception_tables(addr);
@@ -356,6 +357,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
        unsigned long address;
        unsigned int flags;
        vm_fault_t fault;
+       bool is_write;
 
        tsk = current;
        /*
@@ -369,6 +371,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 
        mm = tsk->mm;
        trans_exc_code = regs->int_parm_long;
+       address = trans_exc_code & __FAIL_ADDR_MASK;
+       is_write = (trans_exc_code & store_indication) == 0x400;
 
        /*
         * Verify that the fault happened in user space, that
@@ -379,6 +383,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
        type = get_fault_type(regs);
        switch (type) {
        case KERNEL_FAULT:
+               if (kfence_handle_page_fault(address, is_write, regs))
+                       return 0;
                goto out;
        case USER_FAULT:
        case GMAP_FAULT:
@@ -387,12 +393,11 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
                break;
        }
 
-       address = trans_exc_code & __FAIL_ADDR_MASK;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_DEFAULT;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
-       if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+       if (access == VM_WRITE || is_write)
                flags |= FAULT_FLAG_WRITE;
        mmap_read_lock(mm);
 
index 8ac710d..f3db3ca 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>
 #include <asm/ptdump.h>
 #include <asm/dma.h>
 #include <asm/lowcore.h>
@@ -200,7 +201,7 @@ void __init mem_init(void)
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
        pv_init();
-
+       kfence_split_mapping();
        /* Setup guest page hinting */
        cmma_init();
 
index a0fdc6d..3e47351 100644 (file)
@@ -107,6 +107,9 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
                sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
        }
 
+       /*
+        * The first 1MB of 1:1 mapping is mapped with 4KB pages
+        */
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
@@ -157,30 +160,26 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
 
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
-                       if (mode == POPULATE_ZERO_SHADOW &&
-                           IS_ALIGNED(address, PMD_SIZE) &&
+                       if (IS_ALIGNED(address, PMD_SIZE) &&
                            end - address >= PMD_SIZE) {
-                               pmd_populate(&init_mm, pm_dir,
-                                               kasan_early_shadow_pte);
-                               address = (address + PMD_SIZE) & PMD_MASK;
-                               continue;
-                       }
-                       /* the first megabyte of 1:1 is mapped with 4k pages */
-                       if (has_edat && address && end - address >= PMD_SIZE &&
-                           mode != POPULATE_ZERO_SHADOW) {
-                               void *page;
-
-                               if (mode == POPULATE_ONE2ONE) {
-                                       page = (void *)address;
-                               } else {
-                                       page = kasan_early_alloc_segment();
-                                       memset(page, 0, _SEGMENT_SIZE);
+                               if (mode == POPULATE_ZERO_SHADOW) {
+                                       pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
+                                       address = (address + PMD_SIZE) & PMD_MASK;
+                                       continue;
+                               } else if (has_edat && address) {
+                                       void *page;
+
+                                       if (mode == POPULATE_ONE2ONE) {
+                                               page = (void *)address;
+                                       } else {
+                                               page = kasan_early_alloc_segment();
+                                               memset(page, 0, _SEGMENT_SIZE);
+                                       }
+                                       pmd_val(*pm_dir) = __pa(page) | sgt_prot;
+                                       address = (address + PMD_SIZE) & PMD_MASK;
+                                       continue;
                                }
-                               pmd_val(*pm_dir) = __pa(page) | sgt_prot;
-                               address = (address + PMD_SIZE) & PMD_MASK;
-                               continue;
                        }
-
                        pt_dir = kasan_early_pte_alloc();
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
@@ -300,7 +299,7 @@ void __init kasan_early_init(void)
        pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
                initrd_end =
-                   round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
+                   round_up(initrd_data.start + initrd_data.size, _SEGMENT_SIZE);
                pgalloc_low = max(pgalloc_low, initrd_end);
        }
 
index a0f54bd..9663ce3 100644 (file)
@@ -228,7 +228,7 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
        void *bounce = (void *) addr;
        unsigned long size;
 
-       get_online_cpus();
+       cpus_read_lock();
        preempt_disable();
        if (is_swapped(addr)) {
                size = PAGE_SIZE - (addr & ~PAGE_MASK);
@@ -237,7 +237,7 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
                        memcpy_absolute(bounce, (void *) addr, size);
        }
        preempt_enable();
-       put_online_cpus();
+       cpus_read_unlock();
        return bounce;
 }
 
index 68b1530..18a6381 100644 (file)
@@ -228,46 +228,3 @@ void arch_set_page_dat(struct page *page, int order)
                return;
        set_page_stable_dat(page, order);
 }
-
-void arch_set_page_nodat(struct page *page, int order)
-{
-       if (cmma_flag < 2)
-               return;
-       set_page_stable_nodat(page, order);
-}
-
-int arch_test_page_nodat(struct page *page)
-{
-       unsigned char state;
-
-       if (cmma_flag < 2)
-               return 0;
-       state = get_page_state(page);
-       return !!(state & 0x20);
-}
-
-void arch_set_page_states(int make_stable)
-{
-       unsigned long flags, order, t;
-       struct list_head *l;
-       struct page *page;
-       struct zone *zone;
-
-       if (!cmma_flag)
-               return;
-       if (make_stable)
-               drain_local_pages(NULL);
-       for_each_populated_zone(zone) {
-               spin_lock_irqsave(&zone->lock, flags);
-               for_each_migratetype_order(order, t) {
-                       list_for_each(l, &zone->free_area[order].free_list[t]) {
-                               page = list_entry(l, struct page, lru);
-                               if (make_stable)
-                                       set_page_stable_dat(page, order);
-                               else
-                                       set_page_unused(page, order);
-                       }
-               }
-               spin_unlock_irqrestore(&zone->lock, flags);
-       }
-}
index ed8e5b3..fdc86c0 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/cacheflush.h>
 #include <asm/facility.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>
 #include <asm/page.h>
 #include <asm/set_memory.h>
 
@@ -85,6 +86,8 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
 {
        pte_t *ptep, new;
 
+       if (flags == SET_MEMORY_4K)
+               return 0;
        ptep = pte_offset_kernel(pmdp, addr);
        do {
                new = *ptep;
@@ -155,6 +158,7 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
                          unsigned long flags)
 {
        unsigned long next;
+       int need_split;
        pmd_t *pmdp;
        int rc = 0;
 
@@ -164,7 +168,10 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
                        return -EINVAL;
                next = pmd_addr_end(addr, end);
                if (pmd_large(*pmdp)) {
-                       if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
+                       need_split  = !!(flags & SET_MEMORY_4K);
+                       need_split |= !!(addr & ~PMD_MASK);
+                       need_split |= !!(addr + PMD_SIZE > next);
+                       if (need_split) {
                                rc = split_pmd_page(pmdp, addr);
                                if (rc)
                                        return rc;
@@ -232,6 +239,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
                          unsigned long flags)
 {
        unsigned long next;
+       int need_split;
        pud_t *pudp;
        int rc = 0;
 
@@ -241,7 +249,10 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
                        return -EINVAL;
                next = pud_addr_end(addr, end);
                if (pud_large(*pudp)) {
-                       if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
+                       need_split  = !!(flags & SET_MEMORY_4K);
+                       need_split |= !!(addr & ~PUD_MASK);
+                       need_split |= !!(addr + PUD_SIZE > next);
+                       if (need_split) {
                                rc = split_pud_page(pudp, addr);
                                if (rc)
                                        break;
@@ -316,7 +327,7 @@ int __set_memory(unsigned long addr, int numpages, unsigned long flags)
        return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 
 static void ipte_range(pte_t *pte, unsigned long address, int nr)
 {
@@ -340,7 +351,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
        pte_t *pte;
 
        for (i = 0; i < numpages;) {
-               address = page_to_phys(page + i);
+               address = (unsigned long)page_to_virt(page + i);
                pte = virt_to_kpte(address);
                nr = (unsigned long)pte >> ilog2(sizeof(long));
                nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
index 96897fa..2b1c6d9 100644 (file)
@@ -581,7 +581,7 @@ void __init vmem_map_init(void)
        __set_memory((unsigned long)_sinittext,
                     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
-       __set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
+       __set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
 
        /* we need lowcore executable for our LPSWE instructions */
index b0993e0..e7e6788 100644 (file)
@@ -113,13 +113,16 @@ int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
 {
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
        struct zpci_fib fib = {0};
-       u8 status;
+       u8 cc, status;
 
        WARN_ON_ONCE(iota & 0x3fff);
        fib.pba = base;
        fib.pal = limit;
        fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
-       return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
+       cc = zpci_mod_fc(req, &fib, &status);
+       if (cc)
+               zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
+       return cc;
 }
 
 /* Modify PCI: Unregister I/O address translation parameters */
@@ -130,9 +133,9 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
        u8 cc, status;
 
        cc = zpci_mod_fc(req, &fib, &status);
-       if (cc == 3) /* Function already gone. */
-               cc = 0;
-       return cc ? -EIO : 0;
+       if (cc)
+               zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
+       return cc;
 }
 
 /* Modify PCI: Set PCI function measurement parameters */
@@ -560,9 +563,12 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
 
 int pcibios_add_device(struct pci_dev *pdev)
 {
+       struct zpci_dev *zdev = to_zpci(pdev);
        struct resource *res;
        int i;
 
+       /* The pdev has a reference to the zdev via its bus */
+       zpci_zdev_get(zdev);
        if (pdev->is_physfn)
                pdev->no_vf_scan = 1;
 
@@ -582,7 +588,10 @@ int pcibios_add_device(struct pci_dev *pdev)
 
 void pcibios_release_device(struct pci_dev *pdev)
 {
+       struct zpci_dev *zdev = to_zpci(pdev);
+
        zpci_unmap_resources(pdev);
+       zpci_zdev_put(zdev);
 }
 
 int pcibios_enable_device(struct pci_dev *pdev, int mask)
@@ -653,32 +662,37 @@ void zpci_free_domain(int domain)
 
 int zpci_enable_device(struct zpci_dev *zdev)
 {
-       int rc;
-
-       rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
-       if (rc)
-               goto out;
-
-       rc = zpci_dma_init_device(zdev);
-       if (rc)
-               goto out_dma;
+       u32 fh = zdev->fh;
+       int rc = 0;
 
-       return 0;
-
-out_dma:
-       clp_disable_fh(zdev);
-out:
+       if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
+               rc = -EIO;
+       else
+               zdev->fh = fh;
        return rc;
 }
 
 int zpci_disable_device(struct zpci_dev *zdev)
 {
-       zpci_dma_exit_device(zdev);
-       /*
-        * The zPCI function may already be disabled by the platform, this is
-        * detected in clp_disable_fh() which becomes a no-op.
-        */
-       return clp_disable_fh(zdev);
+       u32 fh = zdev->fh;
+       int cc, rc = 0;
+
+       cc = clp_disable_fh(zdev, &fh);
+       if (!cc) {
+               zdev->fh = fh;
+       } else if (cc == CLP_RC_SETPCIFN_ALRDY) {
+               pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
+                       zdev->fid);
+               /* Function is already disabled - update handle */
+               rc = clp_refresh_fh(zdev->fid, &fh);
+               if (!rc) {
+                       zdev->fh = fh;
+                       rc = -EINVAL;
+               }
+       } else {
+               rc = -EIO;
+       }
+       return rc;
 }
 
 /**
@@ -788,6 +802,11 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);
 
+       if (zdev->dma_table) {
+               rc = zpci_dma_exit_device(zdev);
+               if (rc)
+                       return rc;
+       }
        if (zdev_enabled(zdev)) {
                rc = zpci_disable_device(zdev);
                if (rc)
@@ -811,6 +830,8 @@ void zpci_release_device(struct kref *kref)
        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);
 
+       if (zdev->dma_table)
+               zpci_dma_exit_device(zdev);
        if (zdev_enabled(zdev))
                zpci_disable_device(zdev);
 
@@ -822,7 +843,8 @@ void zpci_release_device(struct kref *kref)
        case ZPCI_FN_STATE_STANDBY:
                if (zdev->has_hp_slot)
                        zpci_exit_slot(zdev);
-               zpci_cleanup_bus_resources(zdev);
+               if (zdev->has_resources)
+                       zpci_cleanup_bus_resources(zdev);
                zpci_bus_device_unregister(zdev);
                zpci_destroy_iommu(zdev);
                fallthrough;
@@ -886,7 +908,6 @@ static void zpci_mem_exit(void)
 }
 
 static unsigned int s390_pci_probe __initdata = 1;
-static unsigned int s390_pci_no_mio __initdata;
 unsigned int s390_pci_force_floating __initdata;
 static unsigned int s390_pci_initialized;
 
@@ -897,7 +918,7 @@ char * __init pcibios_setup(char *str)
                return NULL;
        }
        if (!strcmp(str, "nomio")) {
-               s390_pci_no_mio = 1;
+               S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
                return NULL;
        }
        if (!strcmp(str, "force_floating")) {
@@ -928,7 +949,7 @@ static int __init pci_base_init(void)
                return 0;
        }
 
-       if (test_facility(153) && !s390_pci_no_mio) {
+       if (MACHINE_HAS_PCI_MIO) {
                static_branch_enable(&have_mio);
                ctl_set_bit(2, 5);
        }
index 9629f97..5d77acb 100644 (file)
@@ -49,6 +49,11 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
                rc = zpci_enable_device(zdev);
                if (rc)
                        return rc;
+               rc = zpci_dma_init_device(zdev);
+               if (rc) {
+                       zpci_disable_device(zdev);
+                       return rc;
+               }
        }
 
        if (!zdev->has_resources) {
@@ -343,11 +348,11 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
 {
        int rc = -EINVAL;
 
-       zdev->zbus = zbus;
        if (zbus->function[zdev->devfn]) {
                pr_err("devfn %04x is already assigned\n", zdev->devfn);
                return rc;
        }
+       zdev->zbus = zbus;
        zbus->function[zdev->devfn] = zdev;
        zpci_nb_devices++;
 
@@ -367,6 +372,7 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
 
 error:
        zbus->function[zdev->devfn] = NULL;
+       zdev->zbus = NULL;
        zpci_nb_devices--;
        return rc;
 }
index b877a97..e359d26 100644 (file)
@@ -22,6 +22,11 @@ static inline void zpci_zdev_put(struct zpci_dev *zdev)
        kref_put(&zdev->kref, zpci_release_device);
 }
 
+static inline void zpci_zdev_get(struct zpci_dev *zdev)
+{
+       kref_get(&zdev->kref);
+}
+
 int zpci_alloc_domain(int domain);
 void zpci_free_domain(int domain);
 int zpci_setup_bus_resources(struct zpci_dev *zdev,
index d333159..51dc221 100644 (file)
@@ -212,17 +212,22 @@ out:
        return rc;
 }
 
-static int clp_refresh_fh(u32 fid);
-/*
- * Enable/Disable a given PCI function and update its function handle if
- * necessary
+/**
+ * clp_set_pci_fn() - Execute a command on a PCI function
+ * @zdev: Function that will be affected
+ * @fh: Out parameter for updated function handle
+ * @nr_dma_as: DMA address space number
+ * @command: The command code to execute
+ *
+ * Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
+ * > 0 for non-success platform responses
  */
-static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
+static int clp_set_pci_fn(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as, u8 command)
 {
        struct clp_req_rsp_set_pci *rrb;
        int rc, retries = 100;
-       u32 fid = zdev->fid;
 
+       *fh = 0;
        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;
@@ -245,17 +250,13 @@ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
                }
        } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
 
-       if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
+       if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
+               *fh = rrb->response.fh;
+       } else {
                zpci_err("Set PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
-       }
-
-       if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
-               zdev->fh = rrb->response.fh;
-       } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
-                       rrb->response.fh == 0) {
-               /* Function is already in desired state - update handle */
-               rc = clp_refresh_fh(fid);
+               if (!rc)
+                       rc = rrb->response.hdr.rsp;
        }
        clp_free_block(rrb);
        return rc;
@@ -295,35 +296,62 @@ int clp_setup_writeback_mio(void)
        return rc;
 }
 
-int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
+int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as)
 {
        int rc;
 
-       rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
-       zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
-       if (rc)
-               goto out;
-
-       if (zpci_use_mio(zdev)) {
-               rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
+       rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
+       zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
+       if (!rc && zpci_use_mio(zdev)) {
+               rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_MIO);
                zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
-                               zdev->fid, zdev->fh, rc);
+                               zdev->fid, *fh, rc);
                if (rc)
-                       clp_disable_fh(zdev);
+                       clp_disable_fh(zdev, fh);
        }
-out:
        return rc;
 }
 
-int clp_disable_fh(struct zpci_dev *zdev)
+int clp_disable_fh(struct zpci_dev *zdev, u32 *fh)
 {
        int rc;
 
        if (!zdev_enabled(zdev))
                return 0;
 
-       rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
-       zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
+       rc = clp_set_pci_fn(zdev, fh, 0, CLP_SET_DISABLE_PCI_FN);
+       zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
+       return rc;
+}
+
+static int clp_list_pci_req(struct clp_req_rsp_list_pci *rrb,
+                           u64 *resume_token, int *nentries)
+{
+       int rc;
+
+       memset(rrb, 0, sizeof(*rrb));
+       rrb->request.hdr.len = sizeof(rrb->request);
+       rrb->request.hdr.cmd = CLP_LIST_PCI;
+       /* store as many entries as possible */
+       rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
+       rrb->request.resume_token = *resume_token;
+
+       /* Get PCI function handle list */
+       rc = clp_req(rrb, CLP_LPS_PCI);
+       if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
+               zpci_err("List PCI FN:\n");
+               zpci_err_clp(rrb->response.hdr.rsp, rc);
+               return -EIO;
+       }
+
+       update_uid_checking(rrb->response.uid_checking);
+       WARN_ON_ONCE(rrb->response.entry_size !=
+               sizeof(struct clp_fh_list_entry));
+
+       *nentries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
+               rrb->response.entry_size;
+       *resume_token = rrb->response.resume_token;
+
        return rc;
 }
 
@@ -331,38 +359,40 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
                        void (*cb)(struct clp_fh_list_entry *, void *))
 {
        u64 resume_token = 0;
-       int entries, i, rc;
+       int nentries, i, rc;
 
        do {
-               memset(rrb, 0, sizeof(*rrb));
-               rrb->request.hdr.len = sizeof(rrb->request);
-               rrb->request.hdr.cmd = CLP_LIST_PCI;
-               /* store as many entries as possible */
-               rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
-               rrb->request.resume_token = resume_token;
-
-               /* Get PCI function handle list */
-               rc = clp_req(rrb, CLP_LPS_PCI);
-               if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
-                       zpci_err("List PCI FN:\n");
-                       zpci_err_clp(rrb->response.hdr.rsp, rc);
-                       rc = -EIO;
-                       goto out;
-               }
+               rc = clp_list_pci_req(rrb, &resume_token, &nentries);
+               if (rc)
+                       return rc;
+               for (i = 0; i < nentries; i++)
+                       cb(&rrb->response.fh_list[i], data);
+       } while (resume_token);
 
-               update_uid_checking(rrb->response.uid_checking);
-               WARN_ON_ONCE(rrb->response.entry_size !=
-                       sizeof(struct clp_fh_list_entry));
+       return rc;
+}
 
-               entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
-                       rrb->response.entry_size;
+static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
+                       struct clp_fh_list_entry *entry)
+{
+       struct clp_fh_list_entry *fh_list;
+       u64 resume_token = 0;
+       int nentries, i, rc;
 
-               resume_token = rrb->response.resume_token;
-               for (i = 0; i < entries; i++)
-                       cb(&rrb->response.fh_list[i], data);
+       do {
+               rc = clp_list_pci_req(rrb, &resume_token, &nentries);
+               if (rc)
+                       return rc;
+               for (i = 0; i < nentries; i++) {
+                       fh_list = rrb->response.fh_list;
+                       if (fh_list[i].fid == fid) {
+                               *entry = fh_list[i];
+                               return 0;
+                       }
+               }
        } while (resume_token);
-out:
-       return rc;
+
+       return -ENODEV;
 }
 
 static void __clp_add(struct clp_fh_list_entry *entry, void *data)
@@ -392,67 +422,41 @@ int clp_scan_pci_devices(void)
        return rc;
 }
 
-static void __clp_refresh_fh(struct clp_fh_list_entry *entry, void *data)
-{
-       struct zpci_dev *zdev;
-       u32 fid = *((u32 *)data);
-
-       if (!entry->vendor_id || fid != entry->fid)
-               return;
-
-       zdev = get_zdev_by_fid(fid);
-       if (!zdev)
-               return;
-
-       zdev->fh = entry->fh;
-}
-
 /*
- * Refresh the function handle of the function matching @fid
+ * Get the current function handle of the function matching @fid
  */
-static int clp_refresh_fh(u32 fid)
+int clp_refresh_fh(u32 fid, u32 *fh)
 {
        struct clp_req_rsp_list_pci *rrb;
+       struct clp_fh_list_entry entry;
        int rc;
 
        rrb = clp_alloc_block(GFP_NOWAIT);
        if (!rrb)
                return -ENOMEM;
 
-       rc = clp_list_pci(rrb, &fid, __clp_refresh_fh);
+       rc = clp_find_pci(rrb, fid, &entry);
+       if (!rc)
+               *fh = entry.fh;
 
        clp_free_block(rrb);
        return rc;
 }
 
-struct clp_state_data {
-       u32 fid;
-       enum zpci_state state;
-};
-
-static void __clp_get_state(struct clp_fh_list_entry *entry, void *data)
-{
-       struct clp_state_data *sd = data;
-
-       if (entry->fid != sd->fid)
-               return;
-
-       sd->state = entry->config_state;
-}
-
 int clp_get_state(u32 fid, enum zpci_state *state)
 {
        struct clp_req_rsp_list_pci *rrb;
-       struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
+       struct clp_fh_list_entry entry;
        int rc;
 
+       *state = ZPCI_FN_STATE_RESERVED;
        rrb = clp_alloc_block(GFP_ATOMIC);
        if (!rrb)
                return -ENOMEM;
 
-       rc = clp_list_pci(rrb, &sd, __clp_get_state);
+       rc = clp_find_pci(rrb, fid, &entry);
        if (!rc)
-               *state = sd.state;
+               *state = entry.config_state;
 
        clp_free_block(rrb);
        return rc;
index ebc9a49..58f2f7a 100644 (file)
@@ -590,10 +590,11 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
                }
 
        }
-       rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
-                               (u64) zdev->dma_table);
-       if (rc)
+       if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+                              (u64)zdev->dma_table)) {
+               rc = -EIO;
                goto free_bitmap;
+       }
 
        return 0;
 free_bitmap:
@@ -608,17 +609,25 @@ out:
        return rc;
 }
 
-void zpci_dma_exit_device(struct zpci_dev *zdev)
+int zpci_dma_exit_device(struct zpci_dev *zdev)
 {
+       int cc = 0;
+
        /*
         * At this point, if the device is part of an IOMMU domain, this would
         * be a strong hint towards a bug in the IOMMU API (common) code and/or
         * simultaneous access via IOMMU and DMA API. So let's issue a warning.
         */
        WARN_ON(zdev->s390_domain);
-
-       if (zpci_unregister_ioat(zdev, 0))
-               return;
+       if (zdev_enabled(zdev))
+               cc = zpci_unregister_ioat(zdev, 0);
+       /*
+        * cc == 3 indicates the function is gone already. This can happen
+        * if the function was deconfigured/disabled suddenly and we have not
+        * received a new handle yet.
+        */
+       if (cc && cc != 3)
+               return -EIO;
 
        dma_cleanup_tables(zdev->dma_table);
        zdev->dma_table = NULL;
@@ -626,8 +635,8 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
        zdev->iommu_bitmap = NULL;
        vfree(zdev->lazy_bitmap);
        zdev->lazy_bitmap = NULL;
-
        zdev->next_bit = 0;
+       return 0;
 }
 
 static int __init dma_alloc_cpu_table_caches(void)
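
With zpci_dma_exit_device() now returning an error, callers are expected to tear down the DMA translation tables first and disable the function afterwards, skipping whichever step does not apply. A minimal C sketch of that order (illustration only, not part of this patch; the function name is hypothetical and the pattern mirrors the callers changed in the following hunks):

	static int zpci_teardown_sketch(struct zpci_dev *zdev)
	{
		int rc = 0;

		if (zdev->dma_table)
			rc = zpci_dma_exit_device(zdev);
		if (!rc && zdev_enabled(zdev))
			rc = zpci_disable_device(zdev);
		return rc;
	}
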
index cd447b9..c856f80 100644 (file)
@@ -84,7 +84,10 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
        /* Even though the device is already gone we still
         * need to free zPCI resources as part of the disable.
         */
-       zpci_disable_device(zdev);
+       if (zdev->dma_table)
+               zpci_dma_exit_device(zdev);
+       if (zdev_enabled(zdev))
+               zpci_disable_device(zdev);
        zdev->state = ZPCI_FN_STATE_STANDBY;
 }
 
index 9c7de90..3823e15 100644 (file)
@@ -365,10 +365,6 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
        for_each_pci_msi_entry(msi, pdev) {
                if (!msi->irq)
                        continue;
-               if (msi->msi_attrib.is_msix)
-                       __pci_msix_desc_mask_irq(msi, 1);
-               else
-                       __pci_msi_desc_mask_irq(msi, 1, 1);
                irq_set_msi_desc(msi->irq, NULL);
                irq_free_desc(msi->irq);
                msi->msg.address_lo = 0;
index 6e2450c..335c281 100644 (file)
@@ -82,13 +82,26 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
        pci_lock_rescan_remove();
        if (pci_dev_is_added(pdev)) {
                pci_stop_and_remove_bus_device(pdev);
-               ret = zpci_disable_device(zdev);
-               if (ret)
-                       goto out;
+               if (zdev->dma_table) {
+                       ret = zpci_dma_exit_device(zdev);
+                       if (ret)
+                               goto out;
+               }
+
+               if (zdev_enabled(zdev)) {
+                       ret = zpci_disable_device(zdev);
+                       if (ret)
+                               goto out;
+               }
 
                ret = zpci_enable_device(zdev);
                if (ret)
                        goto out;
+               ret = zpci_dma_init_device(zdev);
+               if (ret) {
+                       zpci_disable_device(zdev);
+                       goto out;
+               }
                pci_rescan_bus(zdev->zbus->bus);
        }
 out:
index 21c4ebe..360ada8 100644 (file)
@@ -19,6 +19,7 @@ KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
 KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
index 0e207c4..6db9820 100644 (file)
@@ -189,6 +189,8 @@ ad  stosm   SI_URD
 ae     sigp    RS_RRRD
 af     mc      SI_URD
 b1     lra     RX_RRRD
+b200   lbear   S_RD
+b201   stbear  S_RD
 b202   stidp   S_RD
 b204   sck     S_RD
 b205   stck    S_RD
@@ -523,6 +525,7 @@ b931        clgfr   RRE_RR
 b938   sortl   RRE_RR
 b939   dfltcc  RRF_R0RR2
 b93a   kdsa    RRE_RR
+b93b   nnpa    RRE_00
 b93c   ppno    RRE_RR
 b93e   kimd    RRE_RR
 b93f   klmd    RRE_RR
@@ -562,6 +565,7 @@ b987        dlgr    RRE_RR
 b988   alcgr   RRE_RR
 b989   slbgr   RRE_RR
 b98a   cspg    RRE_RR
+b98b   rdp     RRF_RURR2
 b98d   epsw    RRE_RR
 b98e   idte    RRF_RURR2
 b98f   crdte   RRF_RURR2
@@ -876,19 +880,32 @@ e63d      vstrl   VSI_URDV
 e63f   vstrlr  VRS_RRDV
 e649   vlip    VRI_V0UU2
 e650   vcvb    VRR_RV0UU
+e651   vclzdp  VRR_VV0U2
 e652   vcvbg   VRR_RV0UU
+e654   vupkzh  VRR_VV0U2
+e655   vcnf    VRR_VV0UU2
+e656   vclfnh  VRR_VV0UU2
 e658   vcvd    VRI_VR0UU
 e659   vsrp    VRI_VVUUU2
 e65a   vcvdg   VRI_VR0UU
 e65b   vpsop   VRI_VVUUU2
+e65c   vupkzl  VRR_VV0U2
+e65d   vcfn    VRR_VV0UU2
+e65e   vclfnl  VRR_VV0UU2
 e65f   vtp     VRR_0V
+e670   vpkzr   VRI_VVV0UU2
 e671   vap     VRI_VVV0UU2
+e672   vsrpr   VRI_VVV0UU2
 e673   vsp     VRI_VVV0UU2
+e674   vschp   VRR_VVV0U0U
+e675   vcrnf   VRR_VVV0UU
 e677   vcp     VRR_0VV0U
 e678   vmp     VRI_VVV0UU2
 e679   vmsp    VRI_VVV0UU2
 e67a   vdp     VRI_VVV0UU2
 e67b   vrp     VRI_VVV0UU2
+e67c   vscshp  VRR_VVV
+e67d   vcsph   VRR_VVV0U0
 e67e   vsdp    VRI_VVV0UU2
 e700   vleb    VRX_VRRDU
 e701   vleh    VRX_VRRDU
@@ -1081,6 +1098,7 @@ eb61      stric   RSY_RDRU
 eb62   mric    RSY_RDRU
 eb6a   asi     SIY_IRD
 eb6e   alsi    SIY_IRD
+eb71   lpswey  SIY_URD
 eb7a   agsi    SIY_IRD
 eb7e   algsi   SIY_IRD
 eb80   icmh    RSY_RURD
index 1aedbfe..f9f3b14 100644 (file)
@@ -38,7 +38,7 @@ static void se7343_irq_demux(struct irq_desc *desc)
        mask = ioread16(se7343_irq_regs + PA_CPLD_ST_REG);
 
        for_each_set_bit(bit, &mask, SE7343_FPGA_IRQ_NR)
-               generic_handle_irq(irq_linear_revmap(se7343_irq_domain, bit));
+               generic_handle_domain_irq(se7343_irq_domain, bit);
 
        chip->irq_unmask(data);
 }
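
The helper used above folds the lookup and the handling into one call; to a first approximation (the exact return type is an assumption here) it behaves like:

	/* resolve hwirq within the domain and handle the mapped Linux IRQ */
	int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
	/* roughly generic_handle_irq(irq_linear_revmap(domain, hwirq)) */
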
index 6d34592..efa96ed 100644 (file)
@@ -37,7 +37,7 @@ static void se7722_irq_demux(struct irq_desc *desc)
        mask = ioread16(se7722_irq_regs + IRQ01_STS_REG);
 
        for_each_set_bit(bit, &mask, SE7722_FPGA_IRQ_NR)
-               generic_handle_irq(irq_linear_revmap(se7722_irq_domain, bit));
+               generic_handle_domain_irq(se7722_irq_domain, bit);
 
        chip->irq_unmask(data);
 }
index efc992f..f82d3a6 100644 (file)
@@ -68,7 +68,7 @@ static void x3proto_gpio_irq_handler(struct irq_desc *desc)
 
        mask = __raw_readw(KEYDETR);
        for_each_set_bit(pin, &mask, NR_BASEBOARD_GPIOS)
-               generic_handle_irq(irq_linear_revmap(x3proto_irq_domain, pin));
+               generic_handle_domain_irq(x3proto_irq_domain, pin);
 
        chip->irq_unmask(data);
 }
index e497185..cd9dc05 100644 (file)
@@ -1268,8 +1268,7 @@ static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
                rq_for_each_segment(bvec, req, iter) {
                        BUG_ON(i >= io_req->desc_cnt);
 
-                       io_req->io_desc[i].buffer =
-                               page_address(bvec.bv_page) + bvec.bv_offset;
+                       io_req->io_desc[i].buffer = bvec_virt(&bvec);
                        io_req->io_desc[i].length = bvec.bv_len;
                        i++;
                }
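
For reference, the assumed definition of the bvec_virt() helper (shown here only to document the conversion; not part of this hunk) collapses the open-coded address computation:

	static inline void *bvec_virt(struct bio_vec *bvec)
	{
		return page_address(bvec->bv_page) + bvec->bv_offset;
	}
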
index 88fb922..421fa9e 100644 (file)
@@ -119,6 +119,7 @@ config X86
        select ARCH_WANT_HUGE_PMD_SHARE
        select ARCH_WANT_LD_ORPHAN_WARN
        select ARCH_WANTS_THP_SWAP              if X86_64
+       select ARCH_HAS_PARANOID_L1D_FLUSH
        select BUILDTIME_TABLE_SORT
        select CLKEVT_I8253
        select CLOCKSOURCE_VALIDATE_LAST_CYCLE
index 307fd00..d82d014 100644 (file)
@@ -31,8 +31,8 @@ REALMODE_CFLAGS       := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
 
 REALMODE_CFLAGS += -ffreestanding
 REALMODE_CFLAGS += -fno-stack-protector
-REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
-REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
+REALMODE_CFLAGS += -Wno-address-of-packed-member
+REALMODE_CFLAGS += $(cc_stack_align4)
 REALMODE_CFLAGS += $(CLANG_FLAGS)
 export REALMODE_CFLAGS
 
@@ -48,8 +48,7 @@ export BITS
 #
 #    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
 #
-KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
-KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
 
 # Intel CET isn't enabled in the kernel
 KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
@@ -59,9 +58,8 @@ ifeq ($(CONFIG_X86_32),y)
         UTS_MACHINE := i386
         CHECKFLAGS += -D__i386__
 
-        biarch := $(call cc-option,-m32)
-        KBUILD_AFLAGS += $(biarch)
-        KBUILD_CFLAGS += $(biarch)
+        KBUILD_AFLAGS += -m32
+        KBUILD_CFLAGS += -m32
 
         KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
 
@@ -72,7 +70,7 @@ ifeq ($(CONFIG_X86_32),y)
         # Align the stack to the register width instead of using the default
         # alignment of 16 bytes. This reduces stack usage and the number of
         # alignment instructions.
-        KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
+        KBUILD_CFLAGS += $(cc_stack_align4)
 
         # CPU-specific tuning. Anything which can be shared with UML should go here.
         include arch/x86/Makefile_32.cpu
@@ -93,7 +91,6 @@ else
         UTS_MACHINE := x86_64
         CHECKFLAGS += -D__x86_64__
 
-        biarch := -m64
         KBUILD_AFLAGS += -m64
         KBUILD_CFLAGS += -m64
 
@@ -104,7 +101,7 @@ else
         KBUILD_CFLAGS += $(call cc-option,-falign-loops=1)
 
         # Don't autogenerate traditional x87 instructions
-        KBUILD_CFLAGS += $(call cc-option,-mno-80387)
+        KBUILD_CFLAGS += -mno-80387
         KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
 
         # By default gcc and clang use a stack alignment of 16 bytes for x86.
@@ -114,20 +111,17 @@ else
         # default alignment which keep the stack *mis*aligned.
         # Furthermore an alignment to the register width reduces stack usage
         # and the number of alignment instructions.
-        KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align8))
+        KBUILD_CFLAGS += $(cc_stack_align8)
 
        # Use -mskip-rax-setup if supported.
        KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
 
         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
-        cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
-        cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
-
-        cflags-$(CONFIG_MCORE2) += \
-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
-       cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
-               $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-        cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+        cflags-$(CONFIG_MK8)           += -march=k8
+        cflags-$(CONFIG_MPSC)          += -march=nocona
+        cflags-$(CONFIG_MCORE2)                += -march=core2
+        cflags-$(CONFIG_MATOM)         += -march=atom
+        cflags-$(CONFIG_GENERIC_CPU)   += -mtune=generic
         KBUILD_CFLAGS += $(cflags-y)
 
         KBUILD_CFLAGS += -mno-red-zone
@@ -158,18 +152,6 @@ export CONFIG_X86_X32_ABI
 ifdef CONFIG_FUNCTION_GRAPH_TRACER
   ifndef CONFIG_HAVE_FENTRY
        ACCUMULATE_OUTGOING_ARGS := 1
-  else
-    ifeq ($(call cc-option-yn, -mfentry), n)
-       ACCUMULATE_OUTGOING_ARGS := 1
-
-       # GCC ignores '-maccumulate-outgoing-args' when used with '-Os'.
-       # If '-Os' is enabled, disable it and print a warning.
-        ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-          undefine CONFIG_CC_OPTIMIZE_FOR_SIZE
-          $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE.  Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
-        endif
-
-    endif
   endif
 endif
 
@@ -193,7 +175,7 @@ ifdef CONFIG_RETPOLINE
   # only been fixed starting from gcc stable version 8.4.0 and
   # onwards, but not for older ones. See gcc bug #86952.
   ifndef CONFIG_CC_IS_CLANG
-    KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
+    KBUILD_CFLAGS += -fno-jump-tables
   endif
 endif
 
@@ -275,9 +257,10 @@ endif
 $(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $@
 
-PHONY += install bzlilo
-install bzlilo:
-       $(Q)$(MAKE) $(build)=$(boot) $@
+PHONY += install
+install:
+       $(CONFIG_SHELL) $(srctree)/$(boot)/install.sh $(KERNELRELEASE) \
+               $(KBUILD_IMAGE) System.map "$(INSTALL_PATH)"
 
 PHONY += vdso_install
 vdso_install:
index dfbc26a..b5aecb5 100644 (file)
@@ -133,7 +133,7 @@ quiet_cmd_genimage = GENIMAGE $3
 cmd_genimage = $(BASH) $(srctree)/$(src)/genimage.sh $2 $3 $(obj)/bzImage \
                $(obj)/mtools.conf '$(FDARGS)' $(FDINITRD)
 
-PHONY += bzdisk fdimage fdimage144 fdimage288 hdimage isoimage install
+PHONY += bzdisk fdimage fdimage144 fdimage288 hdimage isoimage
 
 # This requires write access to /dev/fd0
 # All images require syslinux to be installed; hdimage also requires
@@ -156,8 +156,3 @@ hdimage: $(imgdeps)
 isoimage: $(imgdeps)
        $(call cmd,genimage,isoimage,$(obj)/image.iso)
        @$(kecho) 'Kernel: $(obj)/image.iso is ready'
-
-install:
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh \
-               $(KERNELRELEASE) $(obj)/bzImage \
-               System.map "$(INSTALL_PATH)"
index 95a223b..8bb92e9 100644 (file)
@@ -5,9 +5,8 @@
  * Early support for invoking 32-bit EFI services from a 64-bit kernel.
  *
  * Because this thunking occurs before ExitBootServices() we have to
- * restore the firmware's 32-bit GDT before we make EFI service calls,
- * since the firmware's 32-bit IDT is still currently installed and it
- * needs to be able to service interrupts.
+ * restore the firmware's 32-bit GDT and IDT before we make EFI service
+ * calls.
  *
  * On the plus side, we don't have to worry about mangling 64-bit
  * addresses into 32-bits because we're executing with an identity
@@ -39,7 +38,7 @@ SYM_FUNC_START(__efi64_thunk)
        /*
         * Convert x86-64 ABI params to i386 ABI
         */
-       subq    $32, %rsp
+       subq    $64, %rsp
        movl    %esi, 0x0(%rsp)
        movl    %edx, 0x4(%rsp)
        movl    %ecx, 0x8(%rsp)
@@ -49,14 +48,19 @@ SYM_FUNC_START(__efi64_thunk)
        leaq    0x14(%rsp), %rbx
        sgdt    (%rbx)
 
+       addq    $16, %rbx
+       sidt    (%rbx)
+
        /*
-        * Switch to gdt with 32-bit segments. This is the firmware GDT
-        * that was installed when the kernel started executing. This
-        * pointer was saved at the EFI stub entry point in head_64.S.
+        * Switch to IDT and GDT with 32-bit segments. This is the firmware GDT
+        * and IDT that was installed when the kernel started executing. The
+        * pointers were saved at the EFI stub entry point in head_64.S.
         *
         * Pass the saved DS selector to the 32-bit code, and use far return to
         * restore the saved CS selector.
         */
+       leaq    efi32_boot_idt(%rip), %rax
+       lidt    (%rax)
        leaq    efi32_boot_gdt(%rip), %rax
        lgdt    (%rax)
 
@@ -67,7 +71,7 @@ SYM_FUNC_START(__efi64_thunk)
        pushq   %rax
        lretq
 
-1:     addq    $32, %rsp
+1:     addq    $64, %rsp
        movq    %rdi, %rax
 
        pop     %rbx
@@ -128,10 +132,13 @@ SYM_FUNC_START_LOCAL(efi_enter32)
 
        /*
         * Some firmware will return with interrupts enabled. Be sure to
-        * disable them before we switch GDTs.
+        * disable them before we switch GDTs and IDTs.
         */
        cli
 
+       lidtl   (%ebx)
+       subl    $16, %ebx
+
        lgdtl   (%ebx)
 
        movl    %cr4, %eax
@@ -166,6 +173,11 @@ SYM_DATA_START(efi32_boot_gdt)
        .quad   0
 SYM_DATA_END(efi32_boot_gdt)
 
+SYM_DATA_START(efi32_boot_idt)
+       .word   0
+       .quad   0
+SYM_DATA_END(efi32_boot_idt)
+
 SYM_DATA_START(efi32_boot_cs)
        .word   0
 SYM_DATA_END(efi32_boot_cs)
index a2347de..572c535 100644 (file)
@@ -319,6 +319,9 @@ SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
        movw    %cs, rva(efi32_boot_cs)(%ebp)
        movw    %ds, rva(efi32_boot_ds)(%ebp)
 
+       /* Store firmware IDT descriptor */
+       sidtl   rva(efi32_boot_idt)(%ebp)
+
        /* Disable paging */
        movl    %cr0, %eax
        btrl    $X86_CR0_PG_BIT, %eax
index e366907..67c3208 100644 (file)
@@ -668,7 +668,7 @@ static bool process_mem_region(struct mem_vector *region,
 
                if (slot_area_index == MAX_SLOT_AREA) {
                        debug_putstr("Aborted e820/efi memmap scan when walking immovable regions(slot_areas full)!\n");
-                       return 1;
+                       return true;
                }
        }
 #endif
index d0959e7..f307c93 100644 (file)
@@ -88,6 +88,12 @@ nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o
 
 obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
 
+obj-$(CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64) += sm4-aesni-avx-x86_64.o
+sm4-aesni-avx-x86_64-y := sm4-aesni-avx-asm_64.o sm4_aesni_avx_glue.o
+
+obj-$(CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64) += sm4-aesni-avx2-x86_64.o
+sm4-aesni-avx2-x86_64-y := sm4-aesni-avx2-asm_64.o sm4_aesni_avx2_glue.o
+
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $< > $@
 $(obj)/%.S: $(src)/%.pl FORCE
index 2144e54..0fc961b 100644 (file)
@@ -849,6 +849,8 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
                return -EINVAL;
 
        err = skcipher_walk_virt(&walk, req, false);
+       if (!walk.nbytes)
+               return err;
 
        if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
                int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
@@ -862,7 +864,10 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           blocks * AES_BLOCK_SIZE, req->iv);
                req = &subreq;
+
                err = skcipher_walk_virt(&walk, req, false);
+               if (err)
+                       return err;
        } else {
                tail = 0;
        }
diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
new file mode 100644 (file)
index 0000000..fa2c3f5
--- /dev/null
@@ -0,0 +1,589 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX optimized,
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
+ * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
+ *  https://github.com/mjosaarinen/sm4ni
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+
+#define rRIP         (%rip)
+
+#define RX0          %xmm0
+#define RX1          %xmm1
+#define MASK_4BIT    %xmm2
+#define RTMP0        %xmm3
+#define RTMP1        %xmm4
+#define RTMP2        %xmm5
+#define RTMP3        %xmm6
+#define RTMP4        %xmm7
+
+#define RA0          %xmm8
+#define RA1          %xmm9
+#define RA2          %xmm10
+#define RA3          %xmm11
+
+#define RB0          %xmm12
+#define RB1          %xmm13
+#define RB2          %xmm14
+#define RB3          %xmm15
+
+#define RNOT         %xmm0
+#define RBSWAP       %xmm1
+
+
+/* Transpose four 32-bit words between 128-bit vectors. */
+#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
+       vpunpckhdq x1, x0, t2;                \
+       vpunpckldq x1, x0, x0;                \
+                                             \
+       vpunpckldq x3, x2, t1;                \
+       vpunpckhdq x3, x2, x2;                \
+                                             \
+       vpunpckhqdq t1, x0, x1;               \
+       vpunpcklqdq t1, x0, x0;               \
+                                             \
+       vpunpckhqdq x2, t2, x3;               \
+       vpunpcklqdq x2, t2, x2;
+
+/* pre-SubByte transform. */
+#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpand x, mask4bit, tmp0;                     \
+       vpandn x, mask4bit, x;                       \
+       vpsrld $4, x, x;                             \
+                                                    \
+       vpshufb tmp0, lo_t, tmp0;                    \
+       vpshufb x, hi_t, x;                          \
+       vpxor tmp0, x, x;
+
+/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
+ * 'vaesenclast' instruction.
+ */
+#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpandn mask4bit, x, tmp0;                     \
+       vpsrld $4, x, x;                              \
+       vpand x, mask4bit, x;                         \
+                                                     \
+       vpshufb tmp0, lo_t, tmp0;                     \
+       vpshufb x, hi_t, x;                           \
+       vpxor tmp0, x, x;
+
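
The two transforms above use the usual vpshufb nibble-lookup trick: an 8-bit GF(2) affine map is split into two 16-entry tables indexed by the low and high nibble, and the two lookups are XORed. A scalar model (illustration only; the function name is hypothetical):

	static inline u8 nibble_affine(u8 b, const u8 lo_t[16], const u8 hi_t[16])
	{
		return lo_t[b & 0x0f] ^ hi_t[b >> 4];
	}
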
+
+.section       .rodata.cst164, "aM", @progbits, 164
+.align 16
+
+/*
+ * Following four affine transform look-up tables are from work by
+ * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
+ *
+ * These allow exposing SM4 S-Box from AES SubByte.
+ */
+
+/* pre-SubByte affine transform, from SM4 field to AES field. */
+.Lpre_tf_lo_s:
+       .quad 0x9197E2E474720701, 0xC7C1B4B222245157
+.Lpre_tf_hi_s:
+       .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012
+
+/* post-SubByte affine transform, from AES field to SM4 field. */
+.Lpost_tf_lo_s:
+       .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
+.Lpost_tf_hi_s:
+       .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF
+
+/* For isolating SubBytes from AESENCLAST, inverse shift row */
+.Linv_shift_row:
+       .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
+       .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
+
+/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_8:
+       .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
+       .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06
+
+/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_16:
+       .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
+       .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09
+
+/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_24:
+       .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
+       .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
+
+/* For CTR-mode IV byteswap */
+.Lbswap128_mask:
+       .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+/* For input word byte-swap */
+.Lbswap32_mask:
+       .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+
+.align 4
+/* 4-bit mask */
+.L0f0f0f0f:
+       .long 0x0f0f0f0f
+
+
+.text
+.align 16
+
+/*
+ * void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
+ *                           const u8 *src, int nblocks)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_crypt4)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (1..4 blocks)
+        *      %rdx: src (1..4 blocks)
+        *      %rcx: num blocks (1..4)
+        */
+       FRAME_BEGIN
+
+       vmovdqu 0*16(%rdx), RA0;
+       vmovdqa RA0, RA1;
+       vmovdqa RA0, RA2;
+       vmovdqa RA0, RA3;
+       cmpq $2, %rcx;
+       jb .Lblk4_load_input_done;
+       vmovdqu 1*16(%rdx), RA1;
+       je .Lblk4_load_input_done;
+       vmovdqu 2*16(%rdx), RA2;
+       cmpq $3, %rcx;
+       je .Lblk4_load_input_done;
+       vmovdqu 3*16(%rdx), RA3;
+
+.Lblk4_load_input_done:
+
+       vmovdqa .Lbswap32_mask rRIP, RTMP2;
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+
+       vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
+       vmovdqa .Lpre_tf_lo_s rRIP, RTMP4;
+       vmovdqa .Lpre_tf_hi_s rRIP, RB0;
+       vmovdqa .Lpost_tf_lo_s rRIP, RB1;
+       vmovdqa .Lpost_tf_hi_s rRIP, RB2;
+       vmovdqa .Linv_shift_row rRIP, RB3;
+       vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP2;
+       vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP3;
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+
+#define ROUND(round, s0, s1, s2, s3)                                \
+       vbroadcastss (4*(round))(%rdi), RX0;                        \
+       vpxor s1, RX0, RX0;                                         \
+       vpxor s2, RX0, RX0;                                         \
+       vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
+                                                                   \
+       /* sbox, non-linear part */                                 \
+       transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0);           \
+       vaesenclast MASK_4BIT, RX0, RX0;                            \
+       transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0);            \
+                                                                   \
+       /* linear part */                                           \
+       vpshufb RB3, RX0, RTMP0;                                    \
+       vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
+       vpshufb RTMP2, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
+       vpshufb RTMP3, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb .Linv_shift_row_rol_24 rRIP, RX0, RTMP1;            \
+       vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
+       vpslld $2, RTMP0, RTMP1;                                    \
+       vpsrld $30, RTMP0, RTMP0;                                   \
+       vpxor RTMP0, s0, s0;                                        \
+       /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpxor RTMP1, s0, s0;
+
+       leaq (32*4)(%rdi), %rax;
+.align 16
+.Lroundloop_blk4:
+       ROUND(0, RA0, RA1, RA2, RA3);
+       ROUND(1, RA1, RA2, RA3, RA0);
+       ROUND(2, RA2, RA3, RA0, RA1);
+       ROUND(3, RA3, RA0, RA1, RA2);
+       leaq (4*4)(%rdi), %rdi;
+       cmpq %rax, %rdi;
+       jne .Lroundloop_blk4;
+
+#undef ROUND
+
+       vmovdqa .Lbswap128_mask rRIP, RTMP2;
+
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+
+       vmovdqu RA0, 0*16(%rsi);
+       cmpq $2, %rcx;
+       jb .Lblk4_store_output_done;
+       vmovdqu RA1, 1*16(%rsi);
+       je .Lblk4_store_output_done;
+       vmovdqu RA2, 2*16(%rsi);
+       cmpq $3, %rcx;
+       je .Lblk4_store_output_done;
+       vmovdqu RA3, 3*16(%rsi);
+
+.Lblk4_store_output_done:
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_crypt4)
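
For reference, the per-word operation that the ROUND macro vectorizes is the standard SM4 round function. A scalar C sketch (not part of this patch; sm4_sbox_word() stands in for a byte-wise S-box helper assumed to exist elsewhere, rol32() is the usual 32-bit rotate):

	static u32 sm4_round(u32 s0, u32 s1, u32 s2, u32 s3, u32 rk)
	{
		u32 x = sm4_sbox_word(s1 ^ s2 ^ s3 ^ rk);

		return s0 ^ x ^ rol32(x, 2) ^ rol32(x, 10) ^
		       rol32(x, 18) ^ rol32(x, 24);
	}
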
+
+.align 8
+SYM_FUNC_START_LOCAL(__sm4_crypt_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
+        *                                              plaintext blocks
+        * output:
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
+        *                                              ciphertext blocks
+        */
+       FRAME_BEGIN
+
+       vmovdqa .Lbswap32_mask rRIP, RTMP2;
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+
+#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3)                \
+       vbroadcastss (4*(round))(%rdi), RX0;                        \
+       vmovdqa .Lpre_tf_lo_s rRIP, RTMP4;                          \
+       vmovdqa .Lpre_tf_hi_s rRIP, RTMP1;                          \
+       vmovdqa RX0, RX1;                                           \
+       vpxor s1, RX0, RX0;                                         \
+       vpxor s2, RX0, RX0;                                         \
+       vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
+       vmovdqa .Lpost_tf_lo_s rRIP, RTMP2;                         \
+       vmovdqa .Lpost_tf_hi_s rRIP, RTMP3;                         \
+       vpxor r1, RX1, RX1;                                         \
+       vpxor r2, RX1, RX1;                                         \
+       vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */                 \
+                                                                    \
+       /* sbox, non-linear part */                                 \
+       transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       vmovdqa .Linv_shift_row rRIP, RTMP4;                        \
+       vaesenclast MASK_4BIT, RX0, RX0;                            \
+       vaesenclast MASK_4BIT, RX1, RX1;                            \
+       transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+       transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+                                                                    \
+       /* linear part */                                           \
+       vpshufb RTMP4, RX0, RTMP0;                                  \
+       vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
+       vpshufb RTMP4, RX1, RTMP2;                                  \
+       vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP4;                  \
+       vpxor RTMP2, r0, r0; /* r0 ^ x */                           \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP4;                 \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vmovdqa .Linv_shift_row_rol_24 rRIP, RTMP4;                 \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
+       /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpslld $2, RTMP0, RTMP1;                                    \
+       vpsrld $30, RTMP0, RTMP0;                                   \
+       vpxor RTMP0, s0, s0;                                        \
+       vpxor RTMP1, s0, s0;                                        \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */               \
+       /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpslld $2, RTMP2, RTMP3;                                    \
+       vpsrld $30, RTMP2, RTMP2;                                   \
+       vpxor RTMP2, r0, r0;                                        \
+       vpxor RTMP3, r0, r0;
+
+       leaq (32*4)(%rdi), %rax;
+.align 16
+.Lroundloop_blk8:
+       ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
+       ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
+       ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
+       ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
+       leaq (4*4)(%rdi), %rdi;
+       cmpq %rax, %rdi;
+       jne .Lroundloop_blk8;
+
+#undef ROUND
+
+       vmovdqa .Lbswap128_mask rRIP, RTMP2;
+
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       FRAME_END
+       ret;
+SYM_FUNC_END(__sm4_crypt_blk8)
+
+/*
+ * void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
+ *                           const u8 *src, int nblocks)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_crypt8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (1..8 blocks)
+        *      %rdx: src (1..8 blocks)
+        *      %rcx: num blocks (1..8)
+        */
+       FRAME_BEGIN
+
+       cmpq $5, %rcx;
+       jb sm4_aesni_avx_crypt4;
+       vmovdqu (0 * 16)(%rdx), RA0;
+       vmovdqu (1 * 16)(%rdx), RA1;
+       vmovdqu (2 * 16)(%rdx), RA2;
+       vmovdqu (3 * 16)(%rdx), RA3;
+       vmovdqu (4 * 16)(%rdx), RB0;
+       vmovdqa RB0, RB1;
+       vmovdqa RB0, RB2;
+       vmovdqa RB0, RB3;
+       je .Lblk8_load_input_done;
+       vmovdqu (5 * 16)(%rdx), RB1;
+       cmpq $7, %rcx;
+       jb .Lblk8_load_input_done;
+       vmovdqu (6 * 16)(%rdx), RB2;
+       je .Lblk8_load_input_done;
+       vmovdqu (7 * 16)(%rdx), RB3;
+
+.Lblk8_load_input_done:
+       call __sm4_crypt_blk8;
+
+       cmpq $6, %rcx;
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       jb .Lblk8_store_output_done;
+       vmovdqu RB1, (5 * 16)(%rsi);
+       je .Lblk8_store_output_done;
+       vmovdqu RB2, (6 * 16)(%rsi);
+       cmpq $7, %rcx;
+       je .Lblk8_store_output_done;
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+.Lblk8_store_output_done:
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_crypt8)
+
+/*
+ * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
+ *                                 const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (8 blocks)
+        *      %rdx: src (8 blocks)
+        *      %rcx: iv (big endian, 128bit)
+        */
+       FRAME_BEGIN
+
+       /* load IV and byteswap */
+       vmovdqu (%rcx), RA0;
+
+       vmovdqa .Lbswap128_mask rRIP, RBSWAP;
+       vpshufb RBSWAP, RA0, RTMP0; /* be => le */
+
+       vpcmpeqd RNOT, RNOT, RNOT;
+       vpsrldq $8, RNOT, RNOT; /* low: -1, high: 0 */
+
+#define inc_le128(x, minus_one, tmp) \
+       vpcmpeqq minus_one, x, tmp;  \
+       vpsubq minus_one, x, x;      \
+       vpslldq $8, tmp, tmp;        \
+       vpsubq tmp, x, x;
+
+       /* construct IVs */
+       inc_le128(RTMP0, RNOT, RTMP2); /* +1 */
+       vpshufb RBSWAP, RTMP0, RA1;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +2 */
+       vpshufb RBSWAP, RTMP0, RA2;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +3 */
+       vpshufb RBSWAP, RTMP0, RA3;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +4 */
+       vpshufb RBSWAP, RTMP0, RB0;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +5 */
+       vpshufb RBSWAP, RTMP0, RB1;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +6 */
+       vpshufb RBSWAP, RTMP0, RB2;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +7 */
+       vpshufb RBSWAP, RTMP0, RB3;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +8 */
+       vpshufb RBSWAP, RTMP0, RTMP1;
+
+       /* store new IV */
+       vmovdqu RTMP1, (%rcx);
+
+       call __sm4_crypt_blk8;
+
+       vpxor (0 * 16)(%rdx), RA0, RA0;
+       vpxor (1 * 16)(%rdx), RA1, RA1;
+       vpxor (2 * 16)(%rdx), RA2, RA2;
+       vpxor (3 * 16)(%rdx), RA3, RA3;
+       vpxor (4 * 16)(%rdx), RB0, RB0;
+       vpxor (5 * 16)(%rdx), RB1, RB1;
+       vpxor (6 * 16)(%rdx), RB2, RB2;
+       vpxor (7 * 16)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       vmovdqu RB1, (5 * 16)(%rsi);
+       vmovdqu RB2, (6 * 16)(%rsi);
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)
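
The inc_le128 macro used above increments a 128-bit value kept as two little-endian 64-bit halves; the compare/shift pair propagates the carry from the low half into the high half. Roughly, in C (a sketch, not part of the patch):

	static void inc128(u64 *lo, u64 *hi)
	{
		*hi += (*lo == ~0ULL);	/* carry out of the low half */
		*lo += 1;
	}
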
+
+/*
+ * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
+ *                                 const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (8 blocks)
+        *      %rdx: src (8 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       vmovdqu (0 * 16)(%rdx), RA0;
+       vmovdqu (1 * 16)(%rdx), RA1;
+       vmovdqu (2 * 16)(%rdx), RA2;
+       vmovdqu (3 * 16)(%rdx), RA3;
+       vmovdqu (4 * 16)(%rdx), RB0;
+       vmovdqu (5 * 16)(%rdx), RB1;
+       vmovdqu (6 * 16)(%rdx), RB2;
+       vmovdqu (7 * 16)(%rdx), RB3;
+
+       call __sm4_crypt_blk8;
+
+       vmovdqu (7 * 16)(%rdx), RNOT;
+       vpxor (%rcx), RA0, RA0;
+       vpxor (0 * 16)(%rdx), RA1, RA1;
+       vpxor (1 * 16)(%rdx), RA2, RA2;
+       vpxor (2 * 16)(%rdx), RA3, RA3;
+       vpxor (3 * 16)(%rdx), RB0, RB0;
+       vpxor (4 * 16)(%rdx), RB1, RB1;
+       vpxor (5 * 16)(%rdx), RB2, RB2;
+       vpxor (6 * 16)(%rdx), RB3, RB3;
+       vmovdqu RNOT, (%rcx); /* store new IV */
+
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       vmovdqu RB1, (5 * 16)(%rsi);
+       vmovdqu RB2, (6 * 16)(%rsi);
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
+
+/*
+ * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
+ *                                 const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (8 blocks)
+        *      %rdx: src (8 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       /* Load input */
+       vmovdqu (%rcx), RA0;
+       vmovdqu 0 * 16(%rdx), RA1;
+       vmovdqu 1 * 16(%rdx), RA2;
+       vmovdqu 2 * 16(%rdx), RA3;
+       vmovdqu 3 * 16(%rdx), RB0;
+       vmovdqu 4 * 16(%rdx), RB1;
+       vmovdqu 5 * 16(%rdx), RB2;
+       vmovdqu 6 * 16(%rdx), RB3;
+
+       /* Update IV */
+       vmovdqu 7 * 16(%rdx), RNOT;
+       vmovdqu RNOT, (%rcx);
+
+       call __sm4_crypt_blk8;
+
+       vpxor (0 * 16)(%rdx), RA0, RA0;
+       vpxor (1 * 16)(%rdx), RA1, RA1;
+       vpxor (2 * 16)(%rdx), RA2, RA2;
+       vpxor (3 * 16)(%rdx), RA3, RA3;
+       vpxor (4 * 16)(%rdx), RB0, RB0;
+       vpxor (5 * 16)(%rdx), RB1, RB1;
+       vpxor (6 * 16)(%rdx), RB2, RB2;
+       vpxor (7 * 16)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       vmovdqu RB1, (5 * 16)(%rsi);
+       vmovdqu RB2, (6 * 16)(%rsi);
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8)
diff --git a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
new file mode 100644 (file)
index 0000000..d2ffd7f
--- /dev/null
@@ -0,0 +1,497 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX2 optimized,
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
+ * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
+ *  https://github.com/mjosaarinen/sm4ni
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+
+#define rRIP         (%rip)
+
+/* vector registers */
+#define RX0          %ymm0
+#define RX1          %ymm1
+#define MASK_4BIT    %ymm2
+#define RTMP0        %ymm3
+#define RTMP1        %ymm4
+#define RTMP2        %ymm5
+#define RTMP3        %ymm6
+#define RTMP4        %ymm7
+
+#define RA0          %ymm8
+#define RA1          %ymm9
+#define RA2          %ymm10
+#define RA3          %ymm11
+
+#define RB0          %ymm12
+#define RB1          %ymm13
+#define RB2          %ymm14
+#define RB3          %ymm15
+
+#define RNOT         %ymm0
+#define RBSWAP       %ymm1
+
+#define RX0x         %xmm0
+#define RX1x         %xmm1
+#define MASK_4BITx   %xmm2
+
+#define RNOTx        %xmm0
+#define RBSWAPx      %xmm1
+
+#define RTMP0x       %xmm3
+#define RTMP1x       %xmm4
+#define RTMP2x       %xmm5
+#define RTMP3x       %xmm6
+#define RTMP4x       %xmm7
+
+
+/* helper macros */
+
+/* Transpose four 32-bit words between 128-bit vector lanes. */
+#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
+       vpunpckhdq x1, x0, t2;                \
+       vpunpckldq x1, x0, x0;                \
+                                             \
+       vpunpckldq x3, x2, t1;                \
+       vpunpckhdq x3, x2, x2;                \
+                                             \
+       vpunpckhqdq t1, x0, x1;               \
+       vpunpcklqdq t1, x0, x0;               \
+                                             \
+       vpunpckhqdq x2, t2, x3;               \
+       vpunpcklqdq x2, t2, x2;
+
+/* pre-SubByte transform. */
+#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpand x, mask4bit, tmp0;                     \
+       vpandn x, mask4bit, x;                       \
+       vpsrld $4, x, x;                             \
+                                                    \
+       vpshufb tmp0, lo_t, tmp0;                    \
+       vpshufb x, hi_t, x;                          \
+       vpxor tmp0, x, x;
+
+/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
+ * 'vaesenclast' instruction. */
+#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpandn mask4bit, x, tmp0;                     \
+       vpsrld $4, x, x;                              \
+       vpand x, mask4bit, x;                         \
+                                                     \
+       vpshufb tmp0, lo_t, tmp0;                     \
+       vpshufb x, hi_t, x;                           \
+       vpxor tmp0, x, x;
+
+
+.section       .rodata.cst164, "aM", @progbits, 164
+.align 16
+
+/*
+ * Following four affine transform look-up tables are from work by
+ * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
+ *
+ * These allow exposing SM4 S-Box from AES SubByte.
+ */
+
+/* pre-SubByte affine transform, from SM4 field to AES field. */
+.Lpre_tf_lo_s:
+       .quad 0x9197E2E474720701, 0xC7C1B4B222245157
+.Lpre_tf_hi_s:
+       .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012
+
+/* post-SubByte affine transform, from AES field to SM4 field. */
+.Lpost_tf_lo_s:
+       .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
+.Lpost_tf_hi_s:
+       .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF
+
+/* For isolating SubBytes from AESENCLAST, inverse shift row */
+.Linv_shift_row:
+       .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
+       .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
+
+/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_8:
+       .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
+       .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06
+
+/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_16:
+       .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
+       .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09
+
+/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_24:
+       .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
+       .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
+
+/* For CTR-mode IV byteswap */
+.Lbswap128_mask:
+       .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+/* For input word byte-swap */
+.Lbswap32_mask:
+       .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+
+.align 4
+/* 4-bit mask */
+.L0f0f0f0f:
+       .long 0x0f0f0f0f
+
+.text
+.align 16
+
+.align 8
+SYM_FUNC_START_LOCAL(__sm4_crypt_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
+        *                                              plaintext blocks
+        * output:
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
+        *                                              ciphertext blocks
+        */
+       FRAME_BEGIN
+
+       vbroadcasti128 .Lbswap32_mask rRIP, RTMP2;
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       vpbroadcastd .L0f0f0f0f rRIP, MASK_4BIT;
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+
+#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3)                \
+       vpbroadcastd (4*(round))(%rdi), RX0;                        \
+       vbroadcasti128 .Lpre_tf_lo_s rRIP, RTMP4;                   \
+       vbroadcasti128 .Lpre_tf_hi_s rRIP, RTMP1;                   \
+       vmovdqa RX0, RX1;                                           \
+       vpxor s1, RX0, RX0;                                         \
+       vpxor s2, RX0, RX0;                                         \
+       vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
+       vbroadcasti128 .Lpost_tf_lo_s rRIP, RTMP2;                  \
+       vbroadcasti128 .Lpost_tf_hi_s rRIP, RTMP3;                  \
+       vpxor r1, RX1, RX1;                                         \
+       vpxor r2, RX1, RX1;                                         \
+       vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */                 \
+                                                                   \
+       /* sbox, non-linear part */                                 \
+       transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       vextracti128 $1, RX0, RTMP4x;                               \
+       vextracti128 $1, RX1, RTMP0x;                               \
+       vaesenclast MASK_4BITx, RX0x, RX0x;                         \
+       vaesenclast MASK_4BITx, RTMP4x, RTMP4x;                     \
+       vaesenclast MASK_4BITx, RX1x, RX1x;                         \
+       vaesenclast MASK_4BITx, RTMP0x, RTMP0x;                     \
+       vinserti128 $1, RTMP4x, RX0, RX0;                           \
+       vbroadcasti128 .Linv_shift_row rRIP, RTMP4;                 \
+       vinserti128 $1, RTMP0x, RX1, RX1;                           \
+       transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+       transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+                                                                   \
+       /* linear part */                                           \
+       vpshufb RTMP4, RX0, RTMP0;                                  \
+       vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
+       vpshufb RTMP4, RX1, RTMP2;                                  \
+       vbroadcasti128 .Linv_shift_row_rol_8 rRIP, RTMP4;           \
+       vpxor RTMP2, r0, r0; /* r0 ^ x */                           \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vbroadcasti128 .Linv_shift_row_rol_16 rRIP, RTMP4;          \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vbroadcasti128 .Linv_shift_row_rol_24 rRIP, RTMP4;          \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
+       vpslld $2, RTMP0, RTMP1;                                    \
+       vpsrld $30, RTMP0, RTMP0;                                   \
+       vpxor RTMP0, s0, s0;                                        \
+       /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpxor RTMP1, s0, s0;                                        \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */               \
+       vpslld $2, RTMP2, RTMP3;                                    \
+       vpsrld $30, RTMP2, RTMP2;                                   \
+       vpxor RTMP2, r0, r0;                                        \
+       /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpxor RTMP3, r0, r0;
+
+       leaq (32*4)(%rdi), %rax;
+.align 16
+.Lroundloop_blk8:
+       ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
+       ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
+       ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
+       ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
+       leaq (4*4)(%rdi), %rdi;
+       cmpq %rax, %rdi;
+       jne .Lroundloop_blk8;
+
+#undef ROUND
+
+       vbroadcasti128 .Lbswap128_mask rRIP, RTMP2;
+
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       FRAME_END
+       ret;
+SYM_FUNC_END(__sm4_crypt_blk16)
+
+#define inc_le128(x, minus_one, tmp) \
+       vpcmpeqq minus_one, x, tmp;  \
+       vpsubq minus_one, x, x;      \
+       vpslldq $8, tmp, tmp;        \
+       vpsubq tmp, x, x;
+
+/*
+ * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
+ *                                   const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (16 blocks)
+        *      %rdx: src (16 blocks)
+        *      %rcx: iv (big endian, 128bit)
+        */
+       FRAME_BEGIN
+
+       movq 8(%rcx), %rax;
+       bswapq %rax;
+
+       vzeroupper;
+
+       vbroadcasti128 .Lbswap128_mask rRIP, RTMP3;
+       vpcmpeqd RNOT, RNOT, RNOT;
+       vpsrldq $8, RNOT, RNOT;   /* ab: -1:0 ; cd: -1:0 */
+       vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */
+
+       /* load IV and byteswap */
+       vmovdqu (%rcx), RTMP4x;
+       vpshufb RTMP3x, RTMP4x, RTMP4x;
+       vmovdqa RTMP4x, RTMP0x;
+       inc_le128(RTMP4x, RNOTx, RTMP1x);
+       vinserti128 $1, RTMP4x, RTMP0, RTMP0;
+       vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */
+
+       /* check need for handling 64-bit overflow and carry */
+       cmpq $(0xffffffffffffffff - 16), %rax;
+       ja .Lhandle_ctr_carry;
+
+       /* construct IVs */
+       vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */
+       vpshufb RTMP3, RTMP0, RA1;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */
+       vpshufb RTMP3, RTMP0, RA2;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */
+       vpshufb RTMP3, RTMP0, RA3;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */
+       vpshufb RTMP3, RTMP0, RB0;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */
+       vpshufb RTMP3, RTMP0, RB1;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */
+       vpshufb RTMP3, RTMP0, RB2;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */
+       vpshufb RTMP3, RTMP0, RB3;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +16 */
+       vpshufb RTMP3x, RTMP0x, RTMP0x;
+
+       jmp .Lctr_carry_done;
+
+.Lhandle_ctr_carry:
+       /* construct IVs */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vextracti128 $1, RTMP0, RTMP0x;
+       vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */
+
+.align 4
+.Lctr_carry_done:
+       /* store new IV */
+       vmovdqu RTMP0x, (%rcx);
+
+       call __sm4_crypt_blk16;
+
+       vpxor (0 * 32)(%rdx), RA0, RA0;
+       vpxor (1 * 32)(%rdx), RA1, RA1;
+       vpxor (2 * 32)(%rdx), RA2, RA2;
+       vpxor (3 * 32)(%rdx), RA3, RA3;
+       vpxor (4 * 32)(%rdx), RB0, RB0;
+       vpxor (5 * 32)(%rdx), RB1, RB1;
+       vpxor (6 * 32)(%rdx), RB2, RB2;
+       vpxor (7 * 32)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 32)(%rsi);
+       vmovdqu RA1, (1 * 32)(%rsi);
+       vmovdqu RA2, (2 * 32)(%rsi);
+       vmovdqu RA3, (3 * 32)(%rsi);
+       vmovdqu RB0, (4 * 32)(%rsi);
+       vmovdqu RB1, (5 * 32)(%rsi);
+       vmovdqu RB2, (6 * 32)(%rsi);
+       vmovdqu RB3, (7 * 32)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)
+
+/*
+ * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
+ *                                   const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (16 blocks)
+        *      %rdx: src (16 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       vzeroupper;
+
+       vmovdqu (0 * 32)(%rdx), RA0;
+       vmovdqu (1 * 32)(%rdx), RA1;
+       vmovdqu (2 * 32)(%rdx), RA2;
+       vmovdqu (3 * 32)(%rdx), RA3;
+       vmovdqu (4 * 32)(%rdx), RB0;
+       vmovdqu (5 * 32)(%rdx), RB1;
+       vmovdqu (6 * 32)(%rdx), RB2;
+       vmovdqu (7 * 32)(%rdx), RB3;
+
+       call __sm4_crypt_blk16;
+
+       vmovdqu (%rcx), RNOTx;
+       vinserti128 $1, (%rdx), RNOT, RNOT;
+       vpxor RNOT, RA0, RA0;
+       vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
+       vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
+       vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
+       vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
+       vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
+       vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
+       vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
+       vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
+       vmovdqu RNOTx, (%rcx); /* store new IV */
+
+       vmovdqu RA0, (0 * 32)(%rsi);
+       vmovdqu RA1, (1 * 32)(%rsi);
+       vmovdqu RA2, (2 * 32)(%rsi);
+       vmovdqu RA3, (3 * 32)(%rsi);
+       vmovdqu RB0, (4 * 32)(%rsi);
+       vmovdqu RB1, (5 * 32)(%rsi);
+       vmovdqu RB2, (6 * 32)(%rsi);
+       vmovdqu RB3, (7 * 32)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)
+
+/*
+ * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
+ *                                   const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (16 blocks)
+        *      %rdx: src (16 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       vzeroupper;
+
+       /* Load input */
+       vmovdqu (%rcx), RNOTx;
+       vinserti128 $1, (%rdx), RNOT, RA0;
+       vmovdqu (0 * 32 + 16)(%rdx), RA1;
+       vmovdqu (1 * 32 + 16)(%rdx), RA2;
+       vmovdqu (2 * 32 + 16)(%rdx), RA3;
+       vmovdqu (3 * 32 + 16)(%rdx), RB0;
+       vmovdqu (4 * 32 + 16)(%rdx), RB1;
+       vmovdqu (5 * 32 + 16)(%rdx), RB2;
+       vmovdqu (6 * 32 + 16)(%rdx), RB3;
+
+       /* Update IV */
+       vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
+       vmovdqu RNOTx, (%rcx);
+
+       call __sm4_crypt_blk16;
+
+       vpxor (0 * 32)(%rdx), RA0, RA0;
+       vpxor (1 * 32)(%rdx), RA1, RA1;
+       vpxor (2 * 32)(%rdx), RA2, RA2;
+       vpxor (3 * 32)(%rdx), RA3, RA3;
+       vpxor (4 * 32)(%rdx), RB0, RB0;
+       vpxor (5 * 32)(%rdx), RB1, RB1;
+       vpxor (6 * 32)(%rdx), RB2, RB2;
+       vpxor (7 * 32)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 32)(%rsi);
+       vmovdqu RA1, (1 * 32)(%rsi);
+       vmovdqu RA2, (2 * 32)(%rsi);
+       vmovdqu RA3, (3 * 32)(%rsi);
+       vmovdqu RB0, (4 * 32)(%rsi);
+       vmovdqu RB1, (5 * 32)(%rsi);
+       vmovdqu RB2, (6 * 32)(%rsi);
+       vmovdqu RB3, (7 * 32)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)
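The CTR path above only takes the per-block .Lhandle_ctr_carry sequence when the low 64 bits of the big-endian counter could wrap within the next 16 increments (the cmpq against 0xffffffffffffffff - 16); otherwise the vectorized vpsubq fast path is sufficient. For reference only (not part of the patch, helper name assumed), the same increment in scalar C using the generic unaligned accessors:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Reference only: increment a 128-bit big-endian CTR value, propagating
 * the carry out of the low 64 bits exactly where the assembly above
 * takes the slow path.
 */
static inline void ctr128_be_inc(u8 ctr[16])
{
	u64 lo = get_unaligned_be64(ctr + 8);
	u64 hi = get_unaligned_be64(ctr);

	if (++lo == 0)		/* low 64 bits wrapped: carry into high half */
		hi++;

	put_unaligned_be64(hi, ctr);
	put_unaligned_be64(lo, ctr + 8);
}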
diff --git a/arch/x86/crypto/sm4-avx.h b/arch/x86/crypto/sm4-avx.h
new file mode 100644 (file)
index 0000000..1bceab7
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef ASM_X86_SM4_AVX_H
+#define ASM_X86_SM4_AVX_H
+
+#include <linux/types.h>
+#include <crypto/sm4.h>
+
+typedef void (*sm4_crypt_func)(const u32 *rk, u8 *dst, const u8 *src, u8 *iv);
+
+int sm4_avx_ecb_encrypt(struct skcipher_request *req);
+int sm4_avx_ecb_decrypt(struct skcipher_request *req);
+
+int sm4_cbc_encrypt(struct skcipher_request *req);
+int sm4_avx_cbc_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func);
+
+int sm4_cfb_encrypt(struct skcipher_request *req);
+int sm4_avx_cfb_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func);
+
+int sm4_avx_ctr_crypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func);
+
+#endif
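sm4_avx_cbc_decrypt(), declared above, takes the bulk block size and the matching asm helper from each glue module; leftover blocks are then processed in groups of up to eight via sm4_aesni_avx_crypt8(). Either way the result is standard CBC decryption. A plain-C sketch of that chaining, for reference only (helper name assumed, not part of the patch):

#include <crypto/sm4.h>
#include <crypto/algapi.h>	/* crypto_xor() */
#include <linux/string.h>

/* Reference only: P[i] = SM4-Decrypt(C[i]) ^ C[i-1], with C[-1] = IV,
 * and the last ciphertext block carried forward as the next IV.
 */
static void sm4_cbc_dec_ref(const struct sm4_ctx *ctx, u8 *dst, const u8 *src,
			    unsigned int nblocks, u8 iv[SM4_BLOCK_SIZE])
{
	u8 prev[SM4_BLOCK_SIZE];
	unsigned int i;

	memcpy(prev, iv, SM4_BLOCK_SIZE);
	for (i = 0; i < nblocks; i++) {
		u8 cur[SM4_BLOCK_SIZE];

		memcpy(cur, src, SM4_BLOCK_SIZE);	/* keep C[i] for chaining */
		sm4_crypt_block(ctx->rkey_dec, dst, src);
		crypto_xor(dst, prev, SM4_BLOCK_SIZE);
		memcpy(prev, cur, SM4_BLOCK_SIZE);
		src += SM4_BLOCK_SIZE;
		dst += SM4_BLOCK_SIZE;
	}
	memcpy(iv, prev, SM4_BLOCK_SIZE);		/* new IV = last C[i] */
}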
diff --git a/arch/x86/crypto/sm4_aesni_avx2_glue.c b/arch/x86/crypto/sm4_aesni_avx2_glue.c
new file mode 100644 (file)
index 0000000..84bc718
--- /dev/null
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX2 optimized,
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (c) 2021, Alibaba Group.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <asm/simd.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/sm4.h>
+#include "sm4-avx.h"
+
+#define SM4_CRYPT16_BLOCK_SIZE (SM4_BLOCK_SIZE * 16)
+
+asmlinkage void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
+                                       const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
+                                       const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
+                                       const u8 *src, u8 *iv);
+
+static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                       unsigned int key_len)
+{
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return sm4_expandkey(ctx, key, key_len);
+}
+
+static int cbc_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cbc_decrypt(req, SM4_CRYPT16_BLOCK_SIZE,
+                               sm4_aesni_avx2_cbc_dec_blk16);
+}
+
+
+static int cfb_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cfb_decrypt(req, SM4_CRYPT16_BLOCK_SIZE,
+                               sm4_aesni_avx2_cfb_dec_blk16);
+}
+
+static int ctr_crypt(struct skcipher_request *req)
+{
+       return sm4_avx_ctr_crypt(req, SM4_CRYPT16_BLOCK_SIZE,
+                               sm4_aesni_avx2_ctr_enc_blk16);
+}
+
+static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
+       {
+               .base = {
+                       .cra_name               = "__ecb(sm4)",
+                       .cra_driver_name        = "__ecb-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_avx_ecb_encrypt,
+               .decrypt        = sm4_avx_ecb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cbc(sm4)",
+                       .cra_driver_name        = "__cbc-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cbc_encrypt,
+               .decrypt        = cbc_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cfb(sm4)",
+                       .cra_driver_name        = "__cfb-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cfb_encrypt,
+               .decrypt        = cfb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__ctr(sm4)",
+                       .cra_driver_name        = "__ctr-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = ctr_crypt,
+               .decrypt        = ctr_crypt,
+       }
+};
+
+static struct simd_skcipher_alg *
+simd_sm4_aesni_avx2_skciphers[ARRAY_SIZE(sm4_aesni_avx2_skciphers)];
+
+static int __init sm4_init(void)
+{
+       const char *feature_name;
+
+       if (!boot_cpu_has(X86_FEATURE_AVX) ||
+           !boot_cpu_has(X86_FEATURE_AVX2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX2 or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
+               return -ENODEV;
+       }
+
+       return simd_register_skciphers_compat(sm4_aesni_avx2_skciphers,
+                                       ARRAY_SIZE(sm4_aesni_avx2_skciphers),
+                                       simd_sm4_aesni_avx2_skciphers);
+}
+
+static void __exit sm4_exit(void)
+{
+       simd_unregister_skciphers(sm4_aesni_avx2_skciphers,
+                               ARRAY_SIZE(sm4_aesni_avx2_skciphers),
+                               simd_sm4_aesni_avx2_skciphers);
+}
+
+module_init(sm4_init);
+module_exit(sm4_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_DESCRIPTION("SM4 Cipher Algorithm, AES-NI/AVX2 optimized");
+MODULE_ALIAS_CRYPTO("sm4");
+MODULE_ALIAS_CRYPTO("sm4-aesni-avx2");
diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c
new file mode 100644 (file)
index 0000000..7800f77
--- /dev/null
@@ -0,0 +1,487 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX optimized,
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (c) 2021, Alibaba Group.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <asm/simd.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/sm4.h>
+#include "sm4-avx.h"
+
+#define SM4_CRYPT8_BLOCK_SIZE  (SM4_BLOCK_SIZE * 8)
+
+asmlinkage void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
+                               const u8 *src, int nblocks);
+asmlinkage void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
+                               const u8 *src, int nblocks);
+asmlinkage void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
+                               const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
+                               const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
+                               const u8 *src, u8 *iv);
+
+static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                       unsigned int key_len)
+{
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return sm4_expandkey(ctx, key, key_len);
+}
+
+static int ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
+{
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
+               while (nbytes >= SM4_CRYPT8_BLOCK_SIZE) {
+                       sm4_aesni_avx_crypt8(rkey, dst, src, 8);
+                       dst += SM4_CRYPT8_BLOCK_SIZE;
+                       src += SM4_CRYPT8_BLOCK_SIZE;
+                       nbytes -= SM4_CRYPT8_BLOCK_SIZE;
+               }
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       unsigned int nblocks = min(nbytes >> 4, 4u);
+                       sm4_aesni_avx_crypt4(rkey, dst, src, nblocks);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += nblocks * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+               kernel_fpu_end();
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+
+int sm4_avx_ecb_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return ecb_do_crypt(req, ctx->rkey_enc);
+}
+EXPORT_SYMBOL_GPL(sm4_avx_ecb_encrypt);
+
+int sm4_avx_ecb_decrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return ecb_do_crypt(req, ctx->rkey_dec);
+}
+EXPORT_SYMBOL_GPL(sm4_avx_ecb_decrypt);
+
+int sm4_cbc_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *iv = walk.iv;
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       crypto_xor_cpy(dst, src, iv, SM4_BLOCK_SIZE);
+                       sm4_crypt_block(ctx->rkey_enc, dst, dst);
+                       iv = dst;
+                       src += SM4_BLOCK_SIZE;
+                       dst += SM4_BLOCK_SIZE;
+                       nbytes -= SM4_BLOCK_SIZE;
+               }
+               if (iv != walk.iv)
+                       memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_cbc_encrypt);
+
+int sm4_avx_cbc_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
+
+               while (nbytes >= bsize) {
+                       func(ctx->rkey_dec, dst, src, walk.iv);
+                       dst += bsize;
+                       src += bsize;
+                       nbytes -= bsize;
+               }
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       u8 keystream[SM4_BLOCK_SIZE * 8];
+                       u8 iv[SM4_BLOCK_SIZE];
+                       unsigned int nblocks = min(nbytes >> 4, 8u);
+                       int i;
+
+                       sm4_aesni_avx_crypt8(ctx->rkey_dec, keystream,
+                                               src, nblocks);
+
+                       src += ((int)nblocks - 2) * SM4_BLOCK_SIZE;
+                       dst += (nblocks - 1) * SM4_BLOCK_SIZE;
+                       memcpy(iv, src + SM4_BLOCK_SIZE, SM4_BLOCK_SIZE);
+
+                       for (i = nblocks - 1; i > 0; i--) {
+                               crypto_xor_cpy(dst, src,
+                                       &keystream[i * SM4_BLOCK_SIZE],
+                                       SM4_BLOCK_SIZE);
+                               src -= SM4_BLOCK_SIZE;
+                               dst -= SM4_BLOCK_SIZE;
+                       }
+                       crypto_xor_cpy(dst, walk.iv, keystream, SM4_BLOCK_SIZE);
+                       memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += (nblocks + 1) * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+
+               kernel_fpu_end();
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_avx_cbc_decrypt);
+
+static int cbc_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cbc_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
+                               sm4_aesni_avx_cbc_dec_blk8);
+}
+
+int sm4_cfb_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               u8 keystream[SM4_BLOCK_SIZE];
+               const u8 *iv = walk.iv;
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       sm4_crypt_block(ctx->rkey_enc, keystream, iv);
+                       crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE);
+                       iv = dst;
+                       src += SM4_BLOCK_SIZE;
+                       dst += SM4_BLOCK_SIZE;
+                       nbytes -= SM4_BLOCK_SIZE;
+               }
+               if (iv != walk.iv)
+                       memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+
+               /* tail */
+               if (walk.nbytes == walk.total && nbytes > 0) {
+                       sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+                       crypto_xor_cpy(dst, src, keystream, nbytes);
+                       nbytes = 0;
+               }
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_cfb_encrypt);
+
+int sm4_avx_cfb_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
+
+               while (nbytes >= bsize) {
+                       func(ctx->rkey_enc, dst, src, walk.iv);
+                       dst += bsize;
+                       src += bsize;
+                       nbytes -= bsize;
+               }
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       u8 keystream[SM4_BLOCK_SIZE * 8];
+                       unsigned int nblocks = min(nbytes >> 4, 8u);
+
+                       memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
+                       if (nblocks > 1)
+                               memcpy(&keystream[SM4_BLOCK_SIZE], src,
+                                       (nblocks - 1) * SM4_BLOCK_SIZE);
+                       memcpy(walk.iv, src + (nblocks - 1) * SM4_BLOCK_SIZE,
+                               SM4_BLOCK_SIZE);
+
+                       sm4_aesni_avx_crypt8(ctx->rkey_enc, keystream,
+                                               keystream, nblocks);
+
+                       crypto_xor_cpy(dst, src, keystream,
+                                       nblocks * SM4_BLOCK_SIZE);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += nblocks * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+
+               kernel_fpu_end();
+
+               /* tail */
+               if (walk.nbytes == walk.total && nbytes > 0) {
+                       u8 keystream[SM4_BLOCK_SIZE];
+
+                       sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+                       crypto_xor_cpy(dst, src, keystream, nbytes);
+                       nbytes = 0;
+               }
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_avx_cfb_decrypt);
+
+static int cfb_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cfb_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
+                               sm4_aesni_avx_cfb_dec_blk8);
+}
+
+int sm4_avx_ctr_crypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
+
+               while (nbytes >= bsize) {
+                       func(ctx->rkey_enc, dst, src, walk.iv);
+                       dst += bsize;
+                       src += bsize;
+                       nbytes -= bsize;
+               }
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       u8 keystream[SM4_BLOCK_SIZE * 8];
+                       unsigned int nblocks = min(nbytes >> 4, 8u);
+                       int i;
+
+                       for (i = 0; i < nblocks; i++) {
+                               memcpy(&keystream[i * SM4_BLOCK_SIZE],
+                                       walk.iv, SM4_BLOCK_SIZE);
+                               crypto_inc(walk.iv, SM4_BLOCK_SIZE);
+                       }
+                       sm4_aesni_avx_crypt8(ctx->rkey_enc, keystream,
+                                       keystream, nblocks);
+
+                       crypto_xor_cpy(dst, src, keystream,
+                                       nblocks * SM4_BLOCK_SIZE);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += nblocks * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+
+               kernel_fpu_end();
+
+               /* tail */
+               if (walk.nbytes == walk.total && nbytes > 0) {
+                       u8 keystream[SM4_BLOCK_SIZE];
+
+                       memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
+                       crypto_inc(walk.iv, SM4_BLOCK_SIZE);
+
+                       sm4_crypt_block(ctx->rkey_enc, keystream, keystream);
+
+                       crypto_xor_cpy(dst, src, keystream, nbytes);
+                       dst += nbytes;
+                       src += nbytes;
+                       nbytes = 0;
+               }
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_avx_ctr_crypt);
+
+static int ctr_crypt(struct skcipher_request *req)
+{
+       return sm4_avx_ctr_crypt(req, SM4_CRYPT8_BLOCK_SIZE,
+                               sm4_aesni_avx_ctr_enc_blk8);
+}
+
+static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
+       {
+               .base = {
+                       .cra_name               = "__ecb(sm4)",
+                       .cra_driver_name        = "__ecb-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_avx_ecb_encrypt,
+               .decrypt        = sm4_avx_ecb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cbc(sm4)",
+                       .cra_driver_name        = "__cbc-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cbc_encrypt,
+               .decrypt        = cbc_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cfb(sm4)",
+                       .cra_driver_name        = "__cfb-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cfb_encrypt,
+               .decrypt        = cfb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__ctr(sm4)",
+                       .cra_driver_name        = "__ctr-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = ctr_crypt,
+               .decrypt        = ctr_crypt,
+       }
+};
+
+static struct simd_skcipher_alg *
+simd_sm4_aesni_avx_skciphers[ARRAY_SIZE(sm4_aesni_avx_skciphers)];
+
+static int __init sm4_init(void)
+{
+       const char *feature_name;
+
+       if (!boot_cpu_has(X86_FEATURE_AVX) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
+               return -ENODEV;
+       }
+
+       return simd_register_skciphers_compat(sm4_aesni_avx_skciphers,
+                                       ARRAY_SIZE(sm4_aesni_avx_skciphers),
+                                       simd_sm4_aesni_avx_skciphers);
+}
+
+static void __exit sm4_exit(void)
+{
+       simd_unregister_skciphers(sm4_aesni_avx_skciphers,
+                                       ARRAY_SIZE(sm4_aesni_avx_skciphers),
+                                       simd_sm4_aesni_avx_skciphers);
+}
+
+module_init(sm4_init);
+module_exit(sm4_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_DESCRIPTION("SM4 Cipher Algorithm, AES-NI/AVX optimized");
+MODULE_ALIAS_CRYPTO("sm4");
+MODULE_ALIAS_CRYPTO("sm4-aesni-avx");
index 39d9ded..d6cdfe6 100644 (file)
@@ -34,4 +34,14 @@ config PERF_EVENTS_AMD_POWER
          (CPUID Fn8000_0007_EDX[12]) interface to calculate the
          average power consumption on Family 15h processors.
 
+config PERF_EVENTS_AMD_UNCORE
+       tristate "AMD Uncore performance events"
+       depends on PERF_EVENTS && CPU_SUP_AMD
+       default y
+       help
+         Include support for AMD uncore performance events for use with
+         e.g., perf stat -e amd_l3/.../,amd_df/.../.
+
+         To compile this driver as a module, choose M here: the
+         module will be called 'amd-uncore'.
 endmenu
index fe8795a..6cbe38d 100644 (file)
@@ -1,8 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CPU_SUP_AMD)              += core.o uncore.o
+obj-$(CONFIG_CPU_SUP_AMD)              += core.o
 obj-$(CONFIG_PERF_EVENTS_AMD_POWER)    += power.o
 obj-$(CONFIG_X86_LOCAL_APIC)           += ibs.o
+obj-$(CONFIG_PERF_EVENTS_AMD_UNCORE)   += amd-uncore.o
+amd-uncore-objs                                := uncore.o
 ifdef CONFIG_AMD_IOMMU
 obj-$(CONFIG_CPU_SUP_AMD)              += iommu.o
 endif
-
index 40669ea..9739019 100644 (file)
@@ -26,6 +26,7 @@ static u32 ibs_caps;
 #include <linux/hardirq.h>
 
 #include <asm/nmi.h>
+#include <asm/amd-ibs.h>
 
 #define IBS_FETCH_CONFIG_MASK  (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
 #define IBS_OP_CONFIG_MASK     IBS_OP_MAX_CNT
@@ -90,6 +91,7 @@ struct perf_ibs {
        unsigned long                   offset_mask[1];
        int                             offset_max;
        unsigned int                    fetch_count_reset_broken : 1;
+       unsigned int                    fetch_ignore_if_zero_rip : 1;
        struct cpu_perf_ibs __percpu    *pcpu;
 
        struct attribute                **format_attrs;
@@ -99,15 +101,6 @@ struct perf_ibs {
        u64                             (*get_count)(u64 config);
 };
 
-struct perf_ibs_data {
-       u32             size;
-       union {
-               u32     data[0];        /* data buffer starts here */
-               u32     caps;
-       };
-       u64             regs[MSR_AMD64_IBS_REG_COUNT_MAX];
-};
-
 static int
 perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
 {
@@ -328,11 +321,14 @@ static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
 
 static u64 get_ibs_fetch_count(u64 config)
 {
-       return (config & IBS_FETCH_CNT) >> 12;
+       union ibs_fetch_ctl fetch_ctl = (union ibs_fetch_ctl)config;
+
+       return fetch_ctl.fetch_cnt << 4;
 }
 
 static u64 get_ibs_op_count(u64 config)
 {
+       union ibs_op_ctl op_ctl = (union ibs_op_ctl)config;
        u64 count = 0;
 
        /*
@@ -340,12 +336,12 @@ static u64 get_ibs_op_count(u64 config)
         * and the lower 7 bits of CurCnt are randomized.
         * Otherwise CurCnt has the full 27-bit current counter value.
         */
-       if (config & IBS_OP_VAL) {
-               count = (config & IBS_OP_MAX_CNT) << 4;
+       if (op_ctl.op_val) {
+               count = op_ctl.opmaxcnt << 4;
                if (ibs_caps & IBS_CAPS_OPCNTEXT)
-                       count += config & IBS_OP_MAX_CNT_EXT_MASK;
+                       count += op_ctl.opmaxcnt_ext << 20;
        } else if (ibs_caps & IBS_CAPS_RDWROPCNT) {
-               count = (config & IBS_OP_CUR_CNT) >> 32;
+               count = op_ctl.opcurcnt;
        }
 
        return count;
@@ -570,6 +566,7 @@ static struct perf_ibs perf_ibs_op = {
                .start          = perf_ibs_start,
                .stop           = perf_ibs_stop,
                .read           = perf_ibs_read,
+               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
        },
        .msr                    = MSR_AMD64_IBSOPCTL,
        .config_mask            = IBS_OP_CONFIG_MASK,
@@ -672,6 +669,10 @@ fail:
        if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
                regs.flags &= ~PERF_EFLAGS_EXACT;
        } else {
+               /* Workaround for erratum #1197 */
+               if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1]))
+                       goto out;
+
                set_linear_ip(&regs, ibs_data.regs[1]);
                regs.flags |= PERF_EFLAGS_EXACT;
        }
@@ -769,6 +770,9 @@ static __init void perf_event_ibs_init(void)
        if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
                perf_ibs_fetch.fetch_count_reset_broken = 1;
 
+       if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
+               perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
+
        perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
 
        if (ibs_caps & IBS_CAPS_OPCNT) {
index 16a2369..37d5b38 100644 (file)
@@ -213,6 +213,7 @@ static struct pmu pmu_class = {
        .stop           = pmu_event_stop,
        .read           = pmu_event_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       .module         = THIS_MODULE,
 };
 
 static int power_cpu_exit(unsigned int cpu)
index 582c0ff..0d04414 100644 (file)
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/cpufeature.h>
+#include <linux/smp.h>
 
-#include <asm/cpufeature.h>
 #include <asm/perf_event.h>
 #include <asm/msr.h>
-#include <asm/smp.h>
 
 #define NUM_COUNTERS_NB                4
 #define NUM_COUNTERS_L2                4
@@ -347,6 +347,7 @@ static struct pmu amd_nb_pmu = {
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+       .module         = THIS_MODULE,
 };
 
 static struct pmu amd_llc_pmu = {
@@ -360,6 +361,7 @@ static struct pmu amd_llc_pmu = {
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+       .module         = THIS_MODULE,
 };
 
 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
@@ -452,7 +454,7 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
 
        if (amd_uncore_llc) {
                uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
-               uncore->id = per_cpu(cpu_llc_id, cpu);
+               uncore->id = get_llc_id(cpu);
 
                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
                *per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
@@ -659,12 +661,34 @@ fail_prep:
 fail_llc:
        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
                perf_pmu_unregister(&amd_nb_pmu);
-       if (amd_uncore_llc)
-               free_percpu(amd_uncore_llc);
+       free_percpu(amd_uncore_llc);
 fail_nb:
-       if (amd_uncore_nb)
-               free_percpu(amd_uncore_nb);
+       free_percpu(amd_uncore_nb);
 
        return ret;
 }
-device_initcall(amd_uncore_init);
+
+static void __exit amd_uncore_exit(void)
+{
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE);
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
+       cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
+
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
+               perf_pmu_unregister(&amd_llc_pmu);
+               free_percpu(amd_uncore_llc);
+               amd_uncore_llc = NULL;
+       }
+
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
+               perf_pmu_unregister(&amd_nb_pmu);
+               free_percpu(amd_uncore_nb);
+               amd_uncore_nb = NULL;
+       }
+}
+
+module_init(amd_uncore_init);
+module_exit(amd_uncore_exit);
+
+MODULE_DESCRIPTION("AMD Uncore Driver");
+MODULE_LICENSE("GPL v2");
index 1eb4513..2a57dbe 100644 (file)
@@ -1087,10 +1087,8 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
         * validate an event group (assign == NULL)
         */
        if (!unsched && assign) {
-               for (i = 0; i < n; i++) {
-                       e = cpuc->event_list[i];
+               for (i = 0; i < n; i++)
                        static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]);
-               }
        } else {
                for (i = n0; i < n; i++) {
                        e = cpuc->event_list[i];
@@ -2489,13 +2487,15 @@ void perf_clear_dirty_counters(void)
                return;
 
        for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
-               /* Metrics and fake events don't have corresponding HW counters. */
-               if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR))
-                       continue;
-               else if (i >= INTEL_PMC_IDX_FIXED)
+               if (i >= INTEL_PMC_IDX_FIXED) {
+                       /* Metrics and fake events don't have corresponding HW counters. */
+                       if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed))
+                               continue;
+
                        wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
-               else
+               } else {
                        wrmsrl(x86_pmu_event_addr(i), 0);
+               }
        }
 
        bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
index fca7a6e..7011e87 100644 (file)
@@ -2904,24 +2904,28 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
-       struct cpu_hw_events *cpuc;
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
+       bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
        int loops;
        u64 status;
        int handled;
        int pmu_enabled;
 
-       cpuc = this_cpu_ptr(&cpu_hw_events);
-
        /*
         * Save the PMU state.
         * It needs to be restored when leaving the handler.
         */
        pmu_enabled = cpuc->enabled;
        /*
-        * No known reason to not always do late ACK,
-        * but just in case do it opt-in.
+        * In general, the early ACK is only used on old platforms.
+        * On big cores, starting from Haswell, the late ACK should be
+        * applied.
+        * On small cores after Tremont, the ACK has to be done right
+        * before counters are re-enabled, i.e. in the middle of the
+        * NMI handler.
         */
-       if (!x86_pmu.late_ack)
+       if (!late_ack && !mid_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        intel_bts_disable_local();
        cpuc->enabled = 0;
@@ -2958,6 +2962,8 @@ again:
                goto again;
 
 done:
+       if (mid_ack)
+               apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
        cpuc->enabled = pmu_enabled;
        if (pmu_enabled)
@@ -2969,7 +2975,7 @@ done:
         * have been reset. This avoids spurious NMIs on
         * Haswell CPUs.
         */
-       if (x86_pmu.late_ack)
+       if (late_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        return handled;
 }
@@ -5026,9 +5032,9 @@ static ssize_t freeze_on_smi_store(struct device *cdev,
 
        x86_pmu.attr_freeze_on_smi = val;
 
-       get_online_cpus();
+       cpus_read_lock();
        on_each_cpu(flip_smm_bit, &val, 1);
-       put_online_cpus();
+       cpus_read_unlock();
 done:
        mutex_unlock(&freeze_on_smi_mutex);
 
@@ -5071,9 +5077,9 @@ static ssize_t set_sysctl_tfa(struct device *cdev,
 
        allow_tsx_force_abort = val;
 
-       get_online_cpus();
+       cpus_read_lock();
        on_each_cpu(update_tfa_sched, NULL, 1);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return count;
 }
@@ -6129,7 +6135,6 @@ __init int intel_pmu_init(void)
                static_branch_enable(&perf_is_hybrid);
                x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
 
-               x86_pmu.late_ack = true;
                x86_pmu.pebs_aliases = NULL;
                x86_pmu.pebs_prec_dist = true;
                x86_pmu.pebs_block = true;
@@ -6167,6 +6172,7 @@ __init int intel_pmu_init(void)
                pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
                pmu->name = "cpu_core";
                pmu->cpu_type = hybrid_big;
+               pmu->late_ack = true;
                if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
                        pmu->num_counters = x86_pmu.num_counters + 2;
                        pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
@@ -6192,6 +6198,7 @@ __init int intel_pmu_init(void)
                pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
                pmu->name = "cpu_atom";
                pmu->cpu_type = hybrid_small;
+               pmu->mid_ack = true;
                pmu->num_counters = x86_pmu.num_counters;
                pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
                pmu->max_pebs_events = x86_pmu.max_pebs_events;
index 9158476..7f406c1 100644 (file)
@@ -62,7 +62,7 @@ static struct pt_cap_desc {
        PT_CAP(single_range_output,     0, CPUID_ECX, BIT(2)),
        PT_CAP(output_subsys,           0, CPUID_ECX, BIT(3)),
        PT_CAP(payloads_lip,            0, CPUID_ECX, BIT(31)),
-       PT_CAP(num_address_ranges,      1, CPUID_EAX, 0x3),
+       PT_CAP(num_address_ranges,      1, CPUID_EAX, 0x7),
        PT_CAP(mtc_periods,             1, CPUID_EAX, 0xffff0000),
        PT_CAP(cycle_thresholds,        1, CPUID_EBX, 0xffff),
        PT_CAP(psb_periods,             1, CPUID_EBX, 0xffff0000),
@@ -1708,7 +1708,7 @@ static __init int pt_init(void)
        if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
                return -ENODEV;
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(cpu) {
                u64 ctl;
 
@@ -1716,7 +1716,7 @@ static __init int pt_init(void)
                if (!ret && (ctl & RTIT_CTL_TRACEEN))
                        prior_warn++;
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (prior_warn) {
                x86_add_exclusive(x86_lbr_exclusive_pt);
index 9bf4dbb..c72e368 100644 (file)
@@ -842,6 +842,18 @@ static const struct attribute_group uncore_pmu_attr_group = {
        .attrs = uncore_pmu_attrs,
 };
 
+void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu)
+{
+       struct intel_uncore_type *type = pmu->type;
+
+       if (type->num_boxes == 1)
+               sprintf(pmu_name, "uncore_type_%u", type->type_id);
+       else {
+               sprintf(pmu_name, "uncore_type_%u_%d",
+                       type->type_id, type->box_ids[pmu->pmu_idx]);
+       }
+}
+
 static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
 {
        struct intel_uncore_type *type = pmu->type;
@@ -851,12 +863,7 @@ static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
         * Use uncore_type_&typeid_&boxid as name.
         */
        if (!type->name) {
-               if (type->num_boxes == 1)
-                       sprintf(pmu->name, "uncore_type_%u", type->type_id);
-               else {
-                       sprintf(pmu->name, "uncore_type_%u_%d",
-                               type->type_id, type->box_ids[pmu->pmu_idx]);
-               }
+               uncore_get_alias_name(pmu->name, pmu);
                return;
        }
 
@@ -865,9 +872,13 @@ static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
                        sprintf(pmu->name, "uncore_%s", type->name);
                else
                        sprintf(pmu->name, "uncore");
-       } else
-               sprintf(pmu->name, "uncore_%s_%d", type->name, pmu->pmu_idx);
-
+       } else {
+               /*
+                * Use the box ID from the discovery table if applicable.
+                */
+               sprintf(pmu->name, "uncore_%s_%d", type->name,
+                       type->box_ids ? type->box_ids[pmu->pmu_idx] : pmu->pmu_idx);
+       }
 }
 
 static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
@@ -1663,6 +1674,7 @@ struct intel_uncore_init_fun {
        void    (*cpu_init)(void);
        int     (*pci_init)(void);
        void    (*mmio_init)(void);
+       bool    use_discovery;
 };
 
 static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
@@ -1765,6 +1777,13 @@ static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
        .mmio_init = snr_uncore_mmio_init,
 };
 
+static const struct intel_uncore_init_fun spr_uncore_init __initconst = {
+       .cpu_init = spr_uncore_cpu_init,
+       .pci_init = spr_uncore_pci_init,
+       .mmio_init = spr_uncore_mmio_init,
+       .use_discovery = true,
+};
+
 static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
        .cpu_init = intel_uncore_generic_uncore_cpu_init,
        .pci_init = intel_uncore_generic_uncore_pci_init,
@@ -1809,6 +1828,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &rkl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &adl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &adl_uncore_init),
+       X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &spr_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      &snr_uncore_init),
        {},
 };
@@ -1832,8 +1852,13 @@ static int __init intel_uncore_init(void)
                        uncore_init = (struct intel_uncore_init_fun *)&generic_uncore_init;
                else
                        return -ENODEV;
-       } else
+       } else {
                uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
+               if (uncore_no_discover && uncore_init->use_discovery)
+                       return -ENODEV;
+               if (uncore_init->use_discovery && !intel_uncore_has_discovery_tables())
+                       return -ENODEV;
+       }
 
        if (uncore_init->pci_init) {
                pret = uncore_init->pci_init();
index 187d728..b968798 100644 (file)
@@ -561,6 +561,7 @@ struct event_constraint *
 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
+void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);
 
 extern struct intel_uncore_type *empty_uncore[];
 extern struct intel_uncore_type **uncore_msr_uncores;
@@ -608,6 +609,9 @@ void snr_uncore_mmio_init(void);
 int icx_uncore_pci_init(void);
 void icx_uncore_cpu_init(void);
 void icx_uncore_mmio_init(void);
+int spr_uncore_pci_init(void);
+void spr_uncore_cpu_init(void);
+void spr_uncore_mmio_init(void);
 
 /* uncore_nhmex.c */
 void nhmex_uncore_cpu_init(void);
index aba9bff..3049c64 100644 (file)
@@ -337,17 +337,17 @@ static const struct attribute_group generic_uncore_format_group = {
        .attrs = generic_uncore_formats_attr,
 };
 
-static void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
+void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
 {
        wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
 }
 
-static void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
 {
        wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
 }
 
-static void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
 {
        wrmsrl(uncore_msr_box_ctl(box), 0);
 }
@@ -377,7 +377,7 @@ static struct intel_uncore_ops generic_uncore_msr_ops = {
        .read_counter           = uncore_msr_read_counter,
 };
 
-static void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
+void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
 {
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
@@ -386,7 +386,7 @@ static void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
        pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
 }
 
-static void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
 {
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
@@ -394,7 +394,7 @@ static void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
        pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
 }
 
-static void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
 {
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
@@ -411,8 +411,8 @@ static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
        pci_write_config_dword(pdev, hwc->config_base, hwc->config);
 }
 
-static void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
-                                            struct perf_event *event)
+void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
+                                           struct perf_event *event)
 {
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
@@ -420,8 +420,8 @@ static void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
        pci_write_config_dword(pdev, hwc->config_base, 0);
 }
 
-static u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
-                                          struct perf_event *event)
+u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
+                                         struct perf_event *event)
 {
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
@@ -454,7 +454,7 @@ static unsigned int generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
        return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
 }
 
-static void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
+void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
 {
        unsigned int box_ctl = generic_uncore_mmio_box_ctl(box);
        struct intel_uncore_type *type = box->pmu->type;
@@ -478,7 +478,7 @@ static void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
        writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
 }
 
-static void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
 {
        if (!box->io_addr)
                return;
@@ -486,7 +486,7 @@ static void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
        writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
 }
 
-static void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
 {
        if (!box->io_addr)
                return;
@@ -505,8 +505,8 @@ static void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
        writel(hwc->config, box->io_addr + hwc->config_base);
 }
 
-static void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
-                                             struct perf_event *event)
+void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
+                                            struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
 
@@ -568,8 +568,8 @@ static bool uncore_update_uncore_type(enum uncore_access_type type_id,
        return true;
 }
 
-static struct intel_uncore_type **
-intel_uncore_generic_init_uncores(enum uncore_access_type type_id)
+struct intel_uncore_type **
+intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra)
 {
        struct intel_uncore_discovery_type *type;
        struct intel_uncore_type **uncores;
@@ -577,7 +577,7 @@ intel_uncore_generic_init_uncores(enum uncore_access_type type_id)
        struct rb_node *node;
        int i = 0;
 
-       uncores = kcalloc(num_discovered_types[type_id] + 1,
+       uncores = kcalloc(num_discovered_types[type_id] + num_extra + 1,
                          sizeof(struct intel_uncore_type *), GFP_KERNEL);
        if (!uncores)
                return empty_uncore;
@@ -606,17 +606,17 @@ intel_uncore_generic_init_uncores(enum uncore_access_type type_id)
 
 void intel_uncore_generic_uncore_cpu_init(void)
 {
-       uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR);
+       uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR, 0);
 }
 
 int intel_uncore_generic_uncore_pci_init(void)
 {
-       uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI);
+       uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI, 0);
 
        return 0;
 }
 
 void intel_uncore_generic_uncore_mmio_init(void)
 {
-       uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO);
+       uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO, 0);
 }
index 1d65293..7280c8a 100644 (file)
@@ -129,3 +129,24 @@ void intel_uncore_clear_discovery_tables(void);
 void intel_uncore_generic_uncore_cpu_init(void);
 int intel_uncore_generic_uncore_pci_init(void);
 void intel_uncore_generic_uncore_mmio_init(void);
+
+void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box);
+void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box);
+
+void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box);
+void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
+                                            struct perf_event *event);
+
+void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box);
+void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
+                                           struct perf_event *event);
+u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
+                                         struct perf_event *event);
+
+struct intel_uncore_type **
+intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra);
index 609c24a..5ddc0f3 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /* SandyBridge-EP/IvyTown uncore support */
 #include "uncore.h"
+#include "uncore_discovery.h"
 
 /* SNB-EP pci bus to socket mapping */
 #define SNBEP_CPUNODEID                        0x40
 #define ICX_NUMBER_IMC_CHN                     2
 #define ICX_IMC_MEM_STRIDE                     0x4
 
+/* SPR */
+#define SPR_RAW_EVENT_MASK_EXT                 0xffffff
+
+/* SPR CHA */
+#define SPR_CHA_PMON_CTL_TID_EN                        (1 << 16)
+#define SPR_CHA_PMON_EVENT_MASK                        (SNBEP_PMON_RAW_EVENT_MASK | \
+                                                SPR_CHA_PMON_CTL_TID_EN)
+#define SPR_CHA_PMON_BOX_FILTER_TID            0x3ff
+
+#define SPR_C0_MSR_PMON_BOX_FILTER0            0x200e
+
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
@@ -466,6 +478,7 @@ DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
+DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
@@ -3838,26 +3851,32 @@ clear_attr_update:
        return ret;
 }
 
-static int skx_iio_set_mapping(struct intel_uncore_type *type)
-{
-       return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
-}
-
-static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
+static void
+pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
 {
-       struct attribute **attr = skx_iio_mapping_group.attrs;
+       struct attribute **attr = ag->attrs;
 
        if (!attr)
                return;
 
        for (; *attr; attr++)
                kfree((*attr)->name);
-       kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
-       kfree(skx_iio_mapping_group.attrs);
-       skx_iio_mapping_group.attrs = NULL;
+       kfree(attr_to_ext_attr(*ag->attrs));
+       kfree(ag->attrs);
+       ag->attrs = NULL;
        kfree(type->topology);
 }
 
+static int skx_iio_set_mapping(struct intel_uncore_type *type)
+{
+       return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
+}
+
+static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+       pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group);
+}
+
 static struct intel_uncore_type skx_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
@@ -4501,6 +4520,11 @@ static int snr_iio_set_mapping(struct intel_uncore_type *type)
        return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
 }
 
+static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+       pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
+}
+
 static struct intel_uncore_type snr_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
@@ -4517,7 +4541,7 @@ static struct intel_uncore_type snr_uncore_iio = {
        .attr_update            = snr_iio_attr_update,
        .get_topology           = snr_iio_get_topology,
        .set_mapping            = snr_iio_set_mapping,
-       .cleanup_mapping        = skx_iio_cleanup_mapping,
+       .cleanup_mapping        = snr_iio_cleanup_mapping,
 };
 
 static struct intel_uncore_type snr_uncore_irp = {
@@ -4783,13 +4807,15 @@ int snr_uncore_pci_init(void)
        return 0;
 }
 
-static struct pci_dev *snr_uncore_get_mc_dev(int id)
+#define SNR_MC_DEVICE_ID       0x3451
+
+static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
 {
        struct pci_dev *mc_dev = NULL;
        int pkg;
 
        while (1) {
-               mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
+               mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
                if (!mc_dev)
                        break;
                pkg = uncore_pcibus_to_dieid(mc_dev->bus);
@@ -4799,19 +4825,20 @@ static struct pci_dev *snr_uncore_get_mc_dev(int id)
        return mc_dev;
 }
 
-static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
-                                      unsigned int box_ctl, int mem_offset)
+static int snr_uncore_mmio_map(struct intel_uncore_box *box,
+                              unsigned int box_ctl, int mem_offset,
+                              unsigned int device)
 {
-       struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
+       struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
        struct intel_uncore_type *type = box->pmu->type;
        resource_size_t addr;
        u32 pci_dword;
 
        if (!pdev)
-               return;
+               return -ENODEV;
 
        pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
-       addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
+       addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
 
        pci_read_config_dword(pdev, mem_offset, &pci_dword);
        addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
@@ -4821,16 +4848,25 @@ static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
        box->io_addr = ioremap(addr, type->mmio_map_size);
        if (!box->io_addr) {
                pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
-               return;
+               return -EINVAL;
        }
 
-       writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
+       return 0;
+}
+
+static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
+                                      unsigned int box_ctl, int mem_offset,
+                                      unsigned int device)
+{
+       if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
+               writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
 }
 
 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
 {
        __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
-                                  SNR_IMC_MMIO_MEM0_OFFSET);
+                                  SNR_IMC_MMIO_MEM0_OFFSET,
+                                  SNR_MC_DEVICE_ID);
 }
 
 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
@@ -5092,6 +5128,11 @@ static int icx_iio_set_mapping(struct intel_uncore_type *type)
        return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
 }
 
+static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+       pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group);
+}
+
 static struct intel_uncore_type icx_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
@@ -5109,7 +5150,7 @@ static struct intel_uncore_type icx_uncore_iio = {
        .attr_update            = icx_iio_attr_update,
        .get_topology           = icx_iio_get_topology,
        .set_mapping            = icx_iio_set_mapping,
-       .cleanup_mapping        = skx_iio_cleanup_mapping,
+       .cleanup_mapping        = icx_iio_cleanup_mapping,
 };
 
 static struct intel_uncore_type icx_uncore_irp = {
@@ -5405,7 +5446,8 @@ static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
        int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
                         SNR_IMC_MMIO_MEM0_OFFSET;
 
-       __snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
+       __snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
+                                  SNR_MC_DEVICE_ID);
 }
 
 static struct intel_uncore_ops icx_uncore_mmio_ops = {
@@ -5475,7 +5517,8 @@ static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
        int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
                         SNR_IMC_MMIO_MEM0_OFFSET;
 
-       __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
+       snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
+                           mem_offset, SNR_MC_DEVICE_ID);
 }
 
 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
@@ -5509,3 +5552,497 @@ void icx_uncore_mmio_init(void)
 }
 
 /* end of ICX uncore support */
+
+/* SPR uncore support */
+
+static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+       if (reg1->idx != EXTRA_REG_NONE)
+               wrmsrl(reg1->reg, reg1->config);
+
+       wrmsrl(hwc->config_base, hwc->config);
+}
+
+static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
+                                        struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+       if (reg1->idx != EXTRA_REG_NONE)
+               wrmsrl(reg1->reg, 0);
+
+       wrmsrl(hwc->config_base, 0);
+}
+
+static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+       struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+       bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
+       struct intel_uncore_type *type = box->pmu->type;
+
+       if (tie_en) {
+               reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
+                           HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
+               reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
+               reg1->idx = 0;
+       }
+
+       return 0;
+}
+
+static struct intel_uncore_ops spr_uncore_chabox_ops = {
+       .init_box               = intel_generic_uncore_msr_init_box,
+       .disable_box            = intel_generic_uncore_msr_disable_box,
+       .enable_box             = intel_generic_uncore_msr_enable_box,
+       .disable_event          = spr_uncore_msr_disable_event,
+       .enable_event           = spr_uncore_msr_enable_event,
+       .read_counter           = uncore_msr_read_counter,
+       .hw_config              = spr_cha_hw_config,
+       .get_constraint         = uncore_get_constraint,
+       .put_constraint         = uncore_put_constraint,
+};
+
+static struct attribute *spr_uncore_cha_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask_ext4.attr,
+       &format_attr_tid_en2.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh8.attr,
+       &format_attr_filter_tid5.attr,
+       NULL,
+};
+static const struct attribute_group spr_uncore_chabox_format_group = {
+       .name = "format",
+       .attrs = spr_uncore_cha_formats_attr,
+};
+
+static ssize_t alias_show(struct device *dev,
+                         struct device_attribute *attr,
+                         char *buf)
+{
+       struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
+       char pmu_name[UNCORE_PMU_NAME_LEN];
+
+       uncore_get_alias_name(pmu_name, pmu);
+       return sysfs_emit(buf, "%s\n", pmu_name);
+}
+
+static DEVICE_ATTR_RO(alias);
+
+static struct attribute *uncore_alias_attrs[] = {
+       &dev_attr_alias.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(uncore_alias);
+
+static struct intel_uncore_type spr_uncore_chabox = {
+       .name                   = "cha",
+       .event_mask             = SPR_CHA_PMON_EVENT_MASK,
+       .event_mask_ext         = SPR_RAW_EVENT_MASK_EXT,
+       .num_shared_regs        = 1,
+       .ops                    = &spr_uncore_chabox_ops,
+       .format_group           = &spr_uncore_chabox_format_group,
+       .attr_update            = uncore_alias_groups,
+};
+
+static struct intel_uncore_type spr_uncore_iio = {
+       .name                   = "iio",
+       .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
+       .event_mask_ext         = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
+       .format_group           = &snr_uncore_iio_format_group,
+       .attr_update            = uncore_alias_groups,
+};
+
+static struct attribute *spr_uncore_raw_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask_ext4.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh8.attr,
+       NULL,
+};
+
+static const struct attribute_group spr_uncore_raw_format_group = {
+       .name                   = "format",
+       .attrs                  = spr_uncore_raw_formats_attr,
+};
+
+#define SPR_UNCORE_COMMON_FORMAT()                             \
+       .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,    \
+       .event_mask_ext         = SPR_RAW_EVENT_MASK_EXT,       \
+       .format_group           = &spr_uncore_raw_format_group, \
+       .attr_update            = uncore_alias_groups
+
+static struct intel_uncore_type spr_uncore_irp = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "irp",
+
+};
+
+static struct intel_uncore_type spr_uncore_m2pcie = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "m2pcie",
+};
+
+static struct intel_uncore_type spr_uncore_pcu = {
+       .name                   = "pcu",
+       .attr_update            = uncore_alias_groups,
+};
+
+static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
+                                        struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (!box->io_addr)
+               return;
+
+       if (uncore_pmc_fixed(hwc->idx))
+               writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
+       else
+               writel(hwc->config, box->io_addr + hwc->config_base);
+}
+
+static struct intel_uncore_ops spr_uncore_mmio_ops = {
+       .init_box               = intel_generic_uncore_mmio_init_box,
+       .exit_box               = uncore_mmio_exit_box,
+       .disable_box            = intel_generic_uncore_mmio_disable_box,
+       .enable_box             = intel_generic_uncore_mmio_enable_box,
+       .disable_event          = intel_generic_uncore_mmio_disable_event,
+       .enable_event           = spr_uncore_mmio_enable_event,
+       .read_counter           = uncore_mmio_read_counter,
+};
+
+static struct intel_uncore_type spr_uncore_imc = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "imc",
+       .fixed_ctr_bits         = 48,
+       .fixed_ctr              = SNR_IMC_MMIO_PMON_FIXED_CTR,
+       .fixed_ctl              = SNR_IMC_MMIO_PMON_FIXED_CTL,
+       .ops                    = &spr_uncore_mmio_ops,
+};
+
+static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       struct hw_perf_event *hwc = &event->hw;
+
+       pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
+       pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
+}
+
+static struct intel_uncore_ops spr_uncore_pci_ops = {
+       .init_box               = intel_generic_uncore_pci_init_box,
+       .disable_box            = intel_generic_uncore_pci_disable_box,
+       .enable_box             = intel_generic_uncore_pci_enable_box,
+       .disable_event          = intel_generic_uncore_pci_disable_event,
+       .enable_event           = spr_uncore_pci_enable_event,
+       .read_counter           = intel_generic_uncore_pci_read_counter,
+};
+
+#define SPR_UNCORE_PCI_COMMON_FORMAT()                 \
+       SPR_UNCORE_COMMON_FORMAT(),                     \
+       .ops                    = &spr_uncore_pci_ops
+
+static struct intel_uncore_type spr_uncore_m2m = {
+       SPR_UNCORE_PCI_COMMON_FORMAT(),
+       .name                   = "m2m",
+};
+
+static struct intel_uncore_type spr_uncore_upi = {
+       SPR_UNCORE_PCI_COMMON_FORMAT(),
+       .name                   = "upi",
+};
+
+static struct intel_uncore_type spr_uncore_m3upi = {
+       SPR_UNCORE_PCI_COMMON_FORMAT(),
+       .name                   = "m3upi",
+};
+
+static struct intel_uncore_type spr_uncore_mdf = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "mdf",
+};
+
+#define UNCORE_SPR_NUM_UNCORE_TYPES            12
+#define UNCORE_SPR_IIO                         1
+#define UNCORE_SPR_IMC                         6
+
+static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
+       &spr_uncore_chabox,
+       &spr_uncore_iio,
+       &spr_uncore_irp,
+       &spr_uncore_m2pcie,
+       &spr_uncore_pcu,
+       NULL,
+       &spr_uncore_imc,
+       &spr_uncore_m2m,
+       &spr_uncore_upi,
+       &spr_uncore_m3upi,
+       NULL,
+       &spr_uncore_mdf,
+};
+
+enum perf_uncore_spr_iio_freerunning_type_id {
+       SPR_IIO_MSR_IOCLK,
+       SPR_IIO_MSR_BW_IN,
+       SPR_IIO_MSR_BW_OUT,
+
+       SPR_IIO_FREERUNNING_TYPE_MAX,
+};
+
+static struct freerunning_counters spr_iio_freerunning[] = {
+       [SPR_IIO_MSR_IOCLK]     = { 0x340e, 0x1, 0x10, 1, 48 },
+       [SPR_IIO_MSR_BW_IN]     = { 0x3800, 0x1, 0x10, 8, 48 },
+       [SPR_IIO_MSR_BW_OUT]    = { 0x3808, 0x1, 0x10, 8, 48 },
+};
+
+static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
+       /* Free-Running IIO CLOCKS Counter */
+       INTEL_UNCORE_EVENT_DESC(ioclk,                  "event=0xff,umask=0x10"),
+       /* Free-Running IIO BANDWIDTH IN Counters */
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0,            "event=0xff,umask=0x20"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1,            "event=0xff,umask=0x21"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2,            "event=0xff,umask=0x22"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3,            "event=0xff,umask=0x23"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port4,            "event=0xff,umask=0x24"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port5,            "event=0xff,umask=0x25"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port6,            "event=0xff,umask=0x26"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port7,            "event=0xff,umask=0x27"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,       "MiB"),
+       /* Free-Running IIO BANDWIDTH OUT Counters */
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0,           "event=0xff,umask=0x30"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1,           "event=0xff,umask=0x31"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2,           "event=0xff,umask=0x32"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3,           "event=0xff,umask=0x33"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port4,           "event=0xff,umask=0x34"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port5,           "event=0xff,umask=0x35"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port6,           "event=0xff,umask=0x36"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port7,           "event=0xff,umask=0x37"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,      "MiB"),
+       { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_type spr_uncore_iio_free_running = {
+       .name                   = "iio_free_running",
+       .num_counters           = 17,
+       .num_freerunning_types  = SPR_IIO_FREERUNNING_TYPE_MAX,
+       .freerunning            = spr_iio_freerunning,
+       .ops                    = &skx_uncore_iio_freerunning_ops,
+       .event_descs            = spr_uncore_iio_freerunning_events,
+       .format_group           = &skx_uncore_iio_freerunning_format_group,
+};
+
+enum perf_uncore_spr_imc_freerunning_type_id {
+       SPR_IMC_DCLK,
+       SPR_IMC_PQ_CYCLES,
+
+       SPR_IMC_FREERUNNING_TYPE_MAX,
+};
+
+static struct freerunning_counters spr_imc_freerunning[] = {
+       [SPR_IMC_DCLK]          = { 0x22b0, 0x0, 0, 1, 48 },
+       [SPR_IMC_PQ_CYCLES]     = { 0x2318, 0x8, 0, 2, 48 },
+};
+
+static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
+       INTEL_UNCORE_EVENT_DESC(dclk,                   "event=0xff,umask=0x10"),
+
+       INTEL_UNCORE_EVENT_DESC(rpq_cycles,             "event=0xff,umask=0x20"),
+       INTEL_UNCORE_EVENT_DESC(wpq_cycles,             "event=0xff,umask=0x21"),
+       { /* end: all zeroes */ },
+};
+
+#define SPR_MC_DEVICE_ID       0x3251
+
+static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
+{
+       int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;
+
+       snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
+                           mem_offset, SPR_MC_DEVICE_ID);
+}
+
+static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
+       .init_box       = spr_uncore_imc_freerunning_init_box,
+       .exit_box       = uncore_mmio_exit_box,
+       .read_counter   = uncore_mmio_read_counter,
+       .hw_config      = uncore_freerunning_hw_config,
+};
+
+static struct intel_uncore_type spr_uncore_imc_free_running = {
+       .name                   = "imc_free_running",
+       .num_counters           = 3,
+       .mmio_map_size          = SNR_IMC_MMIO_SIZE,
+       .num_freerunning_types  = SPR_IMC_FREERUNNING_TYPE_MAX,
+       .freerunning            = spr_imc_freerunning,
+       .ops                    = &spr_uncore_imc_freerunning_ops,
+       .event_descs            = spr_uncore_imc_freerunning_events,
+       .format_group           = &skx_uncore_iio_freerunning_format_group,
+};
+
+#define UNCORE_SPR_MSR_EXTRA_UNCORES           1
+#define UNCORE_SPR_MMIO_EXTRA_UNCORES          1
+
+static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
+       &spr_uncore_iio_free_running,
+};
+
+static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
+       &spr_uncore_imc_free_running,
+};
+
+static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
+                                       struct intel_uncore_type *from_type)
+{
+       if (!to_type || !from_type)
+               return;
+
+       if (from_type->name)
+               to_type->name = from_type->name;
+       if (from_type->fixed_ctr_bits)
+               to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
+       if (from_type->event_mask)
+               to_type->event_mask = from_type->event_mask;
+       if (from_type->event_mask_ext)
+               to_type->event_mask_ext = from_type->event_mask_ext;
+       if (from_type->fixed_ctr)
+               to_type->fixed_ctr = from_type->fixed_ctr;
+       if (from_type->fixed_ctl)
+               to_type->fixed_ctl = from_type->fixed_ctl;
+       if (from_type->fixed_ctr_bits)
+               to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
+       if (from_type->num_shared_regs)
+               to_type->num_shared_regs = from_type->num_shared_regs;
+       if (from_type->constraints)
+               to_type->constraints = from_type->constraints;
+       if (from_type->ops)
+               to_type->ops = from_type->ops;
+       if (from_type->event_descs)
+               to_type->event_descs = from_type->event_descs;
+       if (from_type->format_group)
+               to_type->format_group = from_type->format_group;
+       if (from_type->attr_update)
+               to_type->attr_update = from_type->attr_update;
+}
+
+static struct intel_uncore_type **
+uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
+                   struct intel_uncore_type **extra)
+{
+       struct intel_uncore_type **types, **start_types;
+       int i;
+
+       start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);
+
+       /* Only copy the customized features */
+       for (; *types; types++) {
+               if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
+                       continue;
+               uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
+       }
+
+       for (i = 0; i < num_extra; i++, types++)
+               *types = extra[i];
+
+       return start_types;
+}
+
+static struct intel_uncore_type *
+uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
+{
+       for (; *types; types++) {
+               if (type_id == (*types)->type_id)
+                       return *types;
+       }
+
+       return NULL;
+}
+
+static int uncore_type_max_boxes(struct intel_uncore_type **types,
+                                int type_id)
+{
+       struct intel_uncore_type *type;
+       int i, max = 0;
+
+       type = uncore_find_type_by_id(types, type_id);
+       if (!type)
+               return 0;
+
+       for (i = 0; i < type->num_boxes; i++) {
+               if (type->box_ids[i] > max)
+                       max = type->box_ids[i];
+       }
+
+       return max + 1;
+}
+
+void spr_uncore_cpu_init(void)
+{
+       uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
+                                               UNCORE_SPR_MSR_EXTRA_UNCORES,
+                                               spr_msr_uncores);
+
+       spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
+}
+
+int spr_uncore_pci_init(void)
+{
+       uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL);
+       return 0;
+}
+
+void spr_uncore_mmio_init(void)
+{
+       int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
+
+       if (ret)
+               uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
+       else {
+               uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
+                                                        UNCORE_SPR_MMIO_EXTRA_UNCORES,
+                                                        spr_mmio_uncores);
+
+               spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
+       }
+}
+
+/* end of SPR uncore support */
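
The SPR code above layers platform templates on top of the generic discovered types: uncore_type_customized_copy() overrides only the fields a template actually sets, so everything else keeps the value filled in from discovery. A standalone sketch of that merge pattern, using made-up types instead of struct intel_uncore_type:

#include <stdio.h>

/* Placeholder type for illustration only; not the kernel definition. */
struct demo_uncore_type {
	const char *name;
	unsigned int event_mask;
	int num_counters;
};

static void demo_customized_copy(struct demo_uncore_type *to,
				 const struct demo_uncore_type *from)
{
	if (!to || !from)
		return;
	/* only non-zero template fields override the discovered values */
	if (from->name)
		to->name = from->name;
	if (from->event_mask)
		to->event_mask = from->event_mask;
	if (from->num_counters)
		to->num_counters = from->num_counters;
}

int main(void)
{
	struct demo_uncore_type generic = { "uncore_type_0", 0xff, 4 };
	struct demo_uncore_type template = { .name = "cha" };	/* only name set */

	demo_customized_copy(&generic, &template);
	printf("%s mask=%#x counters=%d\n",
	       generic.name, generic.event_mask, generic.num_counters);
	return 0;
}
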
index 2bf1c7e..e3ac05c 100644 (file)
@@ -656,6 +656,10 @@ struct x86_hybrid_pmu {
        struct event_constraint         *event_constraints;
        struct event_constraint         *pebs_constraints;
        struct extra_reg                *extra_regs;
+
+       unsigned int                    late_ack        :1,
+                                       mid_ack         :1,
+                                       enabled_ack     :1;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
@@ -686,6 +690,16 @@ extern struct static_key_false perf_is_hybrid;
        __Fp;                                           \
 }))
 
+#define hybrid_bit(_pmu, _field)                       \
+({                                                     \
+       bool __Fp = x86_pmu._field;                     \
+                                                       \
+       if (is_hybrid() && (_pmu))                      \
+               __Fp = hybrid_pmu(_pmu)->_field;        \
+                                                       \
+       __Fp;                                           \
+})
+
 enum hybrid_pmu_type {
        hybrid_big              = 0x40,
        hybrid_small            = 0x20,
@@ -755,6 +769,7 @@ struct x86_pmu {
 
        /* PMI handler bits */
        unsigned int    late_ack                :1,
+                       mid_ack                 :1,
                        enabled_ack             :1;
        /*
         * sysfs attrs
@@ -1115,9 +1130,10 @@ void x86_pmu_stop(struct perf_event *event, int flags);
 
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
+       u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
        struct hw_perf_event *hwc = &event->hw;
 
-       wrmsrl(hwc->config_base, hwc->config);
+       wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
 
        if (is_counter_pair(hwc))
                wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
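
The new hybrid_bit() macro mirrors the existing hybrid() helper: on hybrid parts with a PMU pointer it reads the per-PMU copy of a flag (such as the mid_ack bit added above), otherwise it falls back to the global x86_pmu value. A simplified, self-contained model of that lookup; names and structure are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct demo_pmu { bool mid_ack; };

static bool demo_global_mid_ack = true;		/* stand-in for x86_pmu.mid_ack */
static bool demo_is_hybrid = true;

static bool demo_hybrid_bit(struct demo_pmu *pmu)
{
	bool v = demo_global_mid_ack;		/* default: global copy */

	if (demo_is_hybrid && pmu)
		v = pmu->mid_ack;		/* hybrid: per-PMU copy wins */
	return v;
}

int main(void)
{
	struct demo_pmu small = { .mid_ack = false };

	printf("global=%d per-pmu=%d\n",
	       demo_hybrid_bit(NULL), demo_hybrid_bit(&small));
	return 0;
}
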
diff --git a/arch/x86/include/asm/amd-ibs.h b/arch/x86/include/asm/amd-ibs.h
new file mode 100644 (file)
index 0000000..46e1df4
--- /dev/null
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * From PPR Vol 1 for AMD Family 19h Model 01h B1
+ * 55898 Rev 0.35 - Feb 5, 2021
+ */
+
+#include <asm/msr-index.h>
+
+/*
+ * IBS Hardware MSRs
+ */
+
+/* MSR 0xc0011030: IBS Fetch Control */
+union ibs_fetch_ctl {
+       __u64 val;
+       struct {
+               __u64   fetch_maxcnt:16,/* 0-15: instruction fetch max. count */
+                       fetch_cnt:16,   /* 16-31: instruction fetch count */
+                       fetch_lat:16,   /* 32-47: instruction fetch latency */
+                       fetch_en:1,     /* 48: instruction fetch enable */
+                       fetch_val:1,    /* 49: instruction fetch valid */
+                       fetch_comp:1,   /* 50: instruction fetch complete */
+                       ic_miss:1,      /* 51: i-cache miss */
+                       phy_addr_valid:1,/* 52: physical address valid */
+                       l1tlb_pgsz:2,   /* 53-54: i-cache L1TLB page size
+                                        *        (needs IbsPhyAddrValid) */
+                       l1tlb_miss:1,   /* 55: i-cache fetch missed in L1TLB */
+                       l2tlb_miss:1,   /* 56: i-cache fetch missed in L2TLB */
+                       rand_en:1,      /* 57: random tagging enable */
+                       fetch_l2_miss:1,/* 58: L2 miss for sampled fetch
+                                        *      (needs IbsFetchComp) */
+                       reserved:5;     /* 59-63: reserved */
+       };
+};
+
+/* MSR 0xc0011033: IBS Execution Control */
+union ibs_op_ctl {
+       __u64 val;
+       struct {
+               __u64   opmaxcnt:16,    /* 0-15: periodic op max. count */
+                       reserved0:1,    /* 16: reserved */
+                       op_en:1,        /* 17: op sampling enable */
+                       op_val:1,       /* 18: op sample valid */
+                       cnt_ctl:1,      /* 19: periodic op counter control */
+                       opmaxcnt_ext:7, /* 20-26: upper 7 bits of periodic op maximum count */
+                       reserved1:5,    /* 27-31: reserved */
+                       opcurcnt:27,    /* 32-58: periodic op counter current count */
+                       reserved2:5;    /* 59-63: reserved */
+       };
+};
+
+/* MSR 0xc0011035: IBS Op Data */
+union ibs_op_data {
+       __u64 val;
+       struct {
+               __u64   comp_to_ret_ctr:16,     /* 0-15: op completion to retire count */
+                       tag_to_ret_ctr:16,      /* 16-31: op tag to retire count */
+                       reserved1:2,            /* 32-33: reserved */
+                       op_return:1,            /* 34: return op */
+                       op_brn_taken:1,         /* 35: taken branch op */
+                       op_brn_misp:1,          /* 36: mispredicted branch op */
+                       op_brn_ret:1,           /* 37: branch op retired */
+                       op_rip_invalid:1,       /* 38: RIP is invalid */
+                       op_brn_fuse:1,          /* 39: fused branch op */
+                       op_microcode:1,         /* 40: microcode op */
+                       reserved2:23;           /* 41-63: reserved */
+       };
+};
+
+/* MSR 0xc0011036: IBS Op Data 2 */
+union ibs_op_data2 {
+       __u64 val;
+       struct {
+               __u64   data_src:3,     /* 0-2: data source */
+                       reserved0:1,    /* 3: reserved */
+                       rmt_node:1,     /* 4: destination node */
+                       cache_hit_st:1, /* 5: cache hit state */
+                       reserved1:57;   /* 5-63: reserved */
+       };
+};
+
+/* MSR 0xc0011037: IBS Op Data 3 */
+union ibs_op_data3 {
+       __u64 val;
+       struct {
+               __u64   ld_op:1,                        /* 0: load op */
+                       st_op:1,                        /* 1: store op */
+                       dc_l1tlb_miss:1,                /* 2: data cache L1TLB miss */
+                       dc_l2tlb_miss:1,                /* 3: data cache L2TLB miss */
+                       dc_l1tlb_hit_2m:1,              /* 4: data cache L1TLB hit in 2M page */
+                       dc_l1tlb_hit_1g:1,              /* 5: data cache L1TLB hit in 1G page */
+                       dc_l2tlb_hit_2m:1,              /* 6: data cache L2TLB hit in 2M page */
+                       dc_miss:1,                      /* 7: data cache miss */
+                       dc_mis_acc:1,                   /* 8: misaligned access */
+                       reserved:4,                     /* 9-12: reserved */
+                       dc_wc_mem_acc:1,                /* 13: write combining memory access */
+                       dc_uc_mem_acc:1,                /* 14: uncacheable memory access */
+                       dc_locked_op:1,                 /* 15: locked operation */
+                       dc_miss_no_mab_alloc:1,         /* 16: DC miss with no MAB allocated */
+                       dc_lin_addr_valid:1,            /* 17: data cache linear address valid */
+                       dc_phy_addr_valid:1,            /* 18: data cache physical address valid */
+                       dc_l2_tlb_hit_1g:1,             /* 19: data cache L2TLB hit in 1GB page */
+                       l2_miss:1,                      /* 20: L2 cache miss */
+                       sw_pf:1,                        /* 21: software prefetch */
+                       op_mem_width:4,                 /* 22-25: load/store size in bytes */
+                       op_dc_miss_open_mem_reqs:6,     /* 26-31: outstanding mem reqs on DC fill */
+                       dc_miss_lat:16,                 /* 32-47: data cache miss latency */
+                       tlb_refill_lat:16;              /* 48-63: L1 TLB refill latency */
+       };
+};
+
+/* MSR 0xc001103c: IBS Fetch Control Extended */
+union ic_ibs_extd_ctl {
+       __u64 val;
+       struct {
+               __u64   itlb_refill_lat:16,     /* 0-15: ITLB Refill latency for sampled fetch */
+                       reserved:48;            /* 16-63: reserved */
+       };
+};
+
+/*
+ * IBS driver related
+ */
+
+struct perf_ibs_data {
+       u32             size;
+       union {
+               u32     data[0];        /* data buffer starts here */
+               u32     caps;
+       };
+       u64             regs[MSR_AMD64_IBS_REG_COUNT_MAX];
+};
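
The unions above let a raw IBS MSR value be decoded field by field instead of by hand-rolled shifts. A small userspace sketch of the same idea, using a reduced copy of ibs_op_data3 and a made-up sample value; as in the kernel header, the bitfield layout assumes a little-endian x86 compiler:

#include <stdint.h>
#include <stdio.h>

/* Reduced, illustrative copy of union ibs_op_data3; most fields elided. */
union demo_ibs_op_data3 {
	uint64_t val;
	struct {
		uint64_t ld_op:1,		/* 0: load op */
			 st_op:1,		/* 1: store op */
			 dc_l1tlb_miss:1,	/* 2: data cache L1TLB miss */
			 dc_l2tlb_miss:1,	/* 3: data cache L2TLB miss */
			 other:28,		/* 4-31: fields elided here */
			 dc_miss_lat:16,	/* 32-47: data cache miss latency */
			 tlb_refill_lat:16;	/* 48-63: L1 TLB refill latency */
	};
};

int main(void)
{
	/* made-up raw value: load op, L1TLB miss, 100-cycle DC miss latency */
	union demo_ibs_op_data3 d = { .val = (100ULL << 32) | (1 << 2) | 1 };

	printf("load=%u l1tlb_miss=%u dc_miss_lat=%u\n",
	       (unsigned)d.ld_op, (unsigned)d.dc_l1tlb_miss,
	       (unsigned)d.dc_miss_lat);
	return 0;
}
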
index 89789e8..637fa1d 100644 (file)
@@ -19,6 +19,8 @@ extern unsigned int cached_irq_mask;
 #define PIC_MASTER_OCW3                PIC_MASTER_ISR
 #define PIC_SLAVE_CMD          0xa0
 #define PIC_SLAVE_IMR          0xa1
+#define PIC_ELCR1              0x4d0
+#define PIC_ELCR2              0x4d1
 
 /* i8259A PIC related value */
 #define PIC_CASCADE_IR         2
index 05b48b3..ff5c713 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef _ASM_X86_KFENCE_H
 #define _ASM_X86_KFENCE_H
 
+#ifndef MODULE
+
 #include <linux/bug.h>
 #include <linux/kfence.h>
 
@@ -66,4 +68,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
        return true;
 }
 
+#endif /* !MODULE */
+
 #endif /* _ASM_X86_KFENCE_H */
index 974cbfb..af6ce8d 100644 (file)
@@ -1038,6 +1038,13 @@ struct kvm_arch {
        struct list_head lpage_disallowed_mmu_pages;
        struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;
+       /*
+        * Protects marking pages unsync during page faults, as TDP MMU page
+        * faults only take mmu_lock for read.  For simplicity, the unsync
+        * pages lock is always taken when marking pages unsync regardless of
+        * whether mmu_lock is held for read or write.
+        */
+       spinlock_t mmu_unsync_pages_lock;
 
        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
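
The comment documents an asymmetric locking rule: the TDP MMU fault path may hold mmu_lock only for read, so the dedicated spinlock serializes the unsync marking itself. A rough pthreads model of that rule, purely illustrative and not the KVM code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t demo_mmu_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t demo_unsync_pages_lock = PTHREAD_MUTEX_INITIALIZER;
static bool demo_page_unsync;

static void demo_mark_unsync_from_fault(void)
{
	pthread_rwlock_rdlock(&demo_mmu_lock);	/* fault path: outer lock for read only */

	pthread_mutex_lock(&demo_unsync_pages_lock);
	demo_page_unsync = true;		/* the write that needs serializing */
	pthread_mutex_unlock(&demo_unsync_pages_lock);

	pthread_rwlock_unlock(&demo_mmu_lock);
}

int main(void)
{
	demo_mark_unsync_from_fault();
	printf("unsync = %d\n", demo_page_unsync);
	return 0;
}
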
index 0607ec4..da93215 100644 (file)
@@ -265,6 +265,7 @@ enum mcp_flags {
        MCP_TIMESTAMP   = BIT(0),       /* log time stamp */
        MCP_UC          = BIT(1),       /* log uncorrected errors */
        MCP_DONTLOG     = BIT(2),       /* only clear, don't log */
+       MCP_QUEUE_LOG   = BIT(3),       /* only queue to genpool */
 };
 bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
 
index 3ad8c6d..ec2d5c8 100644 (file)
@@ -252,6 +252,8 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 DECLARE_STATIC_KEY_FALSE(mds_user_clear);
 DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
 
+DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+
 #include <asm/segment.h>
 
 /**
diff --git a/arch/x86/include/asm/pc-conf-reg.h b/arch/x86/include/asm/pc-conf-reg.h
new file mode 100644 (file)
index 0000000..56bcece
--- /dev/null
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for the configuration register space at port I/O locations
+ * 0x22 and 0x23 variously used by PC architectures, e.g. the MP Spec,
+ * Cyrix CPUs, numerous chipsets.
+ */
+#ifndef _ASM_X86_PC_CONF_REG_H
+#define _ASM_X86_PC_CONF_REG_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define PC_CONF_INDEX          0x22
+#define PC_CONF_DATA           0x23
+
+#define PC_CONF_MPS_IMCR       0x70
+
+extern raw_spinlock_t pc_conf_lock;
+
+static inline u8 pc_conf_get(u8 reg)
+{
+       outb(reg, PC_CONF_INDEX);
+       return inb(PC_CONF_DATA);
+}
+
+static inline void pc_conf_set(u8 reg, u8 data)
+{
+       outb(reg, PC_CONF_INDEX);
+       outb(data, PC_CONF_DATA);
+}
+
+#endif /* _ASM_X86_PC_CONF_REG_H */
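
The new helpers wrap the classic index/data pair at ports 0x22/0x23, which the Cyrix accessors converted below then reuse. For illustration only, a userspace sketch of that access pattern via ioperm(); it needs root, is x86-specific, and pokes real hardware, so treat it as a reading aid rather than a tool:

#include <stdio.h>
#include <sys/io.h>

#define DEMO_PC_CONF_INDEX	0x22
#define DEMO_PC_CONF_DATA	0x23
#define DEMO_PC_CONF_MPS_IMCR	0x70

int main(void)
{
	unsigned char val;

	if (ioperm(DEMO_PC_CONF_INDEX, 2, 1)) {
		perror("ioperm");
		return 1;
	}

	/* select the register via the index port, then read the data port */
	outb(DEMO_PC_CONF_MPS_IMCR, DEMO_PC_CONF_INDEX);
	val = inb(DEMO_PC_CONF_DATA);
	printf("IMCR = %#x\n", val);
	return 0;
}
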
index df700a6..efe3e46 100644 (file)
@@ -5,14 +5,14 @@
  * Access order is always 0x22 (=offset), 0x23 (=value)
  */
 
+#include <asm/pc-conf-reg.h>
+
 static inline u8 getCx86(u8 reg)
 {
-       outb(reg, 0x22);
-       return inb(0x23);
+       return pc_conf_get(reg);
 }
 
 static inline void setCx86(u8 reg, u8 data)
 {
-       outb(reg, 0x22);
-       outb(data, 0x23);
+       pc_conf_set(reg, data);
 }
index f3020c5..9ad2aca 100644 (file)
@@ -136,6 +136,8 @@ struct cpuinfo_x86 {
        u16                     logical_die_id;
        /* Index into per_cpu list: */
        u16                     cpu_index;
+       /*  Is SMT active on this core? */
+       bool                    smt_active;
        u32                     microcode;
        /* Address space bits used by the cache internally */
        u8                      x86_cache_bits;
@@ -795,6 +797,8 @@ extern int set_tsc_mode(unsigned int val);
 
 DECLARE_PER_CPU(u64, msr_misc_features_shadow);
 
+extern u16 get_llc_id(unsigned int cpu);
+
 #ifdef CONFIG_CPU_SUP_AMD
 extern u32 amd_get_nodes_per_socket(void);
 extern u32 amd_get_highest_perf(void);
index e322676..b00dbc5 100644 (file)
@@ -184,6 +184,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define V_IGN_TPR_SHIFT 20
 #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
 
+#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
+
 #define V_INTR_MASKING_SHIFT 24
 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
 
index de406d9..cf13266 100644 (file)
@@ -81,7 +81,7 @@ struct thread_info {
 #define TIF_SINGLESTEP         4       /* reenable singlestep on user return*/
 #define TIF_SSBD               5       /* Speculative store bypass disable */
 #define TIF_SPEC_IB            9       /* Indirect branch speculation mitigation */
-#define TIF_SPEC_FORCE_UPDATE  10      /* Force speculation MSR update in context switch */
+#define TIF_SPEC_L1D_FLUSH     10      /* Flush L1D on mm switches (processes) */
 #define TIF_USER_RETURN_NOTIFY 11      /* notify kernel of userspace return */
 #define TIF_UPROBE             12      /* breakpointed or singlestepping */
 #define TIF_PATCH_PENDING      13      /* pending live patching update */
@@ -93,6 +93,7 @@ struct thread_info {
 #define TIF_MEMDIE             20      /* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG     21      /* idle is polling for TIF_NEED_RESCHED */
 #define TIF_IO_BITMAP          22      /* uses I/O bitmap */
+#define TIF_SPEC_FORCE_UPDATE  23      /* Force speculation MSR update in context switch */
 #define TIF_FORCED_TF          24      /* true if TF in eflags artificially */
 #define TIF_BLOCKSTEP          25      /* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES   27      /* task is updating the mmu lazily */
@@ -104,7 +105,7 @@ struct thread_info {
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
 #define _TIF_SSBD              (1 << TIF_SSBD)
 #define _TIF_SPEC_IB           (1 << TIF_SPEC_IB)
-#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
+#define _TIF_SPEC_L1D_FLUSH    (1 << TIF_SPEC_L1D_FLUSH)
 #define _TIF_USER_RETURN_NOTIFY        (1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE            (1 << TIF_UPROBE)
 #define _TIF_PATCH_PENDING     (1 << TIF_PATCH_PENDING)
@@ -115,6 +116,7 @@ struct thread_info {
 #define _TIF_SLD               (1 << TIF_SLD)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_IO_BITMAP         (1 << TIF_IO_BITMAP)
+#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
 #define _TIF_FORCED_TF         (1 << TIF_FORCED_TF)
 #define _TIF_BLOCKSTEP         (1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES  (1 << TIF_LAZY_MMU_UPDATES)
index fa952ea..b587a9e 100644 (file)
@@ -83,7 +83,7 @@ struct tlb_state {
        /* Last user mm for optimizing IBPB */
        union {
                struct mm_struct        *last_user_mm;
-               unsigned long           last_user_mm_ibpb;
+               unsigned long           last_user_mm_spec;
        };
 
        u16 loaded_mm_asid;
index e55e0c1..14bcd59 100644 (file)
@@ -558,10 +558,10 @@ acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end
  * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
  * it may require Edge Trigger -- use "acpi_sci=edge"
  *
- * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
+ * Port 0x4d0-4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
  * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
- * ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
- * ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
+ * ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
+ * ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
  */
 
 void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
@@ -570,7 +570,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
        unsigned int old, new;
 
        /* Real old ELCR mask */
-       old = inb(0x4d0) | (inb(0x4d1) << 8);
+       old = inb(PIC_ELCR1) | (inb(PIC_ELCR2) << 8);
 
        /*
         * If we use ACPI to set PCI IRQs, then we should clear ELCR
@@ -596,8 +596,8 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
                return;
 
        pr_warn("setting ELCR to %04x (from %04x)\n", new, old);
-       outb(new, 0x4d0);
-       outb(new >> 8, 0x4d1);
+       outb(new, PIC_ELCR1);
+       outb(new >> 8, PIC_ELCR2);
 }
 
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
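
The bit[n] semantics called out in the comment are easy to model: the two 8-bit ports form a 16-bit image where a set bit means the IRQ is level triggered. The sketch below manipulates such an image in plain C; the mask value is invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool demo_irq_is_level(uint16_t elcr, unsigned int irq)
{
	return (elcr >> irq) & 1;
}

static uint16_t demo_set_level(uint16_t elcr, unsigned int irq, bool level)
{
	uint16_t mask = 1u << irq;

	return level ? (elcr | mask) : (elcr & ~mask);
}

int main(void)
{
	uint16_t elcr = 0x0c20;			/* example: IRQs 5, 10, 11 level */

	elcr = demo_set_level(elcr, 9, true);	/* e.g. a level-triggered SCI */
	printf("ELCR = %#06x, irq9 level = %d\n", elcr, demo_irq_is_level(elcr, 9));
	return 0;
}
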
index d262811..b70344b 100644 (file)
@@ -38,6 +38,7 @@
 
 #include <asm/trace/irq_vectors.h>
 #include <asm/irq_remapping.h>
+#include <asm/pc-conf-reg.h>
 #include <asm/perf_event.h>
 #include <asm/x86_init.h>
 #include <linux/atomic.h>
@@ -132,18 +133,14 @@ static int enabled_via_apicbase __ro_after_init;
  */
 static inline void imcr_pic_to_apic(void)
 {
-       /* select IMCR register */
-       outb(0x70, 0x22);
        /* NMI and 8259 INTR go through APIC */
-       outb(0x01, 0x23);
+       pc_conf_set(PC_CONF_MPS_IMCR, 0x01);
 }
 
 static inline void imcr_apic_to_pic(void)
 {
-       /* select IMCR register */
-       outb(0x70, 0x22);
        /* NMI and 8259 INTR go directly to BSP */
-       outb(0x00, 0x23);
+       pc_conf_set(PC_CONF_MPS_IMCR, 0x00);
 }
 #endif
 
index d5c691a..c1bb384 100644 (file)
@@ -764,7 +764,7 @@ static bool irq_active_low(int idx)
 static bool EISA_ELCR(unsigned int irq)
 {
        if (irq < nr_legacy_irqs()) {
-               unsigned int port = 0x4d0 + (irq >> 3);
+               unsigned int port = PIC_ELCR1 + (irq >> 3);
                return (inb(port) >> (irq & 7)) & 1;
        }
        apic_printk(APIC_VERBOSE, KERN_INFO
@@ -1986,7 +1986,8 @@ static struct irq_chip ioapic_chip __read_mostly = {
        .irq_set_affinity       = ioapic_set_affinity,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_get_irqchip_state  = ioapic_irq_get_chip_state,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static struct irq_chip ioapic_ir_chip __read_mostly = {
@@ -1999,7 +2000,8 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
        .irq_set_affinity       = ioapic_set_affinity,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_get_irqchip_state  = ioapic_irq_get_chip_state,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static inline void init_IO_APIC_traps(void)
index 44ebe25..dbacb9e 100644 (file)
@@ -58,11 +58,13 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
         *   The quirk bit is not set in this case.
         * - The new vector is the same as the old vector
         * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+        * - The interrupt is not yet started up
         * - The new destination CPU is the same as the old destination CPU
         */
        if (!irqd_msi_nomask_quirk(irqd) ||
            cfg->vector == old_cfg.vector ||
            old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+           !irqd_is_started(irqd) ||
            cfg->dest_apicid == old_cfg.dest_apicid) {
                irq_msi_update_msg(irqd, cfg);
                return ret;
@@ -150,7 +152,8 @@ static struct irq_chip pci_msi_controller = {
        .irq_ack                = irq_chip_ack_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_set_affinity       = msi_set_affinity,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
@@ -219,7 +222,8 @@ static struct irq_chip pci_msi_ir_controller = {
        .irq_mask               = pci_msi_mask_irq,
        .irq_ack                = irq_chip_ack_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static struct msi_domain_info pci_msi_ir_domain_info = {
@@ -273,7 +277,8 @@ static struct irq_chip dmar_msi_controller = {
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_compose_msi_msg    = dmar_msi_compose_msg,
        .irq_write_msi_msg      = dmar_msi_write_msg,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static int dmar_msi_init(struct irq_domain *domain,
index fb67ed5..c132daa 100644 (file)
@@ -1299,7 +1299,7 @@ static void __init print_PIC(void)
 
        pr_debug("... PIC  ISR: %04x\n", v);
 
-       v = inb(0x4d1) << 8 | inb(0x4d0);
+       v = inb(PIC_ELCR2) << 8 | inb(PIC_ELCR1);
        pr_debug("... PIC ELCR: %04x\n", v);
 }
 
index b7c0030..2131af9 100644 (file)
@@ -438,7 +438,7 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
 
        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE)
-               node = per_cpu(cpu_llc_id, cpu);
+               node = get_llc_id(cpu);
 
        /*
         * On multi-fabric platform (e.g. Numascale NumaChip) a
index d41b70f..ecfca3b 100644 (file)
@@ -43,6 +43,7 @@ static void __init mds_select_mitigation(void);
 static void __init mds_print_mitigation(void);
 static void __init taa_select_mitigation(void);
 static void __init srbds_select_mitigation(void);
+static void __init l1d_flush_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
 u64 x86_spec_ctrl_base;
@@ -76,6 +77,13 @@ EXPORT_SYMBOL_GPL(mds_user_clear);
 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
 EXPORT_SYMBOL_GPL(mds_idle_clear);
 
+/*
+ * Controls whether l1d flush based mitigations are enabled,
+ * based on hw features and admin setting via boot parameter;
+ * defaults to false.
+ */
+DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+
 void __init check_bugs(void)
 {
        identify_boot_cpu();
@@ -111,6 +119,7 @@ void __init check_bugs(void)
        mds_select_mitigation();
        taa_select_mitigation();
        srbds_select_mitigation();
+       l1d_flush_select_mitigation();
 
        /*
         * As MDS and TAA mitigations are inter-related, print MDS
@@ -491,6 +500,34 @@ static int __init srbds_parse_cmdline(char *str)
 }
 early_param("srbds", srbds_parse_cmdline);
 
+#undef pr_fmt
+#define pr_fmt(fmt)     "L1D Flush : " fmt
+
+enum l1d_flush_mitigations {
+       L1D_FLUSH_OFF = 0,
+       L1D_FLUSH_ON,
+};
+
+static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
+
+static void __init l1d_flush_select_mitigation(void)
+{
+       if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
+               return;
+
+       static_branch_enable(&switch_mm_cond_l1d_flush);
+       pr_info("Conditional flush on switch_mm() enabled\n");
+}
+
+static int __init l1d_flush_parse_cmdline(char *str)
+{
+       if (!strcmp(str, "on"))
+               l1d_flush_mitigation = L1D_FLUSH_ON;
+
+       return 0;
+}
+early_param("l1d_flush", l1d_flush_parse_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V1 : " fmt
 
@@ -1215,6 +1252,24 @@ static void task_update_spec_tif(struct task_struct *tsk)
                speculation_ctrl_update_current();
 }
 
+static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+
+       if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
+               return -EPERM;
+
+       switch (ctrl) {
+       case PR_SPEC_ENABLE:
+               set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
+               return 0;
+       case PR_SPEC_DISABLE:
+               clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
+               return 0;
+       default:
+               return -ERANGE;
+       }
+}
+
 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 {
        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
@@ -1324,6 +1379,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                return ssb_prctl_set(task, ctrl);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_set(task, ctrl);
+       case PR_SPEC_L1D_FLUSH:
+               return l1d_flush_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
@@ -1340,6 +1397,17 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
 }
 #endif
 
+static int l1d_flush_prctl_get(struct task_struct *task)
+{
+       if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
+               return PR_SPEC_FORCE_DISABLE;
+
+       if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
+               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+       else
+               return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+}
+
 static int ssb_prctl_get(struct task_struct *task)
 {
        switch (ssb_mode) {
@@ -1390,6 +1458,8 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
                return ssb_prctl_get(task);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_get(task);
+       case PR_SPEC_L1D_FLUSH:
+               return l1d_flush_prctl_get(task);
        default:
                return -ENODEV;
        }
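
With the new PR_SPEC_L1D_FLUSH control wired up, a task can opt into the flush from userspace. A minimal sketch, assuming the prctl constants added by this series (the fallback defines only cover older headers); it fails with EPERM unless the kernel was booted with l1d_flush=on on FLUSH_L1D-capable hardware:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL	52
#define PR_SET_SPECULATION_CTRL	53
#endif
#ifndef PR_SPEC_L1D_FLUSH
#define PR_SPEC_L1D_FLUSH	2
#endif
#ifndef PR_SPEC_ENABLE
#define PR_SPEC_ENABLE		(1UL << 1)
#endif

int main(void)
{
	/* ask for an L1D flush on every mm switch away from this task */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE, 0, 0)) {
		perror("PR_SPEC_L1D_FLUSH");
		return 1;
	}
	printf("L1D flush on mm switch enabled for this task\n");
	return 0;
}
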
index 64b805b..0f88859 100644 (file)
@@ -79,6 +79,12 @@ EXPORT_SYMBOL(smp_num_siblings);
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
 
+u16 get_llc_id(unsigned int cpu)
+{
+       return per_cpu(cpu_llc_id, cpu);
+}
+EXPORT_SYMBOL_GPL(get_llc_id);
+
 /* correctly size the local cpu masks */
 void __init setup_cpu_local_masks(void)
 {
index 22791aa..8cb7816 100644 (file)
@@ -817,7 +817,10 @@ log_it:
                if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
                        goto clear_it;
 
-               mce_log(&m);
+               if (flags & MCP_QUEUE_LOG)
+                       mce_gen_pool_add(&m);
+               else
+                       mce_log(&m);
 
 clear_it:
                /*
@@ -1639,10 +1642,12 @@ static void __mcheck_cpu_init_generic(void)
                m_fl = MCP_DONTLOG;
 
        /*
-        * Log the machine checks left over from the previous reset.
+        * Log the machine checks left over from the previous reset. Log them
+        * only, do not start processing them. That will happen in mcheck_late_init()
+        * when all consumers have been registered on the notifier chain.
         */
        bitmap_fill(all_banks, MAX_NR_BANKS);
-       machine_check_poll(MCP_UC | m_fl, &all_banks);
+       machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks);
 
        cr4_set_bits(X86_CR4_MCE);
 
index 4e86d97..0bfc140 100644 (file)
@@ -235,7 +235,7 @@ static void __maybe_unused raise_mce(struct mce *m)
                unsigned long start;
                int cpu;
 
-               get_online_cpus();
+               cpus_read_lock();
                cpumask_copy(mce_inject_cpumask, cpu_online_mask);
                cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
                for_each_online_cpu(cpu) {
@@ -269,7 +269,7 @@ static void __maybe_unused raise_mce(struct mce *m)
                }
                raise_local();
                put_cpu();
-               put_online_cpus();
+               cpus_read_unlock();
        } else {
                preempt_disable();
                raise_local();
@@ -529,7 +529,7 @@ static void do_inject(void)
                cpu = get_nbc_for_node(topology_die_id(cpu));
        }
 
-       get_online_cpus();
+       cpus_read_lock();
        if (!cpu_online(cpu))
                goto err;
 
@@ -553,7 +553,7 @@ static void do_inject(void)
        }
 
 err:
-       put_online_cpus();
+       cpus_read_unlock();
 
 }
 
index 6a6318e..efb69be 100644 (file)
@@ -55,7 +55,7 @@ LIST_HEAD(microcode_cache);
  * All non cpu-hotplug-callback call sites use:
  *
  * - microcode_mutex to synchronize with each other;
- * - get/put_online_cpus() to synchronize with
+ * - cpus_read_lock/unlock() to synchronize with
  *   the cpu-hotplug-callback call sites.
  *
  * We guarantee that only a single cpu is being
@@ -431,7 +431,7 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
                return ret;
        }
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&microcode_mutex);
 
        if (do_microcode_update(buf, len) == 0)
@@ -441,7 +441,7 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
                perf_check_microcode();
 
        mutex_unlock(&microcode_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return ret;
 }
@@ -629,7 +629,7 @@ static ssize_t reload_store(struct device *dev,
        if (val != 1)
                return size;
 
-       get_online_cpus();
+       cpus_read_lock();
 
        ret = check_online_cpus();
        if (ret)
@@ -644,7 +644,7 @@ static ssize_t reload_store(struct device *dev,
        mutex_unlock(&microcode_mutex);
 
 put:
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (ret == 0)
                ret = size;
@@ -853,14 +853,14 @@ static int __init microcode_init(void)
        if (IS_ERR(microcode_pdev))
                return PTR_ERR(microcode_pdev);
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&microcode_mutex);
 
        error = subsys_interface_register(&mc_cpu_interface);
        if (!error)
                perf_check_microcode();
        mutex_unlock(&microcode_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (error)
                goto out_pdev;
@@ -892,13 +892,13 @@ static int __init microcode_init(void)
                           &cpu_root_microcode_group);
 
  out_driver:
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&microcode_mutex);
 
        subsys_interface_unregister(&mc_cpu_interface);
 
        mutex_unlock(&microcode_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
 
  out_pdev:
        platform_device_unregister(microcode_pdev);
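The get/put_online_cpus() calls replaced above are one-for-one substitutions: they were thin wrappers around the CPU-hotplug read lock, and cpus_read_lock()/cpus_read_unlock() are the names being kept. A throwaway in-kernel sketch of the pattern (a demo module, not code from this series) that keeps the online mask stable while it is walked:

    #include <linux/module.h>
    #include <linux/cpu.h>

    static int __init hotplug_lock_demo_init(void)
    {
            unsigned int cpu;

            cpus_read_lock();               /* block CPU hotplug while we look */
            for_each_online_cpu(cpu)
                    pr_info("cpu %u is online\n", cpu);
            cpus_read_unlock();

            return 0;
    }

    static void __exit hotplug_lock_demo_exit(void) { }

    module_init(hotplug_lock_demo_init);
    module_exit(hotplug_lock_demo_exit);
    MODULE_LICENSE("GPL");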
index a76694b..2746cac 100644 (file)
@@ -336,7 +336,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
        replace = -1;
 
        /* No CPU hotplug when we change MTRR entries */
-       get_online_cpus();
+       cpus_read_lock();
 
        /* Search for existing MTRR  */
        mutex_lock(&mtrr_mutex);
@@ -398,7 +398,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
        error = i;
  out:
        mutex_unlock(&mtrr_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return error;
 }
 
@@ -485,7 +485,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 
        max = num_var_ranges;
        /* No CPU hotplug when we change MTRR entries */
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&mtrr_mutex);
        if (reg < 0) {
                /*  Search for existing MTRR  */
@@ -520,7 +520,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
        error = reg;
  out:
        mutex_unlock(&mtrr_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return error;
 }
 
index 23001ae..4b8813b 100644 (file)
@@ -57,128 +57,57 @@ static void
 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
              struct rdt_resource *r);
 
-#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
+#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)
 
-struct rdt_resource rdt_resources_all[] = {
+struct rdt_hw_resource rdt_resources_all[] = {
        [RDT_RESOURCE_L3] =
        {
-               .rid                    = RDT_RESOURCE_L3,
-               .name                   = "L3",
-               .domains                = domain_init(RDT_RESOURCE_L3),
-               .msr_base               = MSR_IA32_L3_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 3,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 1,
-                       .cbm_idx_offset = 0,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L3DATA] =
-       {
-               .rid                    = RDT_RESOURCE_L3DATA,
-               .name                   = "L3DATA",
-               .domains                = domain_init(RDT_RESOURCE_L3DATA),
-               .msr_base               = MSR_IA32_L3_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 3,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 0,
+               .r_resctrl = {
+                       .rid                    = RDT_RESOURCE_L3,
+                       .name                   = "L3",
+                       .cache_level            = 3,
+                       .cache = {
+                               .min_cbm_bits   = 1,
+                       },
+                       .domains                = domain_init(RDT_RESOURCE_L3),
+                       .parse_ctrlval          = parse_cbm,
+                       .format_str             = "%d=%0*x",
+                       .fflags                 = RFTYPE_RES_CACHE,
                },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L3CODE] =
-       {
-               .rid                    = RDT_RESOURCE_L3CODE,
-               .name                   = "L3CODE",
-               .domains                = domain_init(RDT_RESOURCE_L3CODE),
                .msr_base               = MSR_IA32_L3_CBM_BASE,
                .msr_update             = cat_wrmsr,
-               .cache_level            = 3,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 1,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_L2] =
        {
-               .rid                    = RDT_RESOURCE_L2,
-               .name                   = "L2",
-               .domains                = domain_init(RDT_RESOURCE_L2),
-               .msr_base               = MSR_IA32_L2_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 2,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 1,
-                       .cbm_idx_offset = 0,
+               .r_resctrl = {
+                       .rid                    = RDT_RESOURCE_L2,
+                       .name                   = "L2",
+                       .cache_level            = 2,
+                       .cache = {
+                               .min_cbm_bits   = 1,
+                       },
+                       .domains                = domain_init(RDT_RESOURCE_L2),
+                       .parse_ctrlval          = parse_cbm,
+                       .format_str             = "%d=%0*x",
+                       .fflags                 = RFTYPE_RES_CACHE,
                },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L2DATA] =
-       {
-               .rid                    = RDT_RESOURCE_L2DATA,
-               .name                   = "L2DATA",
-               .domains                = domain_init(RDT_RESOURCE_L2DATA),
                .msr_base               = MSR_IA32_L2_CBM_BASE,
                .msr_update             = cat_wrmsr,
-               .cache_level            = 2,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 0,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L2CODE] =
-       {
-               .rid                    = RDT_RESOURCE_L2CODE,
-               .name                   = "L2CODE",
-               .domains                = domain_init(RDT_RESOURCE_L2CODE),
-               .msr_base               = MSR_IA32_L2_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 2,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 1,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_MBA] =
        {
-               .rid                    = RDT_RESOURCE_MBA,
-               .name                   = "MB",
-               .domains                = domain_init(RDT_RESOURCE_MBA),
-               .cache_level            = 3,
-               .parse_ctrlval          = parse_bw,
-               .format_str             = "%d=%*u",
-               .fflags                 = RFTYPE_RES_MB,
+               .r_resctrl = {
+                       .rid                    = RDT_RESOURCE_MBA,
+                       .name                   = "MB",
+                       .cache_level            = 3,
+                       .domains                = domain_init(RDT_RESOURCE_MBA),
+                       .parse_ctrlval          = parse_bw,
+                       .format_str             = "%d=%*u",
+                       .fflags                 = RFTYPE_RES_MB,
+               },
        },
 };
 
-static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
-{
-       return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
-}
-
 /*
  * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
  * as they do not have CPUID enumeration support for Cache allocation.
@@ -199,7 +128,8 @@ static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
  */
 static inline void cache_alloc_hsw_probe(void)
 {
-       struct rdt_resource *r  = &rdt_resources_all[RDT_RESOURCE_L3];
+       struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
+       struct rdt_resource *r  = &hw_res->r_resctrl;
        u32 l, h, max_cbm = BIT_MASK(20) - 1;
 
        if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
@@ -211,7 +141,7 @@ static inline void cache_alloc_hsw_probe(void)
        if (l != max_cbm)
                return;
 
-       r->num_closid = 4;
+       hw_res->num_closid = 4;
        r->default_ctrl = max_cbm;
        r->cache.cbm_len = 20;
        r->cache.shareable_bits = 0xc0000;
@@ -225,7 +155,7 @@ static inline void cache_alloc_hsw_probe(void)
 bool is_mba_sc(struct rdt_resource *r)
 {
        if (!r)
-               return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;
+               return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;
 
        return r->membw.mba_sc;
 }
@@ -253,12 +183,13 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
 
 static bool __get_mem_config_intel(struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_3_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx, max_delay;
 
        cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
-       r->num_closid = edx.split.cos_max + 1;
+       hw_res->num_closid = edx.split.cos_max + 1;
        max_delay = eax.split.max_delay + 1;
        r->default_ctrl = MAX_MBA_BW;
        r->membw.arch_needs_linear = true;
@@ -287,12 +218,13 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
 
 static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_3_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx;
 
        cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
-       r->num_closid = edx.split.cos_max + 1;
+       hw_res->num_closid = edx.split.cos_max + 1;
        r->default_ctrl = MAX_MBA_BW_AMD;
 
        /* AMD does not use delay */
@@ -317,12 +249,13 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 
 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_1_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx;
 
        cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
-       r->num_closid = edx.split.cos_max + 1;
+       hw_res->num_closid = edx.split.cos_max + 1;
        r->cache.cbm_len = eax.split.cbm_len + 1;
        r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
        r->cache.shareable_bits = ebx & r->default_ctrl;
@@ -331,43 +264,35 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
        r->alloc_enabled = true;
 }
 
-static void rdt_get_cdp_config(int level, int type)
+static void rdt_get_cdp_config(int level)
 {
-       struct rdt_resource *r_l = &rdt_resources_all[level];
-       struct rdt_resource *r = &rdt_resources_all[type];
-
-       r->num_closid = r_l->num_closid / 2;
-       r->cache.cbm_len = r_l->cache.cbm_len;
-       r->default_ctrl = r_l->default_ctrl;
-       r->cache.shareable_bits = r_l->cache.shareable_bits;
-       r->data_width = (r->cache.cbm_len + 3) / 4;
-       r->alloc_capable = true;
        /*
         * By default, CDP is disabled. CDP can be enabled by mount parameter
         * "cdp" during resctrl file system mount time.
         */
-       r->alloc_enabled = false;
+       rdt_resources_all[level].cdp_enabled = false;
+       rdt_resources_all[level].r_resctrl.cdp_capable = true;
 }
 
 static void rdt_get_cdp_l3_config(void)
 {
-       rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
-       rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
+       rdt_get_cdp_config(RDT_RESOURCE_L3);
 }
 
 static void rdt_get_cdp_l2_config(void)
 {
-       rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
-       rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
+       rdt_get_cdp_config(RDT_RESOURCE_L2);
 }
 
 static void
 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
 {
        unsigned int i;
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
        for (i = m->low; i < m->high; i++)
-               wrmsrl(r->msr_base + i, d->ctrl_val[i]);
+               wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
 }
 
 /*
@@ -389,19 +314,23 @@ mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
                struct rdt_resource *r)
 {
        unsigned int i;
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
        /*  Write the delay values for mba. */
        for (i = m->low; i < m->high; i++)
-               wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
+               wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
 }
 
 static void
 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
 {
        unsigned int i;
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
        for (i = m->low; i < m->high; i++)
-               wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
+               wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
 }
 
 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
@@ -417,16 +346,22 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
        return NULL;
 }
 
+u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
+{
+       return resctrl_to_arch_res(r)->num_closid;
+}
+
 void rdt_ctrl_update(void *arg)
 {
        struct msr_param *m = arg;
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
        struct rdt_resource *r = m->res;
        int cpu = smp_processor_id();
        struct rdt_domain *d;
 
        d = get_domain_from_cpu(cpu, r);
        if (d) {
-               r->msr_update(d, m, r);
+               hw_res->msr_update(d, m, r);
                return;
        }
        pr_warn_once("cpu %d not found in any domain for resource %s\n",
@@ -468,6 +403,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 
 void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        int i;
 
        /*
@@ -476,7 +412,7 @@ void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
         * For Memory Allocation: Set b/w requested to 100%
         * and the bandwidth in MBps to U32_MAX
         */
-       for (i = 0; i < r->num_closid; i++, dc++, dm++) {
+       for (i = 0; i < hw_res->num_closid; i++, dc++, dm++) {
                *dc = r->default_ctrl;
                *dm = MBA_MAX_MBPS;
        }
@@ -484,26 +420,30 @@ void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
 
 static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct msr_param m;
        u32 *dc, *dm;
 
-       dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
+       dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
+                          GFP_KERNEL);
        if (!dc)
                return -ENOMEM;
 
-       dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL);
+       dm = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->mbps_val),
+                          GFP_KERNEL);
        if (!dm) {
                kfree(dc);
                return -ENOMEM;
        }
 
-       d->ctrl_val = dc;
-       d->mbps_val = dm;
+       hw_dom->ctrl_val = dc;
+       hw_dom->mbps_val = dm;
        setup_default_ctrlval(r, dc, dm);
 
        m.low = 0;
-       m.high = r->num_closid;
-       r->msr_update(d, &m, r);
+       m.high = hw_res->num_closid;
+       hw_res->msr_update(d, &m, r);
        return 0;
 }
 
@@ -560,6 +500,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 {
        int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
        struct list_head *add_pos = NULL;
+       struct rdt_hw_domain *hw_dom;
        struct rdt_domain *d;
 
        d = rdt_find_domain(r, id, &add_pos);
@@ -575,10 +516,11 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
                return;
        }
 
-       d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
-       if (!d)
+       hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
+       if (!hw_dom)
                return;
 
+       d = &hw_dom->d_resctrl;
        d->id = id;
        cpumask_set_cpu(cpu, &d->cpu_mask);
 
@@ -607,6 +549,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 {
        int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
+       struct rdt_hw_domain *hw_dom;
        struct rdt_domain *d;
 
        d = rdt_find_domain(r, id, NULL);
@@ -614,6 +557,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                pr_warn("Couldn't find cache id for CPU %d\n", cpu);
                return;
        }
+       hw_dom = resctrl_to_arch_dom(d);
 
        cpumask_clear_cpu(cpu, &d->cpu_mask);
        if (cpumask_empty(&d->cpu_mask)) {
@@ -646,16 +590,16 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                if (d->plr)
                        d->plr->d = NULL;
 
-               kfree(d->ctrl_val);
-               kfree(d->mbps_val);
+               kfree(hw_dom->ctrl_val);
+               kfree(hw_dom->mbps_val);
                bitmap_free(d->rmid_busy_llc);
                kfree(d->mbm_total);
                kfree(d->mbm_local);
-               kfree(d);
+               kfree(hw_dom);
                return;
        }
 
-       if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
+       if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) {
                if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
                        cancel_delayed_work(&d->mbm_over);
                        mbm_setup_overflow_handler(d, 0);
@@ -732,13 +676,8 @@ static int resctrl_offline_cpu(unsigned int cpu)
 static __init void rdt_init_padding(void)
 {
        struct rdt_resource *r;
-       int cl;
 
        for_each_alloc_capable_rdt_resource(r) {
-               cl = strlen(r->name);
-               if (cl > max_name_width)
-                       max_name_width = cl;
-
                if (r->data_width > max_data_width)
                        max_data_width = r->data_width;
        }
@@ -827,19 +766,22 @@ static bool __init rdt_cpu_has(int flag)
 
 static __init bool get_mem_config(void)
 {
+       struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];
+
        if (!rdt_cpu_has(X86_FEATURE_MBA))
                return false;
 
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-               return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
+               return __get_mem_config_intel(&hw_res->r_resctrl);
        else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-               return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
+               return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
 
        return false;
 }
 
 static __init bool get_rdt_alloc_resources(void)
 {
+       struct rdt_resource *r;
        bool ret = false;
 
        if (rdt_alloc_capable)
@@ -849,14 +791,16 @@ static __init bool get_rdt_alloc_resources(void)
                return false;
 
        if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
-               rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+               r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+               rdt_get_cache_alloc_cfg(1, r);
                if (rdt_cpu_has(X86_FEATURE_CDP_L3))
                        rdt_get_cdp_l3_config();
                ret = true;
        }
        if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
                /* CPUID 0x10.2 fields are the same format as 0x10.1 */
-               rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+               r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
+               rdt_get_cache_alloc_cfg(2, r);
                if (rdt_cpu_has(X86_FEATURE_CDP_L2))
                        rdt_get_cdp_l2_config();
                ret = true;
@@ -870,6 +814,8 @@ static __init bool get_rdt_alloc_resources(void)
 
 static __init bool get_rdt_mon_resources(void)
 {
+       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+
        if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
                rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
        if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
@@ -880,7 +826,7 @@ static __init bool get_rdt_mon_resources(void)
        if (!rdt_mon_features)
                return false;
 
-       return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
+       return !rdt_get_mon_l3_config(r);
 }
 
 static __init void __check_quirks_intel(void)
@@ -918,42 +864,40 @@ static __init bool get_rdt_resources(void)
 
 static __init void rdt_init_res_defs_intel(void)
 {
+       struct rdt_hw_resource *hw_res;
        struct rdt_resource *r;
 
        for_each_rdt_resource(r) {
+               hw_res = resctrl_to_arch_res(r);
+
                if (r->rid == RDT_RESOURCE_L3 ||
-                   r->rid == RDT_RESOURCE_L3DATA ||
-                   r->rid == RDT_RESOURCE_L3CODE ||
-                   r->rid == RDT_RESOURCE_L2 ||
-                   r->rid == RDT_RESOURCE_L2DATA ||
-                   r->rid == RDT_RESOURCE_L2CODE) {
+                   r->rid == RDT_RESOURCE_L2) {
                        r->cache.arch_has_sparse_bitmaps = false;
                        r->cache.arch_has_empty_bitmaps = false;
                        r->cache.arch_has_per_cpu_cfg = false;
                } else if (r->rid == RDT_RESOURCE_MBA) {
-                       r->msr_base = MSR_IA32_MBA_THRTL_BASE;
-                       r->msr_update = mba_wrmsr_intel;
+                       hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
+                       hw_res->msr_update = mba_wrmsr_intel;
                }
        }
 }
 
 static __init void rdt_init_res_defs_amd(void)
 {
+       struct rdt_hw_resource *hw_res;
        struct rdt_resource *r;
 
        for_each_rdt_resource(r) {
+               hw_res = resctrl_to_arch_res(r);
+
                if (r->rid == RDT_RESOURCE_L3 ||
-                   r->rid == RDT_RESOURCE_L3DATA ||
-                   r->rid == RDT_RESOURCE_L3CODE ||
-                   r->rid == RDT_RESOURCE_L2 ||
-                   r->rid == RDT_RESOURCE_L2DATA ||
-                   r->rid == RDT_RESOURCE_L2CODE) {
+                   r->rid == RDT_RESOURCE_L2) {
                        r->cache.arch_has_sparse_bitmaps = true;
                        r->cache.arch_has_empty_bitmaps = true;
                        r->cache.arch_has_per_cpu_cfg = true;
                } else if (r->rid == RDT_RESOURCE_MBA) {
-                       r->msr_base = MSR_IA32_MBA_BW_BASE;
-                       r->msr_update = mba_wrmsr_amd;
+                       hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
+                       hw_res->msr_update = mba_wrmsr_amd;
                }
        }
 }
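
The heart of the rework above is that the architecture-private fields now live in struct rdt_hw_resource, which embeds the generic struct rdt_resource; code that only holds the generic pointer gets back to the wrapper with container_of(), as resctrl_to_arch_res() does. A self-contained sketch of the same embedding pattern (structure names are made up, and container_of is redefined locally so the example builds in userspace):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct resource {               /* the part generic code sees */
            const char *name;
    };

    struct hw_resource {            /* arch wrapper, embeds the generic part */
            struct resource r;
            unsigned int msr_base;
    };

    static unsigned int get_msr_base(struct resource *r)
    {
            /* Recover the wrapper from a pointer to the embedded member. */
            return container_of(r, struct hw_resource, r)->msr_base;
    }

    int main(void)
    {
            struct hw_resource l3 = { .r = { .name = "L3" }, .msr_base = 0xc90 };

            printf("%s msr base: 0x%x\n", l3.r.name, get_msr_base(&l3.r));
            return 0;
    }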
index c877642..8766627 100644 (file)
@@ -57,20 +57,23 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
        return true;
 }
 
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
             struct rdt_domain *d)
 {
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        unsigned long bw_val;
 
-       if (d->have_new_ctrl) {
+       cfg = &d->staged_config[s->conf_type];
+       if (cfg->have_new_ctrl) {
                rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
                return -EINVAL;
        }
 
        if (!bw_validate(data->buf, &bw_val, r))
                return -EINVAL;
-       d->new_ctrl = bw_val;
-       d->have_new_ctrl = true;
+       cfg->new_ctrl = bw_val;
+       cfg->have_new_ctrl = true;
 
        return 0;
 }
@@ -125,13 +128,16 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
  * Read one cache bit mask (hex). Check that it is valid for the current
  * resource type.
  */
-int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
              struct rdt_domain *d)
 {
        struct rdtgroup *rdtgrp = data->rdtgrp;
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        u32 cbm_val;
 
-       if (d->have_new_ctrl) {
+       cfg = &d->staged_config[s->conf_type];
+       if (cfg->have_new_ctrl) {
                rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
                return -EINVAL;
        }
@@ -160,12 +166,12 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
         * The CBM may not overlap with the CBM of another closid if
         * either is exclusive.
         */
-       if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
+       if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
                rdt_last_cmd_puts("Overlaps with exclusive group\n");
                return -EINVAL;
        }
 
-       if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
+       if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
                if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
                    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                        rdt_last_cmd_puts("Overlaps with other group\n");
@@ -173,8 +179,8 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
                }
        }
 
-       d->new_ctrl = cbm_val;
-       d->have_new_ctrl = true;
+       cfg->new_ctrl = cbm_val;
+       cfg->have_new_ctrl = true;
 
        return 0;
 }
@@ -185,9 +191,12 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
  * separated by ";". The "id" is in decimal, and must match one of
  * the "id"s for this resource.
  */
-static int parse_line(char *line, struct rdt_resource *r,
+static int parse_line(char *line, struct resctrl_schema *s,
                      struct rdtgroup *rdtgrp)
 {
+       enum resctrl_conf_type t = s->conf_type;
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        struct rdt_parse_data data;
        char *dom = NULL, *id;
        struct rdt_domain *d;
@@ -213,9 +222,10 @@ next:
                if (d->id == dom_id) {
                        data.buf = dom;
                        data.rdtgrp = rdtgrp;
-                       if (r->parse_ctrlval(&data, r, d))
+                       if (r->parse_ctrlval(&data, s, d))
                                return -EINVAL;
                        if (rdtgrp->mode ==  RDT_MODE_PSEUDO_LOCKSETUP) {
+                               cfg = &d->staged_config[t];
                                /*
                                 * In pseudo-locking setup mode and just
                                 * parsed a valid CBM that should be
@@ -224,9 +234,9 @@ next:
                                 * the required initialization for single
                                 * region and return.
                                 */
-                               rdtgrp->plr->r = r;
+                               rdtgrp->plr->s = s;
                                rdtgrp->plr->d = d;
-                               rdtgrp->plr->cbm = d->new_ctrl;
+                               rdtgrp->plr->cbm = cfg->new_ctrl;
                                d->plr = rdtgrp->plr;
                                return 0;
                        }
@@ -236,28 +246,72 @@ next:
        return -EINVAL;
 }
 
-int update_domains(struct rdt_resource *r, int closid)
+static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
 {
+       switch (type) {
+       default:
+       case CDP_NONE:
+               return closid;
+       case CDP_CODE:
+               return closid * 2 + 1;
+       case CDP_DATA:
+               return closid * 2;
+       }
+}
+
+static bool apply_config(struct rdt_hw_domain *hw_dom,
+                        struct resctrl_staged_config *cfg, u32 idx,
+                        cpumask_var_t cpu_mask, bool mba_sc)
+{
+       struct rdt_domain *dom = &hw_dom->d_resctrl;
+       u32 *dc = !mba_sc ? hw_dom->ctrl_val : hw_dom->mbps_val;
+
+       if (cfg->new_ctrl != dc[idx]) {
+               cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
+               dc[idx] = cfg->new_ctrl;
+
+               return true;
+       }
+
+       return false;
+}
+
+int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
+{
+       struct resctrl_staged_config *cfg;
+       struct rdt_hw_domain *hw_dom;
        struct msr_param msr_param;
+       enum resctrl_conf_type t;
        cpumask_var_t cpu_mask;
        struct rdt_domain *d;
        bool mba_sc;
-       u32 *dc;
        int cpu;
+       u32 idx;
 
        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;
 
-       msr_param.low = closid;
-       msr_param.high = msr_param.low + 1;
-       msr_param.res = r;
-
        mba_sc = is_mba_sc(r);
+       msr_param.res = NULL;
        list_for_each_entry(d, &r->domains, list) {
-               dc = !mba_sc ? d->ctrl_val : d->mbps_val;
-               if (d->have_new_ctrl && d->new_ctrl != dc[closid]) {
-                       cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
-                       dc[closid] = d->new_ctrl;
+               hw_dom = resctrl_to_arch_dom(d);
+               for (t = 0; t < CDP_NUM_TYPES; t++) {
+                       cfg = &hw_dom->d_resctrl.staged_config[t];
+                       if (!cfg->have_new_ctrl)
+                               continue;
+
+                       idx = get_config_index(closid, t);
+                       if (!apply_config(hw_dom, cfg, idx, cpu_mask, mba_sc))
+                               continue;
+
+                       if (!msr_param.res) {
+                               msr_param.low = idx;
+                               msr_param.high = msr_param.low + 1;
+                               msr_param.res = r;
+                       } else {
+                               msr_param.low = min(msr_param.low, idx);
+                               msr_param.high = max(msr_param.high, idx + 1);
+                       }
                }
        }
 
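get_config_index() above encodes how CDP doubles up the control MSRs: with CDP off a CLOSID maps straight to its own index, while with CDP on the data configuration uses the even slot (closid * 2) and the code configuration the odd slot (closid * 2 + 1). A small standalone sketch of that mapping (types and names simplified from the diff):

    #include <stdio.h>

    enum conf_type { CDP_NONE, CDP_CODE, CDP_DATA };

    static unsigned int config_index(unsigned int closid, enum conf_type type)
    {
            switch (type) {
            case CDP_CODE:
                    return closid * 2 + 1;
            case CDP_DATA:
                    return closid * 2;
            case CDP_NONE:
            default:
                    return closid;
            }
    }

    int main(void)
    {
            for (unsigned int closid = 0; closid < 3; closid++)
                    printf("closid %u -> none %u, data %u, code %u\n", closid,
                           config_index(closid, CDP_NONE),
                           config_index(closid, CDP_DATA),
                           config_index(closid, CDP_CODE));
            return 0;
    }

This is also why resctrl_arch_update_domains() above accumulates msr_param.low/high as a min/max over the touched indices: one IPI then writes the whole dirty range.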
@@ -284,11 +338,11 @@ done:
 static int rdtgroup_parse_resource(char *resname, char *tok,
                                   struct rdtgroup *rdtgrp)
 {
-       struct rdt_resource *r;
+       struct resctrl_schema *s;
 
-       for_each_alloc_enabled_rdt_resource(r) {
-               if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
-                       return parse_line(tok, r, rdtgrp);
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
+                       return parse_line(tok, s, rdtgrp);
        }
        rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
        return -EINVAL;
@@ -297,6 +351,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok,
 ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off)
 {
+       struct resctrl_schema *s;
        struct rdtgroup *rdtgrp;
        struct rdt_domain *dom;
        struct rdt_resource *r;
@@ -327,9 +382,9 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                goto out;
        }
 
-       for_each_alloc_enabled_rdt_resource(r) {
-               list_for_each_entry(dom, &r->domains, list)
-                       dom->have_new_ctrl = false;
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               list_for_each_entry(dom, &s->res->domains, list)
+                       memset(dom->staged_config, 0, sizeof(dom->staged_config));
        }
 
        while ((tok = strsep(&buf, "\n")) != NULL) {
@@ -349,8 +404,9 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                        goto out;
        }
 
-       for_each_alloc_enabled_rdt_resource(r) {
-               ret = update_domains(r, rdtgrp->closid);
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
+               ret = resctrl_arch_update_domains(r, rdtgrp->closid);
                if (ret)
                        goto out;
        }
@@ -371,19 +427,31 @@ out:
        return ret ?: nbytes;
 }
 
-static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+                           u32 closid, enum resctrl_conf_type type)
+{
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       u32 idx = get_config_index(closid, type);
+
+       if (!is_mba_sc(r))
+               return hw_dom->ctrl_val[idx];
+       return hw_dom->mbps_val[idx];
+}
+
+static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
 {
+       struct rdt_resource *r = schema->res;
        struct rdt_domain *dom;
        bool sep = false;
        u32 ctrl_val;
 
-       seq_printf(s, "%*s:", max_name_width, r->name);
+       seq_printf(s, "%*s:", max_name_width, schema->name);
        list_for_each_entry(dom, &r->domains, list) {
                if (sep)
                        seq_puts(s, ";");
 
-               ctrl_val = (!is_mba_sc(r) ? dom->ctrl_val[closid] :
-                           dom->mbps_val[closid]);
+               ctrl_val = resctrl_arch_get_config(r, dom, closid,
+                                                  schema->conf_type);
                seq_printf(s, r->format_str, dom->id, max_data_width,
                           ctrl_val);
                sep = true;
@@ -394,16 +462,17 @@ static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v)
 {
+       struct resctrl_schema *schema;
        struct rdtgroup *rdtgrp;
-       struct rdt_resource *r;
        int ret = 0;
        u32 closid;
 
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (rdtgrp) {
                if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
-                       for_each_alloc_enabled_rdt_resource(r)
-                               seq_printf(s, "%s:uninitialized\n", r->name);
+                       list_for_each_entry(schema, &resctrl_schema_all, list) {
+                               seq_printf(s, "%s:uninitialized\n", schema->name);
+                       }
                } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
                        if (!rdtgrp->plr->d) {
                                rdt_last_cmd_clear();
@@ -411,15 +480,15 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
                                ret = -ENODEV;
                        } else {
                                seq_printf(s, "%s:%d=%x\n",
-                                          rdtgrp->plr->r->name,
+                                          rdtgrp->plr->s->res->name,
                                           rdtgrp->plr->d->id,
                                           rdtgrp->plr->cbm);
                        }
                } else {
                        closid = rdtgrp->closid;
-                       for_each_alloc_enabled_rdt_resource(r) {
-                               if (closid < r->num_closid)
-                                       show_doms(s, r, closid);
+                       list_for_each_entry(schema, &resctrl_schema_all, list) {
+                               if (closid < schema->num_closid)
+                                       show_doms(s, schema, closid);
                        }
                }
        } else {
@@ -449,6 +518,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
 int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 {
        struct kernfs_open_file *of = m->private;
+       struct rdt_hw_resource *hw_res;
        u32 resid, evtid, domid;
        struct rdtgroup *rdtgrp;
        struct rdt_resource *r;
@@ -468,7 +538,8 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
        domid = md.u.domid;
        evtid = md.u.evtid;
 
-       r = &rdt_resources_all[resid];
+       hw_res = &rdt_resources_all[resid];
+       r = &hw_res->r_resctrl;
        d = rdt_find_domain(r, domid, NULL);
        if (IS_ERR_OR_NULL(d)) {
                ret = -ENOENT;
@@ -482,7 +553,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
        else if (rr.val & RMID_VAL_UNAVAIL)
                seq_puts(m, "Unavailable\n");
        else
-               seq_printf(m, "%llu\n", rr.val * r->mon_scale);
+               seq_printf(m, "%llu\n", rr.val * hw_res->mon_scale);
 
 out:
        rdtgroup_kn_unlock(of->kn);
index 6a5f60a..1d64718 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _ASM_X86_RESCTRL_INTERNAL_H
 #define _ASM_X86_RESCTRL_INTERNAL_H
 
+#include <linux/resctrl.h>
 #include <linux/sched.h>
 #include <linux/kernfs.h>
 #include <linux/fs_context.h>
@@ -109,6 +110,7 @@ extern unsigned int resctrl_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;
+extern struct list_head resctrl_schema_all;
 
 enum rdt_group_type {
        RDTCTRL_GROUP = 0,
@@ -161,8 +163,8 @@ struct mongroup {
 
 /**
  * struct pseudo_lock_region - pseudo-lock region information
- * @r:                 RDT resource to which this pseudo-locked region
- *                     belongs
+ * @s:                 Resctrl schema for the resource to which this
+ *                     pseudo-locked region belongs
  * @d:                 RDT domain to which this pseudo-locked region
  *                     belongs
  * @cbm:               bitmask of the pseudo-locked region
@@ -182,7 +184,7 @@ struct mongroup {
  * @pm_reqs:           Power management QoS requests related to this region
  */
 struct pseudo_lock_region {
-       struct rdt_resource     *r;
+       struct resctrl_schema   *s;
        struct rdt_domain       *d;
        u32                     cbm;
        wait_queue_head_t       lock_thread_wq;
@@ -303,44 +305,25 @@ struct mbm_state {
 };
 
 /**
- * struct rdt_domain - group of cpus sharing an RDT resource
- * @list:      all instances of this resource
- * @id:                unique id for this instance
- * @cpu_mask:  which cpus share this resource
- * @rmid_busy_llc:
- *             bitmap of which limbo RMIDs are above threshold
- * @mbm_total: saved state for MBM total bandwidth
- * @mbm_local: saved state for MBM local bandwidth
- * @mbm_over:  worker to periodically read MBM h/w counters
- * @cqm_limbo: worker to periodically read CQM h/w counters
- * @mbm_work_cpu:
- *             worker cpu for MBM h/w counters
- * @cqm_work_cpu:
- *             worker cpu for CQM h/w counters
+ * struct rdt_hw_domain - Arch private attributes of a set of CPUs that share
+ *                       a resource
+ * @d_resctrl: Properties exposed to the resctrl file system
  * @ctrl_val:  array of cache or mem ctrl values (indexed by CLOSID)
  * @mbps_val:  When mba_sc is enabled, this holds the bandwidth in MBps
- * @new_ctrl:  new ctrl value to be loaded
- * @have_new_ctrl: did user provide new_ctrl for this domain
- * @plr:       pseudo-locked region (if any) associated with domain
+ *
+ * Members of this structure are accessed via helpers that provide abstraction.
  */
-struct rdt_domain {
-       struct list_head                list;
-       int                             id;
-       struct cpumask                  cpu_mask;
-       unsigned long                   *rmid_busy_llc;
-       struct mbm_state                *mbm_total;
-       struct mbm_state                *mbm_local;
-       struct delayed_work             mbm_over;
-       struct delayed_work             cqm_limbo;
-       int                             mbm_work_cpu;
-       int                             cqm_work_cpu;
+struct rdt_hw_domain {
+       struct rdt_domain               d_resctrl;
        u32                             *ctrl_val;
        u32                             *mbps_val;
-       u32                             new_ctrl;
-       bool                            have_new_ctrl;
-       struct pseudo_lock_region       *plr;
 };
 
+static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
+{
+       return container_of(r, struct rdt_hw_domain, d_resctrl);
+}
+
 /**
  * struct msr_param - set a range of MSRs from a domain
  * @res:       The resource to use
@@ -349,69 +332,8 @@ struct rdt_domain {
  */
 struct msr_param {
        struct rdt_resource     *res;
-       int                     low;
-       int                     high;
-};
-
-/**
- * struct rdt_cache - Cache allocation related data
- * @cbm_len:           Length of the cache bit mask
- * @min_cbm_bits:      Minimum number of consecutive bits to be set
- * @cbm_idx_mult:      Multiplier of CBM index
- * @cbm_idx_offset:    Offset of CBM index. CBM index is computed by:
- *                     closid * cbm_idx_multi + cbm_idx_offset
- *                     in a cache bit mask
- * @shareable_bits:    Bitmask of shareable resource with other
- *                     executing entities
- * @arch_has_sparse_bitmaps:   True if a bitmap like f00f is valid.
- * @arch_has_empty_bitmaps:    True if the '0' bitmap is valid.
- * @arch_has_per_cpu_cfg:      True if QOS_CFG register for this cache
- *                             level has CPU scope.
- */
-struct rdt_cache {
-       unsigned int    cbm_len;
-       unsigned int    min_cbm_bits;
-       unsigned int    cbm_idx_mult;
-       unsigned int    cbm_idx_offset;
-       unsigned int    shareable_bits;
-       bool            arch_has_sparse_bitmaps;
-       bool            arch_has_empty_bitmaps;
-       bool            arch_has_per_cpu_cfg;
-};
-
-/**
- * enum membw_throttle_mode - System's memory bandwidth throttling mode
- * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system
- * @THREAD_THROTTLE_MAX:       Memory bandwidth is throttled at the core
- *                             always using smallest bandwidth percentage
- *                             assigned to threads, aka "max throttling"
- * @THREAD_THROTTLE_PER_THREAD:        Memory bandwidth is throttled at the thread
- */
-enum membw_throttle_mode {
-       THREAD_THROTTLE_UNDEFINED = 0,
-       THREAD_THROTTLE_MAX,
-       THREAD_THROTTLE_PER_THREAD,
-};
-
-/**
- * struct rdt_membw - Memory bandwidth allocation related data
- * @min_bw:            Minimum memory bandwidth percentage user can request
- * @bw_gran:           Granularity at which the memory bandwidth is allocated
- * @delay_linear:      True if memory B/W delay is in linear scale
- * @arch_needs_linear: True if we can't configure non-linear resources
- * @throttle_mode:     Bandwidth throttling mode when threads request
- *                     different memory bandwidths
- * @mba_sc:            True if MBA software controller(mba_sc) is enabled
- * @mb_map:            Mapping of memory B/W percentage to memory B/W delay
- */
-struct rdt_membw {
-       u32                             min_bw;
-       u32                             bw_gran;
-       u32                             delay_linear;
-       bool                            arch_needs_linear;
-       enum membw_throttle_mode        throttle_mode;
-       bool                            mba_sc;
-       u32                             *mb_map;
+       u32                     low;
+       u32                     high;
 };
 
 static inline bool is_llc_occupancy_enabled(void)
@@ -446,111 +368,103 @@ struct rdt_parse_data {
 };
 
 /**
- * struct rdt_resource - attributes of an RDT resource
- * @rid:               The index of the resource
- * @alloc_enabled:     Is allocation enabled on this machine
- * @mon_enabled:       Is monitoring enabled for this feature
- * @alloc_capable:     Is allocation available on this machine
- * @mon_capable:       Is monitor feature available on this machine
- * @name:              Name to use in "schemata" file
- * @num_closid:                Number of CLOSIDs available
- * @cache_level:       Which cache level defines scope of this resource
- * @default_ctrl:      Specifies default cache cbm or memory B/W percent.
+ * struct rdt_hw_resource - arch private attributes of a resctrl resource
+ * @r_resctrl:         Attributes of the resource used directly by resctrl.
+ * @num_closid:                Maximum number of closid this hardware can support,
+ *                     regardless of CDP. This is exposed via
+ *                     resctrl_arch_get_num_closid() to avoid confusion
+ *                     with struct resctrl_schema's property of the same name,
+ *                     which has been corrected for features like CDP.
  * @msr_base:          Base MSR address for CBMs
  * @msr_update:                Function pointer to update QOS MSRs
- * @data_width:                Character width of data when displaying
- * @domains:           All domains for this resource
- * @cache:             Cache allocation related data
- * @membw:             If the component has bandwidth controls, their properties.
- * @format_str:                Per resource format string to show domain value
- * @parse_ctrlval:     Per resource function pointer to parse control values
- * @evt_list:          List of monitoring events
- * @num_rmid:          Number of RMIDs available
  * @mon_scale:         cqm counter * mon_scale = occupancy in bytes
  * @mbm_width:         Monitor width, to detect and correct for overflow.
- * @fflags:            flags to choose base and info files
+ * @cdp_enabled:       CDP state of this resource
+ *
+ * Members of this structure are either private to the architecture
+ * e.g. mbm_width, or accessed via helpers that provide abstraction. e.g.
+ * msr_update and msr_base.
  */
-struct rdt_resource {
-       int                     rid;
-       bool                    alloc_enabled;
-       bool                    mon_enabled;
-       bool                    alloc_capable;
-       bool                    mon_capable;
-       char                    *name;
-       int                     num_closid;
-       int                     cache_level;
-       u32                     default_ctrl;
+struct rdt_hw_resource {
+       struct rdt_resource     r_resctrl;
+       u32                     num_closid;
        unsigned int            msr_base;
        void (*msr_update)      (struct rdt_domain *d, struct msr_param *m,
                                 struct rdt_resource *r);
-       int                     data_width;
-       struct list_head        domains;
-       struct rdt_cache        cache;
-       struct rdt_membw        membw;
-       const char              *format_str;
-       int (*parse_ctrlval)(struct rdt_parse_data *data,
-                            struct rdt_resource *r,
-                            struct rdt_domain *d);
-       struct list_head        evt_list;
-       int                     num_rmid;
        unsigned int            mon_scale;
        unsigned int            mbm_width;
-       unsigned long           fflags;
+       bool                    cdp_enabled;
 };
 
-int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r)
+{
+       return container_of(r, struct rdt_hw_resource, r_resctrl);
+}
+
+int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
              struct rdt_domain *d);
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
             struct rdt_domain *d);
 
 extern struct mutex rdtgroup_mutex;
 
-extern struct rdt_resource rdt_resources_all[];
+extern struct rdt_hw_resource rdt_resources_all[];
 extern struct rdtgroup rdtgroup_default;
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 
 extern struct dentry *debugfs_resctrl;
 
-enum {
+enum resctrl_res_level {
        RDT_RESOURCE_L3,
-       RDT_RESOURCE_L3DATA,
-       RDT_RESOURCE_L3CODE,
        RDT_RESOURCE_L2,
-       RDT_RESOURCE_L2DATA,
-       RDT_RESOURCE_L2CODE,
        RDT_RESOURCE_MBA,
 
        /* Must be the last */
        RDT_NUM_RESOURCES,
 };
 
+static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res)
+{
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(res);
+
+       hw_res++;
+       return &hw_res->r_resctrl;
+}
+
+static inline bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l)
+{
+       return rdt_resources_all[l].cdp_enabled;
+}
+
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
+
+/*
+ * To return the common struct rdt_resource, which is contained in struct
+ * rdt_hw_resource, walk the resctrl member of struct rdt_hw_resource.
+ */
 #define for_each_rdt_resource(r)                                             \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)
+       for (r = &rdt_resources_all[0].r_resctrl;                             \
+            r <= &rdt_resources_all[RDT_NUM_RESOURCES - 1].r_resctrl;        \
+            r = resctrl_inc(r))
 
 #define for_each_capable_rdt_resource(r)                                     \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->alloc_capable || r->mon_capable)
 
 #define for_each_alloc_capable_rdt_resource(r)                               \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->alloc_capable)
 
 #define for_each_mon_capable_rdt_resource(r)                                 \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->mon_capable)
 
 #define for_each_alloc_enabled_rdt_resource(r)                               \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->alloc_enabled)
 
 #define for_each_mon_enabled_rdt_resource(r)                                 \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->mon_enabled)
 
 /* CPUID.(EAX=10H, ECX=ResID=1).EAX */
@@ -594,7 +508,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off);
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v);
-bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
                           unsigned long cbm, int closid, bool exclusive);
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
                                  unsigned long cbm);
@@ -609,7 +523,6 @@ void rdt_pseudo_lock_release(void);
 int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
 void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
-int update_domains(struct rdt_resource *r, int closid);
 int closids_supported(void);
 void closid_free(int closid);
 int alloc_rmid(void);
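
With the resources wrapped, for_each_rdt_resource() can no longer step a plain array of struct rdt_resource: resctrl_inc() above advances by the size of the outer struct rdt_hw_resource and hands back the embedded member, and the filtered walkers are built by tacking an if () onto the base macro. A compact userspace sketch of both ideas (hypothetical names; the pointer cast relies on the generic part being the first member, where the kernel uses container_of()):

    #include <stdio.h>
    #include <stdbool.h>

    struct res {
            const char *name;
            bool alloc_capable;
    };

    struct hw_res {
            struct res r;           /* embedded generic part, first member */
            unsigned int msr_base;  /* arch-private field changes the stride */
    };

    static struct hw_res all[] = {
            { { "L3", true  }, 0xc90 },
            { { "L2", false }, 0xd10 },
            { { "MB", true  }, 0xd50 },
    };

    #define NUM_RES (sizeof(all) / sizeof(all[0]))

    /* Step by the size of the wrapper, return the embedded member. */
    static struct res *res_inc(struct res *r)
    {
            return &((struct hw_res *)r + 1)->r;
    }

    #define for_each_res(r) \
            for (r = &all[0].r; r <= &all[NUM_RES - 1].r; r = res_inc(r))

    #define for_each_alloc_capable_res(r) \
            for_each_res(r)               \
                    if (r->alloc_capable)

    int main(void)
    {
            struct res *r;

            for_each_alloc_capable_res(r)
                    printf("%s is alloc capable\n", r->name);
            return 0;
    }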
index f07c10b..c9f0f3d 100644 (file)
@@ -174,7 +174,7 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
        struct rdt_resource *r;
        u32 crmid = 1, nrmid;
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
 
        /*
         * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
@@ -232,7 +232,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
        int cpu;
        u64 val;
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
 
        entry->busy = 0;
        cpu = get_cpu();
@@ -285,15 +285,15 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
        return chunks >>= shift;
 }
 
-static int __mon_event_count(u32 rmid, struct rmid_read *rr)
+static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(rr->r);
        struct mbm_state *m;
        u64 chunks, tval;
 
        tval = __rmid_read(rmid, rr->evtid);
        if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
-               rr->val = tval;
-               return -EINVAL;
+               return tval;
        }
        switch (rr->evtid) {
        case QOS_L3_OCCUP_EVENT_ID:
@@ -307,10 +307,10 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
                break;
        default:
                /*
-                * Code would never reach here because
-                * an invalid event id would fail the __rmid_read.
+                * Code would never reach here because an invalid
+                * event id would fail the __rmid_read.
                 */
-               return -EINVAL;
+               return RMID_VAL_ERROR;
        }
 
        if (rr->first) {
@@ -319,7 +319,7 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
                return 0;
        }
 
-       chunks = mbm_overflow_count(m->prev_msr, tval, rr->r->mbm_width);
+       chunks = mbm_overflow_count(m->prev_msr, tval, hw_res->mbm_width);
        m->chunks += chunks;
        m->prev_msr = tval;
 
@@ -334,7 +334,7 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
  */
 static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
 {
-       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(rr->r);
        struct mbm_state *m = &rr->d->mbm_local[rmid];
        u64 tval, cur_bw, chunks;
 
@@ -342,8 +342,8 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
        if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
                return;
 
-       chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width);
-       cur_bw = (get_corrected_mbm_count(rmid, chunks) * r->mon_scale) >> 20;
+       chunks = mbm_overflow_count(m->prev_bw_msr, tval, hw_res->mbm_width);
+       cur_bw = (get_corrected_mbm_count(rmid, chunks) * hw_res->mon_scale) >> 20;
 
        if (m->delta_comp)
                m->delta_bw = abs(cur_bw - m->prev_bw);
@@ -361,23 +361,29 @@ void mon_event_count(void *info)
        struct rdtgroup *rdtgrp, *entry;
        struct rmid_read *rr = info;
        struct list_head *head;
+       u64 ret_val;
 
        rdtgrp = rr->rgrp;
 
-       if (__mon_event_count(rdtgrp->mon.rmid, rr))
-               return;
+       ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);
 
        /*
-        * For Ctrl groups read data from child monitor groups.
+        * For Ctrl groups read data from child monitor groups and
+        * add them together. Count events which are read successfully.
+        * Discard the rmid_read's reporting errors.
         */
        head = &rdtgrp->mon.crdtgrp_list;
 
        if (rdtgrp->type == RDTCTRL_GROUP) {
                list_for_each_entry(entry, head, mon.crdtgrp_list) {
-                       if (__mon_event_count(entry->mon.rmid, rr))
-                               return;
+                       if (__mon_event_count(entry->mon.rmid, rr) == 0)
+                               ret_val = 0;
                }
        }
+
+       /* Report error if none of rmid_reads are successful */
+       if (ret_val)
+               rr->val = ret_val;
 }
 
 /*
@@ -416,6 +422,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 {
        u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
        struct mbm_state *pmbm_data, *cmbm_data;
+       struct rdt_hw_resource *hw_r_mba;
+       struct rdt_hw_domain *hw_dom_mba;
        u32 cur_bw, delta_bw, user_bw;
        struct rdt_resource *r_mba;
        struct rdt_domain *dom_mba;
@@ -425,7 +433,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
        if (!is_mbm_local_enabled())
                return;
 
-       r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
+       hw_r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
+       r_mba = &hw_r_mba->r_resctrl;
        closid = rgrp->closid;
        rmid = rgrp->mon.rmid;
        pmbm_data = &dom_mbm->mbm_local[rmid];
@@ -435,11 +444,16 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
                pr_warn_once("Failure to get domain for MBA update\n");
                return;
        }
+       hw_dom_mba = resctrl_to_arch_dom(dom_mba);
 
        cur_bw = pmbm_data->prev_bw;
-       user_bw = dom_mba->mbps_val[closid];
+       user_bw = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
        delta_bw = pmbm_data->delta_bw;
-       cur_msr_val = dom_mba->ctrl_val[closid];
+       /*
+        * resctrl_arch_get_config() chooses the mbps/ctrl value to return
+        * based on is_mba_sc(). For now, reach into the hw_dom.
+        */
+       cur_msr_val = hw_dom_mba->ctrl_val[closid];
 
        /*
         * For Ctrl groups read data from child monitor groups.
@@ -474,9 +488,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
                return;
        }
 
-       cur_msr = r_mba->msr_base + closid;
+       cur_msr = hw_r_mba->msr_base + closid;
        wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
-       dom_mba->ctrl_val[closid] = new_msr_val;
+       hw_dom_mba->ctrl_val[closid] = new_msr_val;
 
        /*
         * Delta values are updated dynamically package wise for each
@@ -538,7 +552,7 @@ void cqm_handle_limbo(struct work_struct *work)
 
        mutex_lock(&rdtgroup_mutex);
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
        d = container_of(work, struct rdt_domain, cqm_limbo.work);
 
        __check_limbo(d, false);
@@ -574,7 +588,7 @@ void mbm_handle_overflow(struct work_struct *work)
        if (!static_branch_likely(&rdt_mon_enable_key))
                goto out_unlock;
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
        d = container_of(work, struct rdt_domain, mbm_over.work);
 
        list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
@@ -671,15 +685,16 @@ static void l3_mon_evt_init(struct rdt_resource *r)
 int rdt_get_mon_l3_config(struct rdt_resource *r)
 {
        unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        unsigned int cl_size = boot_cpu_data.x86_cache_size;
        int ret;
 
-       r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
+       hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
        r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
-       r->mbm_width = MBM_CNTR_WIDTH_BASE;
+       hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;
 
        if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
-               r->mbm_width += mbm_offset;
+               hw_res->mbm_width += mbm_offset;
        else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
                pr_warn("Ignoring impossible MBM counter offset\n");
 
@@ -693,7 +708,7 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
        resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
 
        /* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
-       resctrl_cqm_threshold /= r->mon_scale;
+       resctrl_cqm_threshold /= hw_res->mon_scale;
 
        ret = dom_data_init(r);
        if (ret)
index 2207916..db813f8 100644 (file)
@@ -250,7 +250,7 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
        plr->line_size = 0;
        kfree(plr->kmem);
        plr->kmem = NULL;
-       plr->r = NULL;
+       plr->s = NULL;
        if (plr->d)
                plr->d->plr = NULL;
        plr->d = NULL;
@@ -294,10 +294,10 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 
        ci = get_cpu_cacheinfo(plr->cpu);
 
-       plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);
+       plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
 
        for (i = 0; i < ci->num_leaves; i++) {
-               if (ci->info_list[i].level == plr->r->cache_level) {
+               if (ci->info_list[i].level == plr->s->res->cache_level) {
                        plr->line_size = ci->info_list[i].coherency_line_size;
                        return 0;
                }
@@ -688,8 +688,8 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
         *   resource, the portion of cache used by it should be made
         *   unavailable to all future allocations from both resources.
         */
-       if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled ||
-           rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) {
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
+           resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
                rdt_last_cmd_puts("CDP enabled\n");
                return -EINVAL;
        }
@@ -800,7 +800,7 @@ bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm
        unsigned long cbm_b;
 
        if (d->plr) {
-               cbm_len = d->plr->r->cache.cbm_len;
+               cbm_len = d->plr->s->res->cache.cbm_len;
                cbm_b = d->plr->cbm;
                if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
                        return true;
index 01fd30e..b57b3db 100644 (file)
@@ -39,6 +39,9 @@ static struct kernfs_root *rdt_root;
 struct rdtgroup rdtgroup_default;
 LIST_HEAD(rdt_all_groups);
 
+/* list of entries for the schemata file */
+LIST_HEAD(resctrl_schema_all);
+
 /* Kernel fs node for "info" directory under root */
 static struct kernfs_node *kn_info;
 
@@ -100,12 +103,12 @@ int closids_supported(void)
 
 static void closid_init(void)
 {
-       struct rdt_resource *r;
-       int rdt_min_closid = 32;
+       struct resctrl_schema *s;
+       u32 rdt_min_closid = 32;
 
        /* Compute rdt_min_closid across all resources */
-       for_each_alloc_enabled_rdt_resource(r)
-               rdt_min_closid = min(rdt_min_closid, r->num_closid);
+       list_for_each_entry(s, &resctrl_schema_all, list)
+               rdt_min_closid = min(rdt_min_closid, s->num_closid);
 
        closid_free_map = BIT_MASK(rdt_min_closid) - 1;
 
@@ -842,16 +845,17 @@ static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
 static int rdt_num_closids_show(struct kernfs_open_file *of,
                                struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
 
-       seq_printf(seq, "%d\n", r->num_closid);
+       seq_printf(seq, "%u\n", s->num_closid);
        return 0;
 }
 
 static int rdt_default_ctrl_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%x\n", r->default_ctrl);
        return 0;
@@ -860,7 +864,8 @@ static int rdt_default_ctrl_show(struct kernfs_open_file *of,
 static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
        return 0;
@@ -869,7 +874,8 @@ static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
 static int rdt_shareable_bits_show(struct kernfs_open_file *of,
                                   struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%x\n", r->cache.shareable_bits);
        return 0;
@@ -892,38 +898,40 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of,
 static int rdt_bit_usage_show(struct kernfs_open_file *of,
                              struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
        /*
         * Use unsigned long even though only 32 bits are used to ensure
         * test_bit() is used safely.
         */
        unsigned long sw_shareable = 0, hw_shareable = 0;
        unsigned long exclusive = 0, pseudo_locked = 0;
+       struct rdt_resource *r = s->res;
        struct rdt_domain *dom;
        int i, hwb, swb, excl, psl;
        enum rdtgrp_mode mode;
        bool sep = false;
-       u32 *ctrl;
+       u32 ctrl_val;
 
        mutex_lock(&rdtgroup_mutex);
        hw_shareable = r->cache.shareable_bits;
        list_for_each_entry(dom, &r->domains, list) {
                if (sep)
                        seq_putc(seq, ';');
-               ctrl = dom->ctrl_val;
                sw_shareable = 0;
                exclusive = 0;
                seq_printf(seq, "%d=", dom->id);
-               for (i = 0; i < closids_supported(); i++, ctrl++) {
+               for (i = 0; i < closids_supported(); i++) {
                        if (!closid_allocated(i))
                                continue;
+                       ctrl_val = resctrl_arch_get_config(r, dom, i,
+                                                          s->conf_type);
                        mode = rdtgroup_mode_by_closid(i);
                        switch (mode) {
                        case RDT_MODE_SHAREABLE:
-                               sw_shareable |= *ctrl;
+                               sw_shareable |= ctrl_val;
                                break;
                        case RDT_MODE_EXCLUSIVE:
-                               exclusive |= *ctrl;
+                               exclusive |= ctrl_val;
                                break;
                        case RDT_MODE_PSEUDO_LOCKSETUP:
                        /*
@@ -970,7 +978,8 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
 static int rdt_min_bw_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->membw.min_bw);
        return 0;
@@ -1001,7 +1010,8 @@ static int rdt_mon_features_show(struct kernfs_open_file *of,
 static int rdt_bw_gran_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->membw.bw_gran);
        return 0;
@@ -1010,7 +1020,8 @@ static int rdt_bw_gran_show(struct kernfs_open_file *of,
 static int rdt_delay_linear_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->membw.delay_linear);
        return 0;
@@ -1020,8 +1031,9 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
                                  struct seq_file *seq, void *v)
 {
        struct rdt_resource *r = of->kn->parent->priv;
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
-       seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);
+       seq_printf(seq, "%u\n", resctrl_cqm_threshold * hw_res->mon_scale);
 
        return 0;
 }
@@ -1029,7 +1041,8 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
 static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
                                         struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
                seq_puts(seq, "per-thread\n");
@@ -1042,7 +1055,7 @@ static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
 static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
                                       char *buf, size_t nbytes, loff_t off)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct rdt_hw_resource *hw_res;
        unsigned int bytes;
        int ret;
 
@@ -1053,7 +1066,8 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
        if (bytes > (boot_cpu_data.x86_cache_size * 1024))
                return -EINVAL;
 
-       resctrl_cqm_threshold = bytes / r->mon_scale;
+       hw_res = resctrl_to_arch_res(of->kn->parent->priv);
+       resctrl_cqm_threshold = bytes / hw_res->mon_scale;
 
        return nbytes;
 }
@@ -1078,76 +1092,17 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
        return 0;
 }
 
-/**
- * rdt_cdp_peer_get - Retrieve CDP peer if it exists
- * @r: RDT resource to which RDT domain @d belongs
- * @d: Cache instance for which a CDP peer is requested
- * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
- *         Used to return the result.
- * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
- *         Used to return the result.
- *
- * RDT resources are managed independently and by extension the RDT domains
- * (RDT resource instances) are managed independently also. The Code and
- * Data Prioritization (CDP) RDT resources, while managed independently,
- * could refer to the same underlying hardware. For example,
- * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
- *
- * When provided with an RDT resource @r and an instance of that RDT
- * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
- * resource and the exact instance that shares the same hardware.
- *
- * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
- *         If a CDP peer was found, @r_cdp will point to the peer RDT resource
- *         and @d_cdp will point to the peer RDT domain.
- */
-static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
-                           struct rdt_resource **r_cdp,
-                           struct rdt_domain **d_cdp)
+static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
 {
-       struct rdt_resource *_r_cdp = NULL;
-       struct rdt_domain *_d_cdp = NULL;
-       int ret = 0;
-
-       switch (r->rid) {
-       case RDT_RESOURCE_L3DATA:
-               _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
-               break;
-       case RDT_RESOURCE_L3CODE:
-               _r_cdp =  &rdt_resources_all[RDT_RESOURCE_L3DATA];
-               break;
-       case RDT_RESOURCE_L2DATA:
-               _r_cdp =  &rdt_resources_all[RDT_RESOURCE_L2CODE];
-               break;
-       case RDT_RESOURCE_L2CODE:
-               _r_cdp =  &rdt_resources_all[RDT_RESOURCE_L2DATA];
-               break;
+       switch (my_type) {
+       case CDP_CODE:
+               return CDP_DATA;
+       case CDP_DATA:
+               return CDP_CODE;
        default:
-               ret = -ENOENT;
-               goto out;
-       }
-
-       /*
-        * When a new CPU comes online and CDP is enabled then the new
-        * RDT domains (if any) associated with both CDP RDT resources
-        * are added in the same CPU online routine while the
-        * rdtgroup_mutex is held. It should thus not happen for one
-        * RDT domain to exist and be associated with its RDT CDP
-        * resource but there is no RDT domain associated with the
-        * peer RDT CDP resource. Hence the WARN.
-        */
-       _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
-       if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
-               _r_cdp = NULL;
-               _d_cdp = NULL;
-               ret = -EINVAL;
+       case CDP_NONE:
+               return CDP_NONE;
        }
-
-out:
-       *r_cdp = _r_cdp;
-       *d_cdp = _d_cdp;
-
-       return ret;
 }
 
 /**
@@ -1171,11 +1126,11 @@ out:
  * Return: false if CBM does not overlap, true if it does.
  */
 static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-                                   unsigned long cbm, int closid, bool exclusive)
+                                   unsigned long cbm, int closid,
+                                   enum resctrl_conf_type type, bool exclusive)
 {
        enum rdtgrp_mode mode;
        unsigned long ctrl_b;
-       u32 *ctrl;
        int i;
 
        /* Check for any overlap with regions used by hardware directly */
@@ -1186,9 +1141,8 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
        }
 
        /* Check for overlap with other resource groups */
-       ctrl = d->ctrl_val;
-       for (i = 0; i < closids_supported(); i++, ctrl++) {
-               ctrl_b = *ctrl;
+       for (i = 0; i < closids_supported(); i++) {
+               ctrl_b = resctrl_arch_get_config(r, d, i, type);
                mode = rdtgroup_mode_by_closid(i);
                if (closid_allocated(i) && i != closid &&
                    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
@@ -1208,7 +1162,7 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
 
 /**
  * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
- * @r: Resource to which domain instance @d belongs.
+ * @s: Schema for the resource to which domain instance @d belongs.
  * @d: The domain instance for which @closid is being tested.
  * @cbm: Capacity bitmask being tested.
  * @closid: Intended closid for @cbm.
@@ -1226,19 +1180,19 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
  *
  * Return: true if CBM overlap detected, false if there is no overlap
  */
-bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
                           unsigned long cbm, int closid, bool exclusive)
 {
-       struct rdt_resource *r_cdp;
-       struct rdt_domain *d_cdp;
+       enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
+       struct rdt_resource *r = s->res;
 
-       if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
+       if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
+                                   exclusive))
                return true;
 
-       if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
+       if (!resctrl_arch_get_cdp_enabled(r->rid))
                return false;
-
-       return  __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
+       return  __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
 }
 
 /**
@@ -1256,17 +1210,21 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
 static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
 {
        int closid = rdtgrp->closid;
+       struct resctrl_schema *s;
        struct rdt_resource *r;
        bool has_cache = false;
        struct rdt_domain *d;
+       u32 ctrl;
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
                if (r->rid == RDT_RESOURCE_MBA)
                        continue;
                has_cache = true;
                list_for_each_entry(d, &r->domains, list) {
-                       if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
-                                                 rdtgrp->closid, false)) {
+                       ctrl = resctrl_arch_get_config(r, d, closid,
+                                                      s->conf_type);
+                       if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
                                rdt_last_cmd_puts("Schemata overlaps\n");
                                return false;
                        }
@@ -1397,6 +1355,7 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
 static int rdtgroup_size_show(struct kernfs_open_file *of,
                              struct seq_file *s, void *v)
 {
+       struct resctrl_schema *schema;
        struct rdtgroup *rdtgrp;
        struct rdt_resource *r;
        struct rdt_domain *d;
@@ -1418,8 +1377,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
                        ret = -ENODEV;
                } else {
                        seq_printf(s, "%*s:", max_name_width,
-                                  rdtgrp->plr->r->name);
-                       size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+                                  rdtgrp->plr->s->name);
+                       size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
                                                    rdtgrp->plr->d,
                                                    rdtgrp->plr->cbm);
                        seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
@@ -1427,18 +1386,19 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
                goto out;
        }
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       list_for_each_entry(schema, &resctrl_schema_all, list) {
+               r = schema->res;
                sep = false;
-               seq_printf(s, "%*s:", max_name_width, r->name);
+               seq_printf(s, "%*s:", max_name_width, schema->name);
                list_for_each_entry(d, &r->domains, list) {
                        if (sep)
                                seq_putc(s, ';');
                        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                                size = 0;
                        } else {
-                               ctrl = (!is_mba_sc(r) ?
-                                               d->ctrl_val[rdtgrp->closid] :
-                                               d->mbps_val[rdtgrp->closid]);
+                               ctrl = resctrl_arch_get_config(r, d,
+                                                              rdtgrp->closid,
+                                                              schema->conf_type);
                                if (r->rid == RDT_RESOURCE_MBA)
                                        size = ctrl;
                                else
@@ -1757,14 +1717,14 @@ int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
        return ret;
 }
 
-static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
+static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
                                      unsigned long fflags)
 {
        struct kernfs_node *kn_subdir;
        int ret;
 
        kn_subdir = kernfs_create_dir(kn_info, name,
-                                     kn_info->mode, r);
+                                     kn_info->mode, priv);
        if (IS_ERR(kn_subdir))
                return PTR_ERR(kn_subdir);
 
@@ -1781,6 +1741,7 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
 
 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
 {
+       struct resctrl_schema *s;
        struct rdt_resource *r;
        unsigned long fflags;
        char name[32];
@@ -1795,9 +1756,11 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
        if (ret)
                goto out_destroy;
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       /* loop over enabled controls, these are all alloc_enabled */
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
                fflags =  r->fflags | RF_CTRL_INFO;
-               ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
+               ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
                if (ret)
                        goto out_destroy;
        }
@@ -1867,7 +1830,7 @@ static void l2_qos_cfg_update(void *arg)
 
 static inline bool is_mba_linear(void)
 {
-       return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
+       return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear;
 }
 
 static int set_cache_qos_cfg(int level, bool enable)
@@ -1888,7 +1851,7 @@ static int set_cache_qos_cfg(int level, bool enable)
        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;
 
-       r_l = &rdt_resources_all[level];
+       r_l = &rdt_resources_all[level].r_resctrl;
        list_for_each_entry(d, &r_l->domains, list) {
                if (r_l->cache.arch_has_per_cpu_cfg)
                        /* Pick all the CPUs in the domain instance */
@@ -1914,14 +1877,16 @@ static int set_cache_qos_cfg(int level, bool enable)
 /* Restore the qos cfg state when a domain comes online */
 void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
 {
-       if (!r->alloc_capable)
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+
+       if (!r->cdp_capable)
                return;
 
-       if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
-               l2_qos_cfg_update(&r->alloc_enabled);
+       if (r->rid == RDT_RESOURCE_L2)
+               l2_qos_cfg_update(&hw_res->cdp_enabled);
 
-       if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
-               l3_qos_cfg_update(&r->alloc_enabled);
+       if (r->rid == RDT_RESOURCE_L3)
+               l3_qos_cfg_update(&hw_res->cdp_enabled);
 }
 
 /*
@@ -1932,7 +1897,8 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
  */
 static int set_mba_sc(bool mba_sc)
 {
-       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
+       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
+       struct rdt_hw_domain *hw_dom;
        struct rdt_domain *d;
 
        if (!is_mbm_enabled() || !is_mba_linear() ||
@@ -1940,73 +1906,60 @@ static int set_mba_sc(bool mba_sc)
                return -EINVAL;
 
        r->membw.mba_sc = mba_sc;
-       list_for_each_entry(d, &r->domains, list)
-               setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);
+       list_for_each_entry(d, &r->domains, list) {
+               hw_dom = resctrl_to_arch_dom(d);
+               setup_default_ctrlval(r, hw_dom->ctrl_val, hw_dom->mbps_val);
+       }
 
        return 0;
 }
 
-static int cdp_enable(int level, int data_type, int code_type)
+static int cdp_enable(int level)
 {
-       struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
-       struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
-       struct rdt_resource *r_l = &rdt_resources_all[level];
+       struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
        int ret;
 
-       if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
-           !r_lcode->alloc_capable)
+       if (!r_l->alloc_capable)
                return -EINVAL;
 
        ret = set_cache_qos_cfg(level, true);
-       if (!ret) {
-               r_l->alloc_enabled = false;
-               r_ldata->alloc_enabled = true;
-               r_lcode->alloc_enabled = true;
-       }
+       if (!ret)
+               rdt_resources_all[level].cdp_enabled = true;
+
        return ret;
 }
 
-static int cdpl3_enable(void)
+static void cdp_disable(int level)
 {
-       return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
-                         RDT_RESOURCE_L3CODE);
-}
+       struct rdt_hw_resource *r_hw = &rdt_resources_all[level];
 
-static int cdpl2_enable(void)
-{
-       return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
-                         RDT_RESOURCE_L2CODE);
+       if (r_hw->cdp_enabled) {
+               set_cache_qos_cfg(level, false);
+               r_hw->cdp_enabled = false;
+       }
 }
 
-static void cdp_disable(int level, int data_type, int code_type)
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
 {
-       struct rdt_resource *r = &rdt_resources_all[level];
+       struct rdt_hw_resource *hw_res = &rdt_resources_all[l];
 
-       r->alloc_enabled = r->alloc_capable;
+       if (!hw_res->r_resctrl.cdp_capable)
+               return -EINVAL;
 
-       if (rdt_resources_all[data_type].alloc_enabled) {
-               rdt_resources_all[data_type].alloc_enabled = false;
-               rdt_resources_all[code_type].alloc_enabled = false;
-               set_cache_qos_cfg(level, false);
-       }
-}
+       if (enable)
+               return cdp_enable(l);
 
-static void cdpl3_disable(void)
-{
-       cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
-}
+       cdp_disable(l);
 
-static void cdpl2_disable(void)
-{
-       cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
+       return 0;
 }
 
 static void cdp_disable_all(void)
 {
-       if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
-               cdpl3_disable();
-       if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
-               cdpl2_disable();
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
+               resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
+               resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
 }
 
 /*
@@ -2084,10 +2037,10 @@ static int rdt_enable_ctx(struct rdt_fs_context *ctx)
        int ret = 0;
 
        if (ctx->enable_cdpl2)
-               ret = cdpl2_enable();
+               ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);
 
        if (!ret && ctx->enable_cdpl3)
-               ret = cdpl3_enable();
+               ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);
 
        if (!ret && ctx->enable_mba_mbps)
                ret = set_mba_sc(true);
@@ -2095,6 +2048,92 @@ static int rdt_enable_ctx(struct rdt_fs_context *ctx)
        return ret;
 }
 
+static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
+{
+       struct resctrl_schema *s;
+       const char *suffix = "";
+       int ret, cl;
+
+       s = kzalloc(sizeof(*s), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       s->res = r;
+       s->num_closid = resctrl_arch_get_num_closid(r);
+       if (resctrl_arch_get_cdp_enabled(r->rid))
+               s->num_closid /= 2;
+
+       s->conf_type = type;
+       switch (type) {
+       case CDP_CODE:
+               suffix = "CODE";
+               break;
+       case CDP_DATA:
+               suffix = "DATA";
+               break;
+       case CDP_NONE:
+               suffix = "";
+               break;
+       }
+
+       ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
+       if (ret >= sizeof(s->name)) {
+               kfree(s);
+               return -EINVAL;
+       }
+
+       cl = strlen(s->name);
+
+       /*
+        * If CDP is supported by this resource, but not enabled,
+        * include the suffix. This ensures the tabular format of the
+        * schemata file does not change between mounts of the filesystem.
+        */
+       if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
+               cl += 4;
+
+       if (cl > max_name_width)
+               max_name_width = cl;
+
+       INIT_LIST_HEAD(&s->list);
+       list_add(&s->list, &resctrl_schema_all);
+
+       return 0;
+}
+
+static int schemata_list_create(void)
+{
+       struct rdt_resource *r;
+       int ret = 0;
+
+       for_each_alloc_enabled_rdt_resource(r) {
+               if (resctrl_arch_get_cdp_enabled(r->rid)) {
+                       ret = schemata_list_add(r, CDP_CODE);
+                       if (ret)
+                               break;
+
+                       ret = schemata_list_add(r, CDP_DATA);
+               } else {
+                       ret = schemata_list_add(r, CDP_NONE);
+               }
+
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static void schemata_list_destroy(void)
+{
+       struct resctrl_schema *s, *tmp;
+
+       list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
+               list_del(&s->list);
+               kfree(s);
+       }
+}
+
 static int rdt_get_tree(struct fs_context *fc)
 {
        struct rdt_fs_context *ctx = rdt_fc2context(fc);
@@ -2116,11 +2155,17 @@ static int rdt_get_tree(struct fs_context *fc)
        if (ret < 0)
                goto out_cdp;
 
+       ret = schemata_list_create();
+       if (ret) {
+               schemata_list_destroy();
+               goto out_mba;
+       }
+
        closid_init();
 
        ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
        if (ret < 0)
-               goto out_mba;
+               goto out_schemata_free;
 
        if (rdt_mon_capable) {
                ret = mongroup_create_dir(rdtgroup_default.kn,
@@ -2153,7 +2198,7 @@ static int rdt_get_tree(struct fs_context *fc)
                static_branch_enable_cpuslocked(&rdt_enable_key);
 
        if (is_mbm_enabled()) {
-               r = &rdt_resources_all[RDT_RESOURCE_L3];
+               r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
                list_for_each_entry(dom, &r->domains, list)
                        mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
        }
@@ -2170,6 +2215,8 @@ out_mongrp:
                kernfs_remove(kn_mongrp);
 out_info:
        kernfs_remove(kn_info);
+out_schemata_free:
+       schemata_list_destroy();
 out_mba:
        if (ctx->enable_mba_mbps)
                set_mba_sc(false);
@@ -2257,6 +2304,8 @@ static int rdt_init_fs_context(struct fs_context *fc)
 
 static int reset_all_ctrls(struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+       struct rdt_hw_domain *hw_dom;
        struct msr_param msr_param;
        cpumask_var_t cpu_mask;
        struct rdt_domain *d;
@@ -2267,7 +2316,7 @@ static int reset_all_ctrls(struct rdt_resource *r)
 
        msr_param.res = r;
        msr_param.low = 0;
-       msr_param.high = r->num_closid;
+       msr_param.high = hw_res->num_closid;
 
        /*
         * Disable resource control for this resource by setting all
@@ -2275,10 +2324,11 @@ static int reset_all_ctrls(struct rdt_resource *r)
         * from each domain to update the MSRs below.
         */
        list_for_each_entry(d, &r->domains, list) {
+               hw_dom = resctrl_to_arch_dom(d);
                cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
 
-               for (i = 0; i < r->num_closid; i++)
-                       d->ctrl_val[i] = r->default_ctrl;
+               for (i = 0; i < hw_res->num_closid; i++)
+                       hw_dom->ctrl_val[i] = r->default_ctrl;
        }
        cpu = get_cpu();
        /* Update CBM on this cpu if it's in cpu_mask. */
@@ -2408,6 +2458,7 @@ static void rdt_kill_sb(struct super_block *sb)
        rmdir_all_sub();
        rdt_pseudo_lock_release();
        rdtgroup_default.mode = RDT_MODE_SHAREABLE;
+       schemata_list_destroy();
        static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
        static_branch_disable_cpuslocked(&rdt_mon_enable_key);
        static_branch_disable_cpuslocked(&rdt_enable_key);
@@ -2642,23 +2693,24 @@ static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
  * Set the RDT domain up to start off with all usable allocations. That is,
  * all shareable and unused bits. All-zero CBM is invalid.
  */
-static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
+static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
                                 u32 closid)
 {
-       struct rdt_resource *r_cdp = NULL;
-       struct rdt_domain *d_cdp = NULL;
+       enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
+       enum resctrl_conf_type t = s->conf_type;
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        u32 used_b = 0, unused_b = 0;
        unsigned long tmp_cbm;
        enum rdtgrp_mode mode;
-       u32 peer_ctl, *ctrl;
+       u32 peer_ctl, ctrl_val;
        int i;
 
-       rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
-       d->have_new_ctrl = false;
-       d->new_ctrl = r->cache.shareable_bits;
+       cfg = &d->staged_config[t];
+       cfg->have_new_ctrl = false;
+       cfg->new_ctrl = r->cache.shareable_bits;
        used_b = r->cache.shareable_bits;
-       ctrl = d->ctrl_val;
-       for (i = 0; i < closids_supported(); i++, ctrl++) {
+       for (i = 0; i < closids_supported(); i++) {
                if (closid_allocated(i) && i != closid) {
                        mode = rdtgroup_mode_by_closid(i);
                        if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
@@ -2673,35 +2725,38 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
                         * usage to ensure there is no overlap
                         * with an exclusive group.
                         */
-                       if (d_cdp)
-                               peer_ctl = d_cdp->ctrl_val[i];
+                       if (resctrl_arch_get_cdp_enabled(r->rid))
+                               peer_ctl = resctrl_arch_get_config(r, d, i,
+                                                                  peer_type);
                        else
                                peer_ctl = 0;
-                       used_b |= *ctrl | peer_ctl;
+                       ctrl_val = resctrl_arch_get_config(r, d, i,
+                                                          s->conf_type);
+                       used_b |= ctrl_val | peer_ctl;
                        if (mode == RDT_MODE_SHAREABLE)
-                               d->new_ctrl |= *ctrl | peer_ctl;
+                               cfg->new_ctrl |= ctrl_val | peer_ctl;
                }
        }
        if (d->plr && d->plr->cbm > 0)
                used_b |= d->plr->cbm;
        unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
        unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
-       d->new_ctrl |= unused_b;
+       cfg->new_ctrl |= unused_b;
        /*
         * Force the initial CBM to be valid, user can
         * modify the CBM based on system availability.
         */
-       d->new_ctrl = cbm_ensure_valid(d->new_ctrl, r);
+       cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
        /*
         * Assign the u32 CBM to an unsigned long to ensure that
         * bitmap_weight() does not access out-of-bound memory.
         */
-       tmp_cbm = d->new_ctrl;
+       tmp_cbm = cfg->new_ctrl;
        if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
-               rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
+               rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id);
                return -ENOSPC;
        }
-       d->have_new_ctrl = true;
+       cfg->have_new_ctrl = true;
 
        return 0;
 }
@@ -2716,13 +2771,13 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
  * If there are no more shareable bits available on any domain then
  * the entire allocation will fail.
  */
-static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
+static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
 {
        struct rdt_domain *d;
        int ret;
 
-       list_for_each_entry(d, &r->domains, list) {
-               ret = __init_one_rdt_domain(d, r, closid);
+       list_for_each_entry(d, &s->res->domains, list) {
+               ret = __init_one_rdt_domain(d, s, closid);
                if (ret < 0)
                        return ret;
        }
@@ -2733,30 +2788,34 @@ static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
 /* Initialize MBA resource with default values. */
 static void rdtgroup_init_mba(struct rdt_resource *r)
 {
+       struct resctrl_staged_config *cfg;
        struct rdt_domain *d;
 
        list_for_each_entry(d, &r->domains, list) {
-               d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
-               d->have_new_ctrl = true;
+               cfg = &d->staged_config[CDP_NONE];
+               cfg->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
+               cfg->have_new_ctrl = true;
        }
 }
 
 /* Initialize the RDT group's allocations. */
 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 {
+       struct resctrl_schema *s;
        struct rdt_resource *r;
        int ret;
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
                if (r->rid == RDT_RESOURCE_MBA) {
                        rdtgroup_init_mba(r);
                } else {
-                       ret = rdtgroup_init_cat(r, rdtgrp->closid);
+                       ret = rdtgroup_init_cat(s, rdtgrp->closid);
                        if (ret < 0)
                                return ret;
                }
 
-               ret = update_domains(r, rdtgrp->closid);
+               ret = resctrl_arch_update_domains(r, rdtgrp->closid);
                if (ret < 0) {
                        rdt_last_cmd_puts("Failed to initialize allocations\n");
                        return ret;
@@ -3124,13 +3183,13 @@ out:
 
 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
 {
-       if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
                seq_puts(seq, ",cdp");
 
-       if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
                seq_puts(seq, ",cdpl2");
 
-       if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
+       if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl))
                seq_puts(seq, ",mba_MBps");
 
        return 0;
index 08651a4..42fc41d 100644 (file)
@@ -508,7 +508,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = {
        .irq_set_affinity = msi_domain_set_affinity,
        .irq_retrigger = irq_chip_retrigger_hierarchy,
        .irq_write_msi_msg = hpet_msi_write_msg,
-       .flags = IRQCHIP_SKIP_SET_WAKE,
+       .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static int hpet_msi_init(struct irq_domain *domain,
index 282b4ee..15aefa3 100644 (file)
@@ -235,15 +235,15 @@ static char irq_trigger[2];
  */
 static void restore_ELCR(char *trigger)
 {
-       outb(trigger[0], 0x4d0);
-       outb(trigger[1], 0x4d1);
+       outb(trigger[0], PIC_ELCR1);
+       outb(trigger[1], PIC_ELCR2);
 }
 
 static void save_ELCR(char *trigger)
 {
        /* IRQ 0,1,2,8,13 are marked as reserved */
-       trigger[0] = inb(0x4d0) & 0xF8;
-       trigger[1] = inb(0x4d1) & 0xDE;
+       trigger[0] = inb(PIC_ELCR1) & 0xF8;
+       trigger[1] = inb(PIC_ELCR2) & 0xDE;
 }
 
 static void i8259A_resume(void)
index 8f06449..fed721f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/smp.h>
 #include <linux/pci.h>
 
+#include <asm/i8259.h>
 #include <asm/io_apic.h>
 #include <asm/acpi.h>
 #include <asm/irqdomain.h>
@@ -251,7 +252,7 @@ static int __init ELCR_trigger(unsigned int irq)
 {
        unsigned int port;
 
-       port = 0x4d0 + (irq >> 3);
+       port = PIC_ELCR1 + (irq >> 3);
        return (inb(port) >> (irq & 7)) & 1;
 }
 
index ebfb911..0a40df6 100644 (file)
@@ -388,10 +388,11 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
        },
        {       /* Handle problems with rebooting on the OptiPlex 990. */
                .callback = set_pci_reboot,
-               .ident = "Dell OptiPlex 990",
+               .ident = "Dell OptiPlex 990 BIOS A0x",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "A0"),
                },
        },
        {       /* Handle problems with rebooting on Dell 300's */
index 9320285..85f6e24 100644 (file)
@@ -610,6 +610,9 @@ void set_cpu_sibling_map(int cpu)
        if (threads > __max_smt_threads)
                __max_smt_threads = threads;
 
+       for_each_cpu(i, topology_sibling_cpumask(cpu))
+               cpu_data(i).smt_active = threads > 1;
+
        /*
         * This needs a separate iteration over the cpus because we rely on all
         * topology_sibling_cpumask links to be set-up.
@@ -1552,8 +1555,13 @@ static void remove_siblinginfo(int cpu)
 
        for_each_cpu(sibling, topology_die_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
-       for_each_cpu(sibling, topology_sibling_cpumask(cpu))
+
+       for_each_cpu(sibling, topology_sibling_cpumask(cpu)) {
                cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+               if (cpumask_weight(topology_sibling_cpumask(sibling)) == 1)
+                       cpu_data(sibling).smt_active = false;
+       }
+
        for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
                cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
        cpumask_clear(cpu_llc_shared_mask(cpu));
index 739be5d..fe03bd9 100644 (file)
@@ -208,30 +208,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        kvm_mmu_after_set_cpuid(vcpu);
 }
 
-static int is_efer_nx(void)
-{
-       return host_efer & EFER_NX;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-       int i;
-       struct kvm_cpuid_entry2 *e, *entry;
-
-       entry = NULL;
-       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
-               e = &vcpu->arch.cpuid_entries[i];
-               if (e->function == 0x80000001) {
-                       entry = e;
-                       break;
-               }
-       }
-       if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
-               cpuid_entry_clear(entry, X86_FEATURE_NX);
-               printk(KERN_INFO "kvm: guest NX capability removed\n");
-       }
-}
-
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
@@ -302,7 +278,6 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
        vcpu->arch.cpuid_entries = e2;
        vcpu->arch.cpuid_nent = cpuid->nent;
 
-       cpuid_fix_nx_cap(vcpu);
        kvm_update_cpuid_runtime(vcpu);
        kvm_vcpu_after_set_cpuid(vcpu);
 
@@ -401,7 +376,6 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
 
 void kvm_set_cpu_caps(void)
 {
-       unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
 #ifdef CONFIG_X86_64
        unsigned int f_gbpages = F(GBPAGES);
        unsigned int f_lm = F(LM);
@@ -515,7 +489,7 @@ void kvm_set_cpu_caps(void)
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
-               f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+               F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
        );
index b07592c..41d2a53 100644 (file)
@@ -1933,7 +1933,7 @@ ret_success:
 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *entry;
-       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+       struct kvm_vcpu_hv *hv_vcpu;
 
        entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
        if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
@@ -2016,6 +2016,7 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
 
 static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
 {
+       trace_kvm_hv_hypercall_done(result);
        kvm_hv_hypercall_set_result(vcpu, result);
        ++vcpu->stat.hypercalls;
        return kvm_skip_emulated_instruction(vcpu);
@@ -2139,6 +2140,7 @@ static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
 
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 {
+       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct kvm_hv_hcall hc;
        u64 ret = HV_STATUS_SUCCESS;
 
@@ -2173,17 +2175,25 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
        hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
        hc.rep = !!(hc.rep_cnt || hc.rep_idx);
 
-       if (hc.fast && is_xmm_fast_hypercall(&hc))
-               kvm_hv_hypercall_read_xmm(&hc);
-
        trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
                               hc.ingpa, hc.outgpa);
 
-       if (unlikely(!hv_check_hypercall_access(to_hv_vcpu(vcpu), hc.code))) {
+       if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
                ret = HV_STATUS_ACCESS_DENIED;
                goto hypercall_complete;
        }
 
+       if (hc.fast && is_xmm_fast_hypercall(&hc)) {
+               if (unlikely(hv_vcpu->enforce_cpuid &&
+                            !(hv_vcpu->cpuid_cache.features_edx &
+                              HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
+                       kvm_queue_exception(vcpu, UD_VECTOR);
+                       return 1;
+               }
+
+               kvm_hv_hypercall_read_xmm(&hc);
+       }
+
        switch (hc.code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                if (unlikely(hc.rep)) {
index 629a09c..0b80263 100644 (file)
@@ -541,17 +541,17 @@ static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                            addr, len, val);
 }
 
-static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+static int picdev_elcr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, int len, const void *val)
 {
-       return picdev_write(container_of(dev, struct kvm_pic, dev_eclr),
+       return picdev_write(container_of(dev, struct kvm_pic, dev_elcr),
                            addr, len, val);
 }
 
-static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+static int picdev_elcr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                            gpa_t addr, int len, void *val)
 {
-       return picdev_read(container_of(dev, struct kvm_pic, dev_eclr),
+       return picdev_read(container_of(dev, struct kvm_pic, dev_elcr),
                            addr, len, val);
 }
 
@@ -577,9 +577,9 @@ static const struct kvm_io_device_ops picdev_slave_ops = {
        .write    = picdev_slave_write,
 };
 
-static const struct kvm_io_device_ops picdev_eclr_ops = {
-       .read     = picdev_eclr_read,
-       .write    = picdev_eclr_write,
+static const struct kvm_io_device_ops picdev_elcr_ops = {
+       .read     = picdev_elcr_read,
+       .write    = picdev_elcr_write,
 };
 
 int kvm_pic_init(struct kvm *kvm)
@@ -602,7 +602,7 @@ int kvm_pic_init(struct kvm *kvm)
         */
        kvm_iodevice_init(&s->dev_master, &picdev_master_ops);
        kvm_iodevice_init(&s->dev_slave, &picdev_slave_ops);
-       kvm_iodevice_init(&s->dev_eclr, &picdev_eclr_ops);
+       kvm_iodevice_init(&s->dev_elcr, &picdev_elcr_ops);
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2,
                                      &s->dev_master);
@@ -613,7 +613,7 @@ int kvm_pic_init(struct kvm *kvm)
        if (ret < 0)
                goto fail_unreg_2;
 
-       ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_eclr);
+       ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_elcr);
        if (ret < 0)
                goto fail_unreg_1;
 
@@ -647,7 +647,7 @@ void kvm_pic_destroy(struct kvm *kvm)
        mutex_lock(&kvm->slots_lock);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
-       kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
+       kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_elcr);
        mutex_unlock(&kvm->slots_lock);
 
        kvm->arch.vpic = NULL;
index 9b64abf..650642b 100644 (file)
@@ -55,7 +55,7 @@ struct kvm_pic {
        int output;             /* intr from master PIC */
        struct kvm_io_device dev_master;
        struct kvm_io_device dev_slave;
-       struct kvm_io_device dev_eclr;
+       struct kvm_io_device dev_elcr;
        void (*ack_notifier)(void *opaque, int irq);
        unsigned long irq_states[PIC_NUM_PINS];
 };
index 66f7f5b..47b7652 100644 (file)
@@ -1644,7 +1644,7 @@ static int is_empty_shadow_page(u64 *spt)
  * aggregate version in order to make the slab shrinker
  * faster
  */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
 {
        kvm->arch.n_used_mmu_pages += nr;
        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2535,6 +2535,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
 {
        struct kvm_mmu_page *sp;
+       bool locked = false;
 
        /*
         * Force write-protection if the page is being tracked.  Note, the page
@@ -2557,9 +2558,34 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
                if (sp->unsync)
                        continue;
 
+               /*
+                * TDP MMU page faults require an additional spinlock as they
+                * run with mmu_lock held for read, not write, and the unsync
+                * logic is not thread safe.  Take the spinlock regardless of
+                * the MMU type to avoid extra conditionals/parameters, there's
+                * no meaningful penalty if mmu_lock is held for write.
+                */
+               if (!locked) {
+                       locked = true;
+                       spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
+
+                       /*
+                        * Recheck after taking the spinlock, a different vCPU
+                        * may have since marked the page unsync.  A false
+                        * positive on the unprotected check above is not
+                        * possible as clearing sp->unsync _must_ hold mmu_lock
+                        * for write, i.e. unsync cannot transition from 0->1
+                        * while this CPU holds mmu_lock for read (or write).
+                        */
+                       if (READ_ONCE(sp->unsync))
+                               continue;
+               }
+
                WARN_ON(sp->role.level != PG_LEVEL_4K);
                kvm_unsync_page(vcpu, sp);
        }
+       if (locked)
+               spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
 
        /*
         * We need to ensure that the marking of unsync pages is visible
@@ -5537,6 +5563,8 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
 
+       spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+
        if (!kvm_mmu_init_tdp_mmu(kvm))
                /*
                 * No smp_load/store wrappers needed here as we are in
index 0853370..d80cb12 100644 (file)
@@ -43,6 +43,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
        if (!kvm->arch.tdp_mmu_enabled)
                return;
 
+       WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
 
        /*
@@ -81,8 +82,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
                          bool shared)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);
 
        if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
@@ -94,7 +93,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
        list_del_rcu(&root->link);
        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
 
-       zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);
+       zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
 
        call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
 }
@@ -724,13 +723,29 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                          gfn_t start, gfn_t end, bool can_yield, bool flush,
                          bool shared)
 {
+       gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+       bool zap_all = (start == 0 && end >= max_gfn_host);
        struct tdp_iter iter;
 
+       /*
+        * No need to try to step down in the iterator when zapping all SPTEs,
+        * zapping the top-level non-leaf SPTEs will recurse on their children.
+        */
+       int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
+
+       /*
+        * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
+        * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
+        * and so KVM will never install a SPTE for such addresses.
+        */
+       end = min(end, max_gfn_host);
+
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);
 
        rcu_read_lock();
 
-       tdp_root_for_each_pte(iter, root, start, end) {
+       for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+                                  min_level, start, end) {
 retry:
                if (can_yield &&
                    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
@@ -744,9 +759,10 @@ retry:
                /*
                 * If this is a non-last-level SPTE that covers a larger range
                 * than should be zapped, continue, and zap the mappings at a
-                * lower level.
+                * lower level, except when zapping all SPTEs.
                 */
-               if ((iter.gfn < start ||
+               if (!zap_all &&
+                   (iter.gfn < start ||
                     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
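
As a rough worked example of the clamp and the zap_all test above (assuming shadow_phys_bits of 48 and PAGE_SHIFT of 12, values not taken from this hunk):

	/* Illustration only: values assume a host with 48-bit MAXPHYADDR. */
	gfn_t max_gfn_host = 1ULL << (48 - 12);	/* 2^36 possible host GFNs */
	gfn_t start = 0, end = -1ull;		/* callers pass -1ull to mean "zap everything" */
	bool zap_all = (start == 0 && end >= max_gfn_host);	/* evaluates to true */

	end = min(end, max_gfn_host);		/* the walk is bounded at 2^36, not 2^64 - 1 */

With zap_all true, min_level is the root level, so the iterator visits only top-level SPTEs and zapping recurses on their children instead of stepping down.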
@@ -794,12 +810,11 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 
 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
        bool flush = false;
        int i;
 
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-               flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
+               flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull,
                                                  flush, false);
 
        if (flush)
@@ -838,7 +853,6 @@ static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
  */
 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
        struct kvm_mmu_page *next_root;
        struct kvm_mmu_page *root;
        bool flush = false;
@@ -854,8 +868,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 
                rcu_read_unlock();
 
-               flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
-                                     true);
+               flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
 
                /*
                 * Put the reference acquired in
index 61738ff..e551547 100644 (file)
@@ -158,6 +158,9 @@ void recalc_intercepts(struct vcpu_svm *svm)
        /* If SMI is not intercepted, ignore guest SMI intercept as well  */
        if (!intercept_smi)
                vmcb_clr_intercept(c, INTERCEPT_SMI);
+
+       vmcb_set_intercept(c, INTERCEPT_VMLOAD);
+       vmcb_set_intercept(c, INTERCEPT_VMSAVE);
 }
 
 static void copy_vmcb_control_area(struct vmcb_control_area *dst,
@@ -503,7 +506,11 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
 
 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 {
-       const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+       const u32 int_ctl_vmcb01_bits =
+               V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
+
+       const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
+
        struct kvm_vcpu *vcpu = &svm->vcpu;
 
        /*
@@ -535,8 +542,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
                vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
 
        svm->vmcb->control.int_ctl             =
-               (svm->nested.ctl.int_ctl & ~mask) |
-               (svm->vmcb01.ptr->control.int_ctl & mask);
+               (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
+               (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
 
        svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
        svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
index 6710d9e..7fbce34 100644 (file)
@@ -64,6 +64,7 @@ static DEFINE_MUTEX(sev_bitmap_lock);
 unsigned int max_sev_asid;
 static unsigned int min_sev_asid;
 static unsigned long sev_me_mask;
+static unsigned int nr_asids;
 static unsigned long *sev_asid_bitmap;
 static unsigned long *sev_reclaim_asid_bitmap;
 
@@ -78,11 +79,11 @@ struct enc_region {
 /* Called with the sev_bitmap_lock held, or on shutdown  */
 static int sev_flush_asids(int min_asid, int max_asid)
 {
-       int ret, pos, error = 0;
+       int ret, asid, error = 0;
 
        /* Check if there are any ASIDs to reclaim before performing a flush */
-       pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid);
-       if (pos >= max_asid)
+       asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
+       if (asid > max_asid)
                return -EBUSY;
 
        /*
@@ -115,15 +116,15 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
 
        /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
        bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
-                  max_sev_asid);
-       bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
+                  nr_asids);
+       bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);
 
        return true;
 }
 
 static int sev_asid_new(struct kvm_sev_info *sev)
 {
-       int pos, min_asid, max_asid, ret;
+       int asid, min_asid, max_asid, ret;
        bool retry = true;
        enum misc_res_type type;
 
@@ -143,11 +144,11 @@ static int sev_asid_new(struct kvm_sev_info *sev)
         * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
         * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
         */
-       min_asid = sev->es_active ? 0 : min_sev_asid - 1;
+       min_asid = sev->es_active ? 1 : min_sev_asid;
        max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
 again:
-       pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
-       if (pos >= max_asid) {
+       asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
+       if (asid > max_asid) {
                if (retry && __sev_recycle_asids(min_asid, max_asid)) {
                        retry = false;
                        goto again;
@@ -157,11 +158,11 @@ again:
                goto e_uncharge;
        }
 
-       __set_bit(pos, sev_asid_bitmap);
+       __set_bit(asid, sev_asid_bitmap);
 
        mutex_unlock(&sev_bitmap_lock);
 
-       return pos + 1;
+       return asid;
 e_uncharge:
        misc_cg_uncharge(type, sev->misc_cg, 1);
        put_misc_cg(sev->misc_cg);
@@ -179,17 +180,16 @@ static int sev_get_asid(struct kvm *kvm)
 static void sev_asid_free(struct kvm_sev_info *sev)
 {
        struct svm_cpu_data *sd;
-       int cpu, pos;
+       int cpu;
        enum misc_res_type type;
 
        mutex_lock(&sev_bitmap_lock);
 
-       pos = sev->asid - 1;
-       __set_bit(pos, sev_reclaim_asid_bitmap);
+       __set_bit(sev->asid, sev_reclaim_asid_bitmap);
 
        for_each_possible_cpu(cpu) {
                sd = per_cpu(svm_data, cpu);
-               sd->sev_vmcbs[pos] = NULL;
+               sd->sev_vmcbs[sev->asid] = NULL;
        }
 
        mutex_unlock(&sev_bitmap_lock);
@@ -1857,12 +1857,17 @@ void __init sev_hardware_setup(void)
        min_sev_asid = edx;
        sev_me_mask = 1UL << (ebx & 0x3f);
 
-       /* Initialize SEV ASID bitmaps */
-       sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+       /*
+        * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
+        * even though it's never used, so that the bitmap is indexed by the
+        * actual ASID.
+        */
+       nr_asids = max_sev_asid + 1;
+       sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
        if (!sev_asid_bitmap)
                goto out;
 
-       sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+       sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
        if (!sev_reclaim_asid_bitmap) {
                bitmap_free(sev_asid_bitmap);
                sev_asid_bitmap = NULL;
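
To make the switch to 1-based ASID handling concrete, a hedged illustration with made-up firmware values (the real max_sev_asid and min_sev_asid come from the CPUID read just above this hunk):

	/* Illustration only: example values, not taken from this patch. */
	unsigned int max_sev_asid = 509;		/* highest valid SEV ASID */
	unsigned int min_sev_asid = 64;			/* first ASID usable by plain SEV guests */
	unsigned int nr_asids = max_sev_asid + 1;	/* 510 bits; bit 0 stays permanently clear */

	/*
	 * SEV-ES guests then allocate from ASIDs 1..63 and SEV guests from
	 * 64..509, so the bitmap index equals the ASID itself and the old
	 * "pos = asid - 1" conversions disappear.
	 */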
@@ -1907,7 +1912,7 @@ void sev_hardware_teardown(void)
                return;
 
        /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
-       sev_flush_asids(0, max_sev_asid);
+       sev_flush_asids(1, max_sev_asid);
 
        bitmap_free(sev_asid_bitmap);
        bitmap_free(sev_reclaim_asid_bitmap);
@@ -1921,7 +1926,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
        if (!sev_enabled)
                return 0;
 
-       sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);
+       sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
        if (!sd->sev_vmcbs)
                return -ENOMEM;
 
index e8ccab5..69639f9 100644 (file)
@@ -1589,17 +1589,18 @@ static void svm_set_vintr(struct vcpu_svm *svm)
 
 static void svm_clear_vintr(struct vcpu_svm *svm)
 {
-       const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
        svm_clr_intercept(svm, INTERCEPT_VINTR);
 
        /* Drop int_ctl fields related to VINTR injection.  */
-       svm->vmcb->control.int_ctl &= mask;
+       svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
        if (is_guest_mode(&svm->vcpu)) {
-               svm->vmcb01.ptr->control.int_ctl &= mask;
+               svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
 
                WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
                        (svm->nested.ctl.int_ctl & V_TPR_MASK));
-               svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
+
+               svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
+                       V_IRQ_INJECTION_BITS_MASK;
        }
 
        vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
index b484141..03ebe36 100644 (file)
@@ -92,6 +92,21 @@ TRACE_EVENT(kvm_hv_hypercall,
                  __entry->outgpa)
 );
 
+TRACE_EVENT(kvm_hv_hypercall_done,
+       TP_PROTO(u64 result),
+       TP_ARGS(result),
+
+       TP_STRUCT__entry(
+               __field(__u64, result)
+       ),
+
+       TP_fast_assign(
+               __entry->result = result;
+       ),
+
+       TP_printk("result 0x%llx", __entry->result)
+);
+
 /*
  * Tracepoint for Xen hypercall.
  */
index 1a52134..b3f77d1 100644 (file)
@@ -330,6 +330,31 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
        vcpu_put(vcpu);
 }
 
+#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
+
+static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
+{
+       return VALID_PAGE(root_hpa) &&
+              ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
+}
+
+static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
+                                      gpa_t addr)
+{
+       uint i;
+       struct kvm_mmu_root_info *cached_root;
+
+       WARN_ON_ONCE(!mmu_is_nested(vcpu));
+
+       for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+               cached_root = &vcpu->arch.mmu->prev_roots[i];
+
+               if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
+                                           eptp))
+                       vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
+       }
+}
+
 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                struct x86_exception *fault)
 {
@@ -342,10 +367,22 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                vm_exit_reason = EXIT_REASON_PML_FULL;
                vmx->nested.pml_full = false;
                exit_qualification &= INTR_INFO_UNBLOCK_NMI;
-       } else if (fault->error_code & PFERR_RSVD_MASK)
-               vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
-       else
-               vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+       } else {
+               if (fault->error_code & PFERR_RSVD_MASK)
+                       vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+               else
+                       vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+
+               /*
+                * Although the caller (kvm_inject_emulated_page_fault) would
+                * have already synced the faulting address in the shadow EPT
+                * tables for the current EPTP12, we also need to sync it for
+                * any other cached EPTP02s based on the same EP4TA, since the
+                * TLB associates mappings to the EP4TA rather than the full EPTP.
+                */
+               nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
+                                          fault->address);
+       }
 
        nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
        vmcs12->guest_physical_address = fault->address;
@@ -5325,14 +5362,6 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
        return nested_vmx_succeed(vcpu);
 }
 
-#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
-
-static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
-{
-       return VALID_PAGE(root_hpa) &&
-               ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
-}
-
 /* Emulate the INVEPT instruction */
 static int handle_invept(struct kvm_vcpu *vcpu)
 {
@@ -5826,7 +5855,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
                if (is_nmi(intr_info))
                        return true;
                else if (is_page_fault(intr_info))
-                       return vcpu->arch.apf.host_apf_flags || !enable_ept;
+                       return vcpu->arch.apf.host_apf_flags ||
+                              vmx_need_pf_intercept(vcpu);
                else if (is_debug(intr_info) &&
                         vcpu->guest_debug &
                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
index db88ed4..17a1cb4 100644 (file)
@@ -522,7 +522,7 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
 
 static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 {
-       return vmx->secondary_exec_control &
+       return secondary_exec_controls_get(vmx) &
                SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
 }
 
index 4116567..e5d5c5e 100644 (file)
@@ -4358,8 +4358,17 @@ static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
 
 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
 {
-       return kvm_arch_interrupt_allowed(vcpu) &&
-               kvm_cpu_accept_dm_intr(vcpu);
+       /*
+        * Do not cause an interrupt window exit if an exception
+        * is pending or an event needs reinjection; userspace
+        * might want to inject the interrupt manually using KVM_SET_REGS
+        * or KVM_SET_SREGS.  For that to work, we must be at an
+        * instruction boundary and with no events half-injected.
+        */
+       return (kvm_arch_interrupt_allowed(vcpu) &&
+               kvm_cpu_accept_dm_intr(vcpu) &&
+               !kvm_event_needs_reinjection(vcpu) &&
+               !vcpu->arch.exception.pending);
 }
 
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
index bad4dee..c6506c6 100644 (file)
@@ -44,6 +44,7 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
 lib-y := delay.o misc.o cmdline.o cpu.o
 lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
+lib-y += pc-conf-reg.o
 lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
diff --git a/arch/x86/lib/pc-conf-reg.c b/arch/x86/lib/pc-conf-reg.c
new file mode 100644 (file)
index 0000000..febb527
--- /dev/null
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for the configuration register space at port I/O locations
+ * 0x22 and 0x23 variously used by PC architectures, e.g. the MP Spec,
+ * Cyrix CPUs, numerous chipsets.  As the space is indirectly addressed
+ * it may have to be protected with a spinlock, depending on the context.
+ */
+
+#include <linux/spinlock.h>
+
+#include <asm/pc-conf-reg.h>
+
+DEFINE_RAW_SPINLOCK(pc_conf_lock);
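
The pc_conf_get()/pc_conf_set() helpers used by the interrupt routers later in this series live in asm/pc-conf-reg.h, which is not part of this excerpt; a minimal sketch of how such index/data accessors are typically implemented (port numbers per the comment above, exact names and signatures assumed):

/* Sketch only: the real definitions are in asm/pc-conf-reg.h (not shown). */
#include <linux/types.h>
#include <asm/io.h>

#define PC_CONF_INDEX	0x22	/* index port: selects the register */
#define PC_CONF_DATA	0x23	/* data port: reads/writes its value */

static inline void pc_conf_set(u8 reg, u8 data)
{
	outb(reg, PC_CONF_INDEX);
	outb(data, PC_CONF_DATA);
}

static inline u8 pc_conf_get(u8 reg)
{
	outb(reg, PC_CONF_INDEX);
	return inb(PC_CONF_DATA);
}

Because selecting the register and transferring its value are two separate port accesses, callers that can race serialize them with pc_conf_lock, which is what the raw spinlock defined in this new file provides.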
index cd768da..933a2eb 100644 (file)
@@ -376,12 +376,12 @@ static void enter_uniprocessor(void)
                goto out;
        }
 
-       get_online_cpus();
+       cpus_read_lock();
        cpumask_copy(downed_cpus, cpu_online_mask);
        cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
        if (num_online_cpus() > 1)
                pr_notice("Disabling non-boot CPUs...\n");
-       put_online_cpus();
+       cpus_read_unlock();
 
        for_each_cpu(cpu, downed_cpus) {
                err = remove_cpu(cpu);
index cfe6b1e..59ba296 100644 (file)
@@ -8,11 +8,13 @@
 #include <linux/export.h>
 #include <linux/cpu.h>
 #include <linux/debugfs.h>
+#include <linux/sched/smt.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/nospec-branch.h>
 #include <asm/cache.h>
+#include <asm/cacheflush.h>
 #include <asm/apic.h>
 #include <asm/perf_event.h>
 
  */
 
 /*
- * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
- * stored in cpu_tlb_state.last_user_mm_ibpb.
+ * Bits to mangle the TIF_SPEC_* state into the mm pointer which is
+ * stored in cpu_tlb_state.last_user_mm_spec.
  */
 #define LAST_USER_MM_IBPB      0x1UL
+#define LAST_USER_MM_L1D_FLUSH 0x2UL
+#define LAST_USER_MM_SPEC_MASK (LAST_USER_MM_IBPB | LAST_USER_MM_L1D_FLUSH)
+
+/* Bits to set when tlbstate and flush is (re)initialized */
+#define LAST_USER_MM_INIT      LAST_USER_MM_IBPB
 
 /*
  * The x86 feature is called PCID (Process Context IDentifier). It is similar
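
A brief illustration of why the flag bits can be folded into the mm pointer (not part of the patch): mm_struct pointers are at least word-aligned, so bits 0 and 1 are always zero and are free to carry per-task state.

	/* Sketch only: tagging an mm pointer with the per-task speculation flags. */
	unsigned long tagged = (unsigned long)next->mm |
			       LAST_USER_MM_IBPB |	/* bit 0: IBPB requested      */
			       LAST_USER_MM_L1D_FLUSH;	/* bit 1: L1D flush requested */

	/*
	 * Comparing such tagged values for the previous and next task detects
	 * both an mm change and a change in either mitigation flag in one test.
	 */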
@@ -317,20 +324,70 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        local_irq_restore(flags);
 }
 
-static unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+/*
+ * Invoked from return to user/guest by a task that opted in to L1D
+ * flushing but ended up running on an SMT-enabled core due to wrong
+ * affinity settings or CPU hotplug. This is part of the paranoid L1D flush
+ * contract which this task requested.
+ */
+static void l1d_flush_force_sigbus(struct callback_head *ch)
+{
+       force_sig(SIGBUS);
+}
+
+static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm,
+                               struct task_struct *next)
+{
+       /* Flush L1D if the outgoing task requests it */
+       if (prev_mm & LAST_USER_MM_L1D_FLUSH)
+               wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+
+       /* Check whether the incoming task opted in for L1D flush */
+       if (likely(!(next_mm & LAST_USER_MM_L1D_FLUSH)))
+               return;
+
+       /*
+        * Validate that it is not running on an SMT sibling as this would
+        * make the exercise pointless because the siblings share L1D. If
+        * it runs on an SMT sibling, notify it with SIGBUS on return to
+        * user/guest.
+        */
+       if (this_cpu_read(cpu_info.smt_active)) {
+               clear_ti_thread_flag(&next->thread_info, TIF_SPEC_L1D_FLUSH);
+               next->l1d_flush_kill.func = l1d_flush_force_sigbus;
+               task_work_add(next, &next->l1d_flush_kill, TWA_RESUME);
+       }
+}
+
+static unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
 {
        unsigned long next_tif = task_thread_info(next)->flags;
-       unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
+       unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;
 
-       return (unsigned long)next->mm | ibpb;
+       /*
+        * Ensure that the bit shift above works as expected and the two flags
+        * end up in bits 0 and 1.
+        */
+       BUILD_BUG_ON(TIF_SPEC_L1D_FLUSH != TIF_SPEC_IB + 1);
+
+       return (unsigned long)next->mm | spec_bits;
 }
 
-static void cond_ibpb(struct task_struct *next)
+static void cond_mitigation(struct task_struct *next)
 {
+       unsigned long prev_mm, next_mm;
+
        if (!next || !next->mm)
                return;
 
+       next_mm = mm_mangle_tif_spec_bits(next);
+       prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
+
        /*
+        * Avoid user/user BTB poisoning by flushing the branch predictor
+        * when switching between processes. This stops one process from
+        * doing Spectre-v2 attacks on another.
+        *
         * Both, the conditional and the always IBPB mode use the mm
         * pointer to avoid the IBPB when switching between tasks of the
         * same process. Using the mm pointer instead of mm->context.ctx_id
@@ -340,8 +397,6 @@ static void cond_ibpb(struct task_struct *next)
         * exposed data is not really interesting.
         */
        if (static_branch_likely(&switch_mm_cond_ibpb)) {
-               unsigned long prev_mm, next_mm;
-
                /*
                 * This is a bit more complex than the always mode because
                 * it has to handle two cases:
@@ -371,20 +426,14 @@ static void cond_ibpb(struct task_struct *next)
                 * Optimize this with reasonably small overhead for the
                 * above cases. Mangle the TIF_SPEC_IB bit into the mm
                 * pointer of the incoming task which is stored in
-                * cpu_tlbstate.last_user_mm_ibpb for comparison.
-                */
-               next_mm = mm_mangle_tif_spec_ib(next);
-               prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
-
-               /*
+                * cpu_tlbstate.last_user_mm_spec for comparison.
+                *
                 * Issue IBPB only if the mm's are different and one or
                 * both have the IBPB bit set.
                 */
                if (next_mm != prev_mm &&
                    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
                        indirect_branch_prediction_barrier();
-
-               this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
        }
 
        if (static_branch_unlikely(&switch_mm_always_ibpb)) {
@@ -393,11 +442,22 @@ static void cond_ibpb(struct task_struct *next)
                 * different context than the user space task which ran
                 * last on this CPU.
                 */
-               if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
+               if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) !=
+                                       (unsigned long)next->mm)
                        indirect_branch_prediction_barrier();
-                       this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
-               }
        }
+
+       if (static_branch_unlikely(&switch_mm_cond_l1d_flush)) {
+               /*
+                * Flush L1D when the outgoing task requested it and/or
+                * check whether the incoming task requested L1D flushing
+                * and ended up on an SMT sibling.
+                */
+               if (unlikely((prev_mm | next_mm) & LAST_USER_MM_L1D_FLUSH))
+                       l1d_flush_evaluate(prev_mm, next_mm, next);
+       }
+
+       this_cpu_write(cpu_tlbstate.last_user_mm_spec, next_mm);
 }
 
 #ifdef CONFIG_PERF_EVENTS
@@ -531,11 +591,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                need_flush = true;
        } else {
                /*
-                * Avoid user/user BTB poisoning by flushing the branch
-                * predictor when switching between processes. This stops
-                * one process from doing Spectre-v2 attacks on another.
+                * Apply process to process speculation vulnerability
+                * mitigations if applicable.
                 */
-               cond_ibpb(tsk);
+               cond_mitigation(tsk);
 
                /*
                 * Stop remote flushes for the previous mm.
@@ -643,7 +702,7 @@ void initialize_tlbstate_and_flush(void)
        write_cr3(build_cr3(mm->pgd, 0));
 
        /* Reinitialize tlbstate. */
-       this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
+       this_cpu_write(cpu_tlbstate.last_user_mm_spec, LAST_USER_MM_INIT);
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
        this_cpu_write(cpu_tlbstate.next_asid, 1);
        this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
index d3a73f9..97b63e3 100644 (file)
 #include <linux/dmi.h>
 #include <linux/io.h>
 #include <linux/smp.h>
+#include <linux/spinlock.h>
 #include <asm/io_apic.h>
 #include <linux/irq.h>
 #include <linux/acpi.h>
+
+#include <asm/i8259.h>
+#include <asm/pc-conf-reg.h>
 #include <asm/pci_x86.h>
 
 #define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
@@ -47,6 +51,8 @@ struct irq_router {
        int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
        int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq,
                int new);
+       int (*lvl)(struct pci_dev *router, struct pci_dev *dev, int pirq,
+               int irq);
 };
 
 struct irq_router_handler {
@@ -153,7 +159,7 @@ static void __init pirq_peer_trick(void)
 void elcr_set_level_irq(unsigned int irq)
 {
        unsigned char mask = 1 << (irq & 7);
-       unsigned int port = 0x4d0 + (irq >> 3);
+       unsigned int port = PIC_ELCR1 + (irq >> 3);
        unsigned char val;
        static u16 elcr_irq_mask;
 
@@ -169,6 +175,139 @@ void elcr_set_level_irq(unsigned int irq)
        }
 }
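
A quick worked example of the port/mask arithmetic (PIC_ELCR1 is the primary ELCR at port 0x4d0, as the replaced literal shows):

	/* Illustration only: configuring IRQ 11 as level-triggered. */
	unsigned int irq = 11;
	unsigned char mask = 1 << (irq & 7);		/* 1 << 3 == 0x08 */
	unsigned int port = PIC_ELCR1 + (irq >> 3);	/* 0x4d0 + 1 == 0x4d1, the secondary ELCR */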
 
+/*
+ *     PIRQ routing for the M1487 ISA Bus Controller (IBC) ASIC used
+ *     with the ALi FinALi 486 chipset.  The IBC is not decoded in the
+ *     PCI configuration space, so we identify it by the accompanying
+ *     M1489 Cache-Memory PCI Controller (CMP) ASIC.
+ *
+ *     There are four 4-bit mappings provided, spread across two PCI
+ *     INTx Routing Table Mapping Registers, available in the port I/O
+ *     space accessible indirectly via the index/data register pair at
+ *     0x22/0x23, located at indices 0x42 and 0x43 for the INT1/INT2
+ *     and INT3/INT4 lines respectively.  The INT1/INT3 and INT2/INT4
+ *     lines are mapped in the low and the high 4-bit nibble of the
+ *     corresponding register as follows:
+ *
+ *     0000 : Disabled
+ *     0001 : IRQ9
+ *     0010 : IRQ3
+ *     0011 : IRQ10
+ *     0100 : IRQ4
+ *     0101 : IRQ5
+ *     0110 : IRQ7
+ *     0111 : IRQ6
+ *     1000 : Reserved
+ *     1001 : IRQ11
+ *     1010 : Reserved
+ *     1011 : IRQ12
+ *     1100 : Reserved
+ *     1101 : IRQ14
+ *     1110 : Reserved
+ *     1111 : IRQ15
+ *
+ *     In addition to the usual ELCR register pair there is a separate
+ *     PCI INTx Sensitivity Register at index 0x44 in the same port I/O
+ *     space, whose bits 3:0 select the trigger mode for INT[4:1] lines
+ *     respectively.  Any bit set to 1 causes interrupts coming on the
+ *     corresponding line to be passed to ISA as edge-triggered and
+ *     otherwise they are passed as level-triggered.  Manufacturer's
+ *     documentation says this register has to be set consistently with
+ *     the relevant ELCR register.
+ *
+ *     Accesses to the port I/O space concerned here need to be unlocked
+ *     by writing the value of 0xc5 to the Lock Register at index 0x03
+ *     beforehand.  Any other value written to said register prevents
+ *     further accesses from reaching the register file, except for the
+ *     Lock Register being written with 0xc5 again.
+ *
+ *     References:
+ *
+ *     "M1489/M1487: 486 PCI Chip Set", Version 1.2, Acer Laboratories
+ *     Inc., July 1997
+ */
+
+#define PC_CONF_FINALI_LOCK            0x03u
+#define PC_CONF_FINALI_PCI_INTX_RT1    0x42u
+#define PC_CONF_FINALI_PCI_INTX_RT2    0x43u
+#define PC_CONF_FINALI_PCI_INTX_SENS   0x44u
+
+#define PC_CONF_FINALI_LOCK_KEY                0xc5u
+
+static u8 read_pc_conf_nybble(u8 base, u8 index)
+{
+       u8 reg = base + (index >> 1);
+       u8 x;
+
+       x = pc_conf_get(reg);
+       return index & 1 ? x >> 4 : x & 0xf;
+}
+
+static void write_pc_conf_nybble(u8 base, u8 index, u8 val)
+{
+       u8 reg = base + (index >> 1);
+       u8 x;
+
+       x = pc_conf_get(reg);
+       x = index & 1 ? (x & 0x0f) | (val << 4) : (x & 0xf0) | val;
+       pc_conf_set(reg, x);
+}
+
+static int pirq_finali_get(struct pci_dev *router, struct pci_dev *dev,
+                          int pirq)
+{
+       static const u8 irqmap[16] = {
+               0, 9, 3, 10, 4, 5, 7, 6, 0, 11, 0, 12, 0, 14, 0, 15
+       };
+       unsigned long flags;
+       u8 x;
+
+       raw_spin_lock_irqsave(&pc_conf_lock, flags);
+       pc_conf_set(PC_CONF_FINALI_LOCK, PC_CONF_FINALI_LOCK_KEY);
+       x = irqmap[read_pc_conf_nybble(PC_CONF_FINALI_PCI_INTX_RT1, pirq - 1)];
+       pc_conf_set(PC_CONF_FINALI_LOCK, 0);
+       raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
+       return x;
+}
+
+static int pirq_finali_set(struct pci_dev *router, struct pci_dev *dev,
+                          int pirq, int irq)
+{
+       static const u8 irqmap[16] = {
+               0, 0, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15
+       };
+       u8 val = irqmap[irq];
+       unsigned long flags;
+
+       if (!val)
+               return 0;
+
+       raw_spin_lock_irqsave(&pc_conf_lock, flags);
+       pc_conf_set(PC_CONF_FINALI_LOCK, PC_CONF_FINALI_LOCK_KEY);
+       write_pc_conf_nybble(PC_CONF_FINALI_PCI_INTX_RT1, pirq - 1, val);
+       pc_conf_set(PC_CONF_FINALI_LOCK, 0);
+       raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
+       return 1;
+}
+
+static int pirq_finali_lvl(struct pci_dev *router, struct pci_dev *dev,
+                          int pirq, int irq)
+{
+       u8 mask = ~(1u << (pirq - 1));
+       unsigned long flags;
+       u8 trig;
+
+       elcr_set_level_irq(irq);
+       raw_spin_lock_irqsave(&pc_conf_lock, flags);
+       pc_conf_set(PC_CONF_FINALI_LOCK, PC_CONF_FINALI_LOCK_KEY);
+       trig = pc_conf_get(PC_CONF_FINALI_PCI_INTX_SENS);
+       trig &= mask;
+       pc_conf_set(PC_CONF_FINALI_PCI_INTX_SENS, trig);
+       pc_conf_set(PC_CONF_FINALI_LOCK, 0);
+       raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
+       return 1;
+}
+
 /*
  * Common IRQ routing practice: nibbles in config space,
  * offset by some magic constant.
@@ -219,6 +358,74 @@ static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
        return 0;
 }
 
+/*
+ *     PIRQ routing for the 82374EB/82374SB EISA System Component (ESC)
+ *     ASIC used with the Intel 82420 and 82430 PCIsets.  The ESC is not
+ *     decoded in the PCI configuration space, so we identify it by the
+ *     accompanying 82375EB/82375SB PCI-EISA Bridge (PCEB) ASIC.
+ *
+ *     There are four PIRQ Route Control registers, available in the
+ *     port I/O space accessible indirectly via the index/data register
+ *     pair at 0x22/0x23, located at indices 0x60/0x61/0x62/0x63 for the
+ *     PIRQ0/1/2/3# lines respectively.  The semantics is the same as
+ *     with the PIIX router.
+ *
+ *     Accesses to the port I/O space concerned here need to be unlocked
+ *     by writing the value of 0x0f to the ESC ID Register at index 0x02
+ *     beforehand.  Any other value written to said register prevents
+ *     further accesses from reaching the register file, except for the
+ *     ESC ID Register being written with 0x0f again.
+ *
+ *     References:
+ *
+ *     "82374EB/82374SB EISA System Component (ESC)", Intel Corporation,
+ *     Order Number: 290476-004, March 1996
+ *
+ *     "82375EB/82375SB PCI-EISA Bridge (PCEB)", Intel Corporation, Order
+ *     Number: 290477-004, March 1996
+ */
+
+#define PC_CONF_I82374_ESC_ID                  0x02u
+#define PC_CONF_I82374_PIRQ_ROUTE_CONTROL      0x60u
+
+#define PC_CONF_I82374_ESC_ID_KEY              0x0fu
+
+static int pirq_esc_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+       unsigned long flags;
+       int reg;
+       u8 x;
+
+       reg = pirq;
+       if (reg >= 1 && reg <= 4)
+               reg += PC_CONF_I82374_PIRQ_ROUTE_CONTROL - 1;
+
+       raw_spin_lock_irqsave(&pc_conf_lock, flags);
+       pc_conf_set(PC_CONF_I82374_ESC_ID, PC_CONF_I82374_ESC_ID_KEY);
+       x = pc_conf_get(reg);
+       pc_conf_set(PC_CONF_I82374_ESC_ID, 0);
+       raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
+       return (x < 16) ? x : 0;
+}
+
+static int pirq_esc_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
+                      int irq)
+{
+       unsigned long flags;
+       int reg;
+
+       reg = pirq;
+       if (reg >= 1 && reg <= 4)
+               reg += PC_CONF_I82374_PIRQ_ROUTE_CONTROL - 1;
+
+       raw_spin_lock_irqsave(&pc_conf_lock, flags);
+       pc_conf_set(PC_CONF_I82374_ESC_ID, PC_CONF_I82374_ESC_ID_KEY);
+       pc_conf_set(reg, irq);
+       pc_conf_set(PC_CONF_I82374_ESC_ID, 0);
+       raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
+       return 1;
+}
+
 /*
  * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
  * just a pointer to the config space.
@@ -237,6 +444,50 @@ static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
        return 1;
 }
 
+/*
+ *     PIRQ routing for the 82426EX ISA Bridge (IB) ASIC used with the
+ *     Intel 82420EX PCIset.
+ *
+ *     There are only two PIRQ Route Control registers, available in the
+ *     combined 82425EX/82426EX PCI configuration space, at 0x66 and 0x67
+ *     for the PIRQ0# and PIRQ1# lines respectively.  The semantics is
+ *     the same as with the PIIX router.
+ *
+ *     References:
+ *
+ *     "82420EX PCIset Data Sheet, 82425EX PCI System Controller (PSC)
+ *     and 82426EX ISA Bridge (IB)", Intel Corporation, Order Number:
+ *     290488-004, December 1995
+ */
+
+#define PCI_I82426EX_PIRQ_ROUTE_CONTROL        0x66u
+
+static int pirq_ib_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+       int reg;
+       u8 x;
+
+       reg = pirq;
+       if (reg >= 1 && reg <= 2)
+               reg += PCI_I82426EX_PIRQ_ROUTE_CONTROL - 1;
+
+       pci_read_config_byte(router, reg, &x);
+       return (x < 16) ? x : 0;
+}
+
+static int pirq_ib_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
+                      int irq)
+{
+       int reg;
+
+       reg = pirq;
+       if (reg >= 1 && reg <= 2)
+               reg += PCI_I82426EX_PIRQ_ROUTE_CONTROL - 1;
+
+       pci_write_config_byte(router, reg, irq);
+       return 1;
+}
+
 /*
  * The VIA pirq rules are nibble-based, like ALI,
  * but without the ugly irq number munging.
@@ -549,6 +800,11 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
                return 0;
 
        switch (device) {
+       case PCI_DEVICE_ID_INTEL_82375:
+               r->name = "PCEB/ESC";
+               r->get = pirq_esc_get;
+               r->set = pirq_esc_set;
+               return 1;
        case PCI_DEVICE_ID_INTEL_82371FB_0:
        case PCI_DEVICE_ID_INTEL_82371SB_0:
        case PCI_DEVICE_ID_INTEL_82371AB_0:
@@ -594,6 +850,11 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
                r->get = pirq_piix_get;
                r->set = pirq_piix_set;
                return 1;
+       case PCI_DEVICE_ID_INTEL_82425:
+               r->name = "PSC/IB";
+               r->get = pirq_ib_get;
+               r->set = pirq_ib_set;
+               return 1;
        }
 
        if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN && 
@@ -745,6 +1006,12 @@ static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router,
 static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
 {
        switch (device) {
+       case PCI_DEVICE_ID_AL_M1489:
+               r->name = "FinALi";
+               r->get = pirq_finali_get;
+               r->set = pirq_finali_set;
+               r->lvl = pirq_finali_lvl;
+               return 1;
        case PCI_DEVICE_ID_AL_M1533:
        case PCI_DEVICE_ID_AL_M1563:
                r->name = "ALI";
@@ -968,11 +1235,17 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
        } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
        ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) {
                msg = "found";
-               elcr_set_level_irq(irq);
+               if (r->lvl)
+                       r->lvl(pirq_router_dev, dev, pirq, irq);
+               else
+                       elcr_set_level_irq(irq);
        } else if (newirq && r->set &&
                (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
                if (r->set(pirq_router_dev, dev, pirq, newirq)) {
-                       elcr_set_level_irq(newirq);
+                       if (r->lvl)
+                               r->lvl(pirq_router_dev, dev, pirq, newirq);
+                       else
+                               elcr_set_level_irq(newirq);
                        msg = "assigned";
                        irq = newirq;
                }
index 3a070e7..6665f88 100644 (file)
@@ -58,19 +58,20 @@ static void msr_restore_context(struct saved_context *ctxt)
 }
 
 /**
- *     __save_processor_state - save CPU registers before creating a
- *             hibernation image and before restoring the memory state from it
- *     @ctxt - structure to store the registers contents in
+ * __save_processor_state() - Save CPU registers before creating a
+ *                             hibernation image and before restoring
+ *                             the memory state from it
+ * @ctxt: Structure to store the registers contents in.
  *
- *     NOTE: If there is a CPU register the modification of which by the
- *     boot kernel (ie. the kernel used for loading the hibernation image)
- *     might affect the operations of the restored target kernel (ie. the one
- *     saved in the hibernation image), then its contents must be saved by this
- *     function.  In other words, if kernel A is hibernated and different
- *     kernel B is used for loading the hibernation image into memory, the
- *     kernel A's __save_processor_state() function must save all registers
- *     needed by kernel A, so that it can operate correctly after the resume
- *     regardless of what kernel B does in the meantime.
+ * NOTE: If there is a CPU register the modification of which by the
+ * boot kernel (ie. the kernel used for loading the hibernation image)
+ * might affect the operations of the restored target kernel (ie. the one
+ * saved in the hibernation image), then its contents must be saved by this
+ * function.  In other words, if kernel A is hibernated and different
+ * kernel B is used for loading the hibernation image into memory, the
+ * kernel A's __save_processor_state() function must save all registers
+ * needed by kernel A, so that it can operate correctly after the resume
+ * regardless of what kernel B does in the meantime.
  */
 static void __save_processor_state(struct saved_context *ctxt)
 {
@@ -181,9 +182,9 @@ static void fix_processor_context(void)
 }
 
 /**
- * __restore_processor_state - restore the contents of CPU registers saved
- *                             by __save_processor_state()
- * @ctxt - structure to load the registers contents from
+ * __restore_processor_state() - Restore the contents of CPU registers saved
+ *                               by __save_processor_state()
+ * @ctxt: Structure to load the registers contents from.
  *
  * The asm code that gets us here will have restored a usable GDT, although
  * it will be pointing to the wrong alias.
index fd1ab80..a4cf678 100644 (file)
@@ -10,6 +10,7 @@ BEGIN {
 
 /^GNU objdump/ {
        verstr = ""
+       gsub(/\(.*\)/, "");
        for (i = 3; i <= NF; i++)
                if (match($(i), "^[0-9]")) {
                        verstr = $(i);
index 04c5a44..27c8220 100644 (file)
@@ -26,6 +26,9 @@ static struct relocs relocs32;
 #if ELF_BITS == 64
 static struct relocs relocs32neg;
 static struct relocs relocs64;
+#define FMT PRIu64
+#else
+#define FMT PRIu32
 #endif
 
 struct section {
@@ -57,12 +60,12 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
        [S_REL] =
        "^(__init_(begin|end)|"
        "__x86_cpu_dev_(start|end)|"
-       "(__parainstructions|__alt_instructions)(|_end)|"
-       "(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
+       "(__parainstructions|__alt_instructions)(_end)?|"
+       "(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
        "__(start|end)_pci_.*|"
        "__(start|end)_builtin_fw|"
-       "__(start|stop)___ksymtab(|_gpl)|"
-       "__(start|stop)___kcrctab(|_gpl)|"
+       "__(start|stop)___ksymtab(_gpl)?|"
+       "__(start|stop)___kcrctab(_gpl)?|"
        "__(start|stop)___param|"
        "__(start|stop)___modver|"
        "__(start|stop)___bug_table|"
@@ -389,7 +392,7 @@ static void read_ehdr(FILE *fp)
                Elf_Shdr shdr;
 
                if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
-                       die("Seek to %d failed: %s\n", ehdr.e_shoff, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n", ehdr.e_shoff, strerror(errno));
 
                if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
                        die("Cannot read initial ELF section header: %s\n", strerror(errno));
@@ -412,17 +415,17 @@ static void read_shdrs(FILE *fp)
 
        secs = calloc(shnum, sizeof(struct section));
        if (!secs) {
-               die("Unable to allocate %d section headers\n",
+               die("Unable to allocate %ld section headers\n",
                    shnum);
        }
        if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
-               die("Seek to %d failed: %s\n",
-                       ehdr.e_shoff, strerror(errno));
+               die("Seek to %" FMT " failed: %s\n",
+                   ehdr.e_shoff, strerror(errno));
        }
        for (i = 0; i < shnum; i++) {
                struct section *sec = &secs[i];
                if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
-                       die("Cannot read ELF section headers %d/%d: %s\n",
+                       die("Cannot read ELF section headers %d/%ld: %s\n",
                            i, shnum, strerror(errno));
                sec->shdr.sh_name      = elf_word_to_cpu(shdr.sh_name);
                sec->shdr.sh_type      = elf_word_to_cpu(shdr.sh_type);
@@ -450,12 +453,12 @@ static void read_strtabs(FILE *fp)
                }
                sec->strtab = malloc(sec->shdr.sh_size);
                if (!sec->strtab) {
-                       die("malloc of %d bytes for strtab failed\n",
-                               sec->shdr.sh_size);
+                       die("malloc of %" FMT " bytes for strtab failed\n",
+                           sec->shdr.sh_size);
                }
                if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
-                       die("Seek to %d failed: %s\n",
-                               sec->shdr.sh_offset, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n",
+                           sec->shdr.sh_offset, strerror(errno));
                }
                if (fread(sec->strtab, 1, sec->shdr.sh_size, fp)
                    != sec->shdr.sh_size) {
@@ -475,12 +478,12 @@ static void read_symtabs(FILE *fp)
                }
                sec->symtab = malloc(sec->shdr.sh_size);
                if (!sec->symtab) {
-                       die("malloc of %d bytes for symtab failed\n",
-                               sec->shdr.sh_size);
+                       die("malloc of %" FMT " bytes for symtab failed\n",
+                           sec->shdr.sh_size);
                }
                if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
-                       die("Seek to %d failed: %s\n",
-                               sec->shdr.sh_offset, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n",
+                           sec->shdr.sh_offset, strerror(errno));
                }
                if (fread(sec->symtab, 1, sec->shdr.sh_size, fp)
                    != sec->shdr.sh_size) {
@@ -508,12 +511,12 @@ static void read_relocs(FILE *fp)
                }
                sec->reltab = malloc(sec->shdr.sh_size);
                if (!sec->reltab) {
-                       die("malloc of %d bytes for relocs failed\n",
-                               sec->shdr.sh_size);
+                       die("malloc of %" FMT " bytes for relocs failed\n",
+                           sec->shdr.sh_size);
                }
                if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
-                       die("Seek to %d failed: %s\n",
-                               sec->shdr.sh_offset, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n",
+                           sec->shdr.sh_offset, strerror(errno));
                }
                if (fread(sec->reltab, 1, sec->shdr.sh_size, fp)
                    != sec->shdr.sh_size) {
index 43c83c0..4c49c82 100644 (file)
@@ -17,6 +17,7 @@
 #include <regex.h>
 #include <tools/le_byteshift.h>
 
+__attribute__((__format__(printf, 1, 2)))
 void die(char *fmt, ...) __attribute__((noreturn));
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
index a48bf2d..764b54b 100644 (file)
@@ -33,8 +33,6 @@ DECLARE_PER_CPU(unsigned long, nmi_count);
 
 asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
-       int irq = irq_find_mapping(NULL, hwirq);
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 1KB free? */
        {
@@ -48,7 +46,7 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
                               sp - sizeof(struct thread_info));
        }
 #endif
-       generic_handle_irq(irq);
+       generic_handle_domain_irq(NULL, hwirq);
 }
 
 int arch_show_interrupts(struct seq_file *p, int prec)
index fd732ae..bac87d7 100644 (file)
@@ -114,16 +114,6 @@ config BLK_DEV_THROTTLING_LOW
 
        Note, this is an experimental interface and could be changed someday.
 
-config BLK_CMDLINE_PARSER
-       bool "Block device command line partition parser"
-       help
-       Enabling this option allows you to specify the partition layout from
-       the kernel boot args.  This is typically of use for embedded devices
-       which don't otherwise have any standardized method for listing the
-       partitions on a block device.
-
-       See Documentation/block/cmdline-partition.rst for more information.
-
 config BLK_WBT
        bool "Enable support for block device writeback throttling"
        help
@@ -251,4 +241,8 @@ config BLK_MQ_RDMA
 config BLK_PM
        def_bool BLOCK && PM
 
+# do not use in new code
+config BLOCK_HOLDER_DEPRECATED
+       bool
+
 source "block/Kconfig.iosched"
index 64053d6..2f2158e 100644 (file)
@@ -9,12 +9,6 @@ config MQ_IOSCHED_DEADLINE
        help
          MQ version of the deadline IO scheduler.
 
-config MQ_IOSCHED_DEADLINE_CGROUP
-       tristate
-       default y
-       depends on MQ_IOSCHED_DEADLINE
-       depends on BLK_CGROUP
-
 config MQ_IOSCHED_KYBER
        tristate "Kyber I/O scheduler"
        default y
index bfbe4e1..1d0d466 100644 (file)
@@ -22,13 +22,10 @@ obj-$(CONFIG_BLK_CGROUP_IOPRIO)     += blk-ioprio.o
 obj-$(CONFIG_BLK_CGROUP_IOLATENCY)     += blk-iolatency.o
 obj-$(CONFIG_BLK_CGROUP_IOCOST)        += blk-iocost.o
 obj-$(CONFIG_MQ_IOSCHED_DEADLINE)      += mq-deadline.o
-mq-deadline-y += mq-deadline-main.o
-mq-deadline-$(CONFIG_MQ_IOSCHED_DEADLINE_CGROUP)+= mq-deadline-cgroup.o
 obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
 bfq-y                          := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
 obj-$(CONFIG_IOSCHED_BFQ)      += bfq.o
 
-obj-$(CONFIG_BLK_CMDLINE_PARSER)       += cmdline-parser.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY_T10)    += t10-pi.o
 obj-$(CONFIG_BLK_MQ_PCI)       += blk-mq-pci.o
@@ -42,3 +39,4 @@ obj-$(CONFIG_BLK_SED_OPAL)    += sed-opal.o
 obj-$(CONFIG_BLK_PM)           += blk-pm.o
 obj-$(CONFIG_BLK_INLINE_ENCRYPTION)    += keyslot-manager.o blk-crypto.o
 obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK)   += blk-crypto-fallback.o
+obj-$(CONFIG_BLOCK_HOLDER_DEPRECATED)  += holder.o
index 7279559..480e1a1 100644 (file)
@@ -2361,6 +2361,9 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
        __rq = bfq_find_rq_fmerge(bfqd, bio, q);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
+
+               if (blk_discard_mergable(__rq))
+                       return ELEVATOR_DISCARD_MERGE;
                return ELEVATOR_FRONT_MERGE;
        }
 
@@ -2505,7 +2508,7 @@ void bfq_end_wr_async_queues(struct bfq_data *bfqd,
        int i, j;
 
        for (i = 0; i < 2; i++)
-               for (j = 0; j < IOPRIO_BE_NR; j++)
+               for (j = 0; j < IOPRIO_NR_LEVELS; j++)
                        if (bfqg->async_bfqq[i][j])
                                bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
        if (bfqg->async_idle_bfqq)
@@ -5266,8 +5269,8 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
        switch (ioprio_class) {
        default:
                pr_err("bdi %s: bfq: bad prio class %d\n",
-                               bdi_dev_name(bfqq->bfqd->queue->backing_dev_info),
-                               ioprio_class);
+                       bdi_dev_name(bfqq->bfqd->queue->disk->bdi),
+                       ioprio_class);
                fallthrough;
        case IOPRIO_CLASS_NONE:
                /*
@@ -5290,10 +5293,10 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
                break;
        }
 
-       if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
+       if (bfqq->new_ioprio >= IOPRIO_NR_LEVELS) {
                pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
                        bfqq->new_ioprio);
-               bfqq->new_ioprio = IOPRIO_BE_NR;
+               bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
        }
 
        bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
@@ -5408,7 +5411,7 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
        case IOPRIO_CLASS_RT:
                return &bfqg->async_bfqq[0][ioprio];
        case IOPRIO_CLASS_NONE:
-               ioprio = IOPRIO_NORM;
+               ioprio = IOPRIO_BE_NORM;
                fallthrough;
        case IOPRIO_CLASS_BE:
                return &bfqg->async_bfqq[1][ioprio];
@@ -6822,7 +6825,7 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
        int i, j;
 
        for (i = 0; i < 2; i++)
-               for (j = 0; j < IOPRIO_BE_NR; j++)
+               for (j = 0; j < IOPRIO_NR_LEVELS; j++)
                        __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
 
        __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
index 99c2a3c..a73488e 100644 (file)
@@ -931,7 +931,7 @@ struct bfq_group {
 
        void *bfqd;
 
-       struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
+       struct bfq_queue *async_bfqq[2][IOPRIO_NR_LEVELS];
        struct bfq_queue *async_idle_bfqq;
 
        struct bfq_entity *my_entity;
@@ -948,15 +948,13 @@ struct bfq_group {
        struct bfq_entity entity;
        struct bfq_sched_data sched_data;
 
-       struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
+       struct bfq_queue *async_bfqq[2][IOPRIO_NR_LEVELS];
        struct bfq_queue *async_idle_bfqq;
 
        struct rb_root rq_pos_tree;
 };
 #endif
 
-struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
-
 /* --------------- main algorithm interface ----------------- */
 
 #define BFQ_SERVICE_TREE_INIT  ((struct bfq_service_tree)              \
index 7a462df..b74cc0d 100644 (file)
@@ -505,7 +505,7 @@ static void bfq_active_insert(struct bfq_service_tree *st,
  */
 unsigned short bfq_ioprio_to_weight(int ioprio)
 {
-       return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
+       return (IOPRIO_NR_LEVELS - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
 }
 
 /**
@@ -514,12 +514,12 @@ unsigned short bfq_ioprio_to_weight(int ioprio)
  *
  * To preserve as much as possible the old only-ioprio user interface,
  * 0 is used as an escape ioprio value for weights (numerically) equal or
- * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
+ * larger than IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF.
  */
 static unsigned short bfq_weight_to_ioprio(int weight)
 {
        return max_t(int, 0,
-                    IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight);
+                    IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF - weight);
 }
 
 static void bfq_get_entity(struct bfq_entity *entity)
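
The two helpers above are a pure linear mapping between ioprio levels and BFQ
weights. A minimal userspace sketch of that mapping, assuming IOPRIO_NR_LEVELS
is 8 and BFQ_WEIGHT_CONVERSION_COEFF is 10 (both values come from the current
headers, not from this diff):

	#include <stdio.h>

	#define IOPRIO_NR_LEVELS		8
	#define BFQ_WEIGHT_CONVERSION_COEFF	10

	/* mirrors bfq_ioprio_to_weight() above */
	static unsigned short ioprio_to_weight(int ioprio)
	{
		return (IOPRIO_NR_LEVELS - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
	}

	/* mirrors bfq_weight_to_ioprio() above, clamped at 0 */
	static int weight_to_ioprio(int weight)
	{
		int v = IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF - weight;

		return v > 0 ? v : 0;
	}

	int main(void)
	{
		/* the default BE level (4) maps to weight (8 - 4) * 10 = 40 */
		printf("weight(4) = %u\n", ioprio_to_weight(4));
		/* weights of 80 or more hit the escape ioprio 0 described above */
		printf("ioprio(80) = %d, ioprio(100) = %d\n",
		       weight_to_ioprio(80), weight_to_ioprio(100));
		return 0;
	}
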
index 4b4eb89..6b47cdd 100644 (file)
@@ -104,8 +104,7 @@ void bio_integrity_free(struct bio *bio)
        struct bio_set *bs = bio->bi_pool;
 
        if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
-               kfree(page_address(bip->bip_vec->bv_page) +
-                     bip->bip_vec->bv_offset);
+               kfree(bvec_virt(bip->bip_vec));
 
        __bio_integrity_free(bs, bip);
        bio->bi_integrity = NULL;
@@ -163,27 +162,23 @@ static blk_status_t bio_integrity_process(struct bio *bio,
        struct bio_vec bv;
        struct bio_integrity_payload *bip = bio_integrity(bio);
        blk_status_t ret = BLK_STS_OK;
-       void *prot_buf = page_address(bip->bip_vec->bv_page) +
-               bip->bip_vec->bv_offset;
 
        iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
        iter.interval = 1 << bi->interval_exp;
        iter.seed = proc_iter->bi_sector;
-       iter.prot_buf = prot_buf;
+       iter.prot_buf = bvec_virt(bip->bip_vec);
 
        __bio_for_each_segment(bv, bio, bviter, *proc_iter) {
-               void *kaddr = kmap_atomic(bv.bv_page);
+               void *kaddr = bvec_kmap_local(&bv);
 
-               iter.data_buf = kaddr + bv.bv_offset;
+               iter.data_buf = kaddr;
                iter.data_size = bv.bv_len;
-
                ret = proc_fn(&iter);
-               if (ret) {
-                       kunmap_atomic(kaddr);
-                       return ret;
-               }
+               kunmap_local(kaddr);
+
+               if (ret)
+                       break;
 
-               kunmap_atomic(kaddr);
        }
        return ret;
 }
index 1fab762..e16849f 100644 (file)
 #include "blk.h"
 #include "blk-rq-qos.h"
 
+struct bio_alloc_cache {
+       struct bio_list         free_list;
+       unsigned int            nr;
+};
+
 static struct biovec_slab {
        int nr_vecs;
        char *name;
@@ -246,12 +251,40 @@ static void bio_free(struct bio *bio)
 void bio_init(struct bio *bio, struct bio_vec *table,
              unsigned short max_vecs)
 {
-       memset(bio, 0, sizeof(*bio));
+       bio->bi_next = NULL;
+       bio->bi_bdev = NULL;
+       bio->bi_opf = 0;
+       bio->bi_flags = 0;
+       bio->bi_ioprio = 0;
+       bio->bi_write_hint = 0;
+       bio->bi_status = 0;
+       bio->bi_iter.bi_sector = 0;
+       bio->bi_iter.bi_size = 0;
+       bio->bi_iter.bi_idx = 0;
+       bio->bi_iter.bi_bvec_done = 0;
+       bio->bi_end_io = NULL;
+       bio->bi_private = NULL;
+#ifdef CONFIG_BLK_CGROUP
+       bio->bi_blkg = NULL;
+       bio->bi_issue.value = 0;
+#ifdef CONFIG_BLK_CGROUP_IOCOST
+       bio->bi_iocost_cost = 0;
+#endif
+#endif
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+       bio->bi_crypt_context = NULL;
+#endif
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+       bio->bi_integrity = NULL;
+#endif
+       bio->bi_vcnt = 0;
+
        atomic_set(&bio->__bi_remaining, 1);
        atomic_set(&bio->__bi_cnt, 1);
 
-       bio->bi_io_vec = table;
        bio->bi_max_vecs = max_vecs;
+       bio->bi_io_vec = table;
+       bio->bi_pool = NULL;
 }
 EXPORT_SYMBOL(bio_init);
 
@@ -495,16 +528,11 @@ EXPORT_SYMBOL(bio_kmalloc);
 
 void zero_fill_bio(struct bio *bio)
 {
-       unsigned long flags;
        struct bio_vec bv;
        struct bvec_iter iter;
 
-       bio_for_each_segment(bv, bio, iter) {
-               char *data = bvec_kmap_irq(&bv, &flags);
-               memset(data, 0, bv.bv_len);
-               flush_dcache_page(bv.bv_page);
-               bvec_kunmap_irq(data, &flags);
-       }
+       bio_for_each_segment(bv, bio, iter)
+               memzero_bvec(&bv);
 }
 EXPORT_SYMBOL(zero_fill_bio);
 
@@ -591,6 +619,53 @@ void guard_bio_eod(struct bio *bio)
        bio_truncate(bio, maxsector << 9);
 }
 
+#define ALLOC_CACHE_MAX                512
+#define ALLOC_CACHE_SLACK       64
+
+static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
+                                 unsigned int nr)
+{
+       unsigned int i = 0;
+       struct bio *bio;
+
+       while ((bio = bio_list_pop(&cache->free_list)) != NULL) {
+               cache->nr--;
+               bio_free(bio);
+               if (++i == nr)
+                       break;
+       }
+}
+
+static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+       struct bio_set *bs;
+
+       bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
+       if (bs->cache) {
+               struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);
+
+               bio_alloc_cache_prune(cache, -1U);
+       }
+       return 0;
+}
+
+static void bio_alloc_cache_destroy(struct bio_set *bs)
+{
+       int cpu;
+
+       if (!bs->cache)
+               return;
+
+       cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
+       for_each_possible_cpu(cpu) {
+               struct bio_alloc_cache *cache;
+
+               cache = per_cpu_ptr(bs->cache, cpu);
+               bio_alloc_cache_prune(cache, -1U);
+       }
+       free_percpu(bs->cache);
+}
+
 /**
  * bio_put - release a reference to a bio
  * @bio:   bio to release reference to
@@ -601,16 +676,23 @@ void guard_bio_eod(struct bio *bio)
  **/
 void bio_put(struct bio *bio)
 {
-       if (!bio_flagged(bio, BIO_REFFED))
-               bio_free(bio);
-       else {
+       if (unlikely(bio_flagged(bio, BIO_REFFED))) {
                BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
+               if (!atomic_dec_and_test(&bio->__bi_cnt))
+                       return;
+       }
 
-               /*
-                * last put frees it
-                */
-               if (atomic_dec_and_test(&bio->__bi_cnt))
-                       bio_free(bio);
+       if (bio_flagged(bio, BIO_PERCPU_CACHE)) {
+               struct bio_alloc_cache *cache;
+
+               bio_uninit(bio);
+               cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
+               bio_list_add_head(&cache->free_list, bio);
+               if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
+                       bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
+               put_cpu();
+       } else {
+               bio_free(bio);
        }
 }
 EXPORT_SYMBOL(bio_put);
@@ -979,6 +1061,14 @@ static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
        return 0;
 }
 
+static void bio_put_pages(struct page **pages, size_t size, size_t off)
+{
+       size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
+
+       for (i = 0; i < nr; i++)
+               put_page(pages[i]);
+}
+
 #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
 
 /**
@@ -1023,8 +1113,10 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
                        if (same_page)
                                put_page(page);
                } else {
-                       if (WARN_ON_ONCE(bio_full(bio, len)))
-                                return -EINVAL;
+                       if (WARN_ON_ONCE(bio_full(bio, len))) {
+                               bio_put_pages(pages + i, left, offset);
+                               return -EINVAL;
+                       }
                        __bio_add_page(bio, page, len, offset);
                }
                offset = 0;
@@ -1069,6 +1161,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
                len = min_t(size_t, PAGE_SIZE - offset, left);
                if (bio_add_hw_page(q, bio, page, len, offset,
                                max_append_sectors, &same_page) != len) {
+                       bio_put_pages(pages + i, left, offset);
                        ret = -EINVAL;
                        break;
                }
@@ -1191,27 +1284,15 @@ EXPORT_SYMBOL(bio_advance);
 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
                        struct bio *src, struct bvec_iter *src_iter)
 {
-       struct bio_vec src_bv, dst_bv;
-       void *src_p, *dst_p;
-       unsigned bytes;
-
        while (src_iter->bi_size && dst_iter->bi_size) {
-               src_bv = bio_iter_iovec(src, *src_iter);
-               dst_bv = bio_iter_iovec(dst, *dst_iter);
-
-               bytes = min(src_bv.bv_len, dst_bv.bv_len);
-
-               src_p = kmap_atomic(src_bv.bv_page);
-               dst_p = kmap_atomic(dst_bv.bv_page);
-
-               memcpy(dst_p + dst_bv.bv_offset,
-                      src_p + src_bv.bv_offset,
-                      bytes);
+               struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
+               struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
+               unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
+               void *src_buf;
 
-               kunmap_atomic(dst_p);
-               kunmap_atomic(src_p);
-
-               flush_dcache_page(dst_bv.bv_page);
+               src_buf = bvec_kmap_local(&src_bv);
+               memcpy_to_bvec(&dst_bv, src_buf);
+               kunmap_local(src_buf);
 
                bio_advance_iter_single(src, src_iter, bytes);
                bio_advance_iter_single(dst, dst_iter, bytes);
@@ -1463,12 +1544,15 @@ EXPORT_SYMBOL(bio_split);
  * @bio:       bio to trim
  * @offset:    number of sectors to trim from the front of @bio
  * @size:      size we want to trim @bio to, in sectors
+ *
+ * This function is typically used for bios that are cloned and submitted
+ * to the underlying device in parts.
  */
-void bio_trim(struct bio *bio, int offset, int size)
+void bio_trim(struct bio *bio, sector_t offset, sector_t size)
 {
-       /* 'bio' is a cloned bio which we need to trim to match
-        * the given offset and size.
-        */
+       if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
+                        offset + size > bio->bi_iter.bi_size))
+               return;
 
        size <<= 9;
        if (offset == 0 && size == bio->bi_iter.bi_size)
@@ -1479,7 +1563,6 @@ void bio_trim(struct bio *bio, int offset, int size)
 
        if (bio_integrity(bio))
                bio_integrity_trim(bio);
-
 }
 EXPORT_SYMBOL_GPL(bio_trim);
 
@@ -1502,6 +1585,7 @@ int biovec_init_pool(mempool_t *pool, int pool_entries)
  */
 void bioset_exit(struct bio_set *bs)
 {
+       bio_alloc_cache_destroy(bs);
        if (bs->rescue_workqueue)
                destroy_workqueue(bs->rescue_workqueue);
        bs->rescue_workqueue = NULL;
@@ -1563,12 +1647,18 @@ int bioset_init(struct bio_set *bs,
            biovec_init_pool(&bs->bvec_pool, pool_size))
                goto bad;
 
-       if (!(flags & BIOSET_NEED_RESCUER))
-               return 0;
-
-       bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
-       if (!bs->rescue_workqueue)
-               goto bad;
+       if (flags & BIOSET_NEED_RESCUER) {
+               bs->rescue_workqueue = alloc_workqueue("bioset",
+                                                       WQ_MEM_RECLAIM, 0);
+               if (!bs->rescue_workqueue)
+                       goto bad;
+       }
+       if (flags & BIOSET_PERCPU_CACHE) {
+               bs->cache = alloc_percpu(struct bio_alloc_cache);
+               if (!bs->cache)
+                       goto bad;
+               cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
+       }
 
        return 0;
 bad:
@@ -1595,6 +1685,46 @@ int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
 }
 EXPORT_SYMBOL(bioset_init_from_src);
 
+/**
+ * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
+ * @kiocb:     kiocb describing the IO
+ * @nr_vecs:   number of bio_vecs to pre-allocate
+ * @bs:        bio_set to allocate from
+ *
+ * Description:
+ *    Like bio_alloc_bioset(), but pass in the kiocb. The kiocb is only
+ *    used to check if we should dip into the per-cpu bio_set allocation
+ *    cache. The allocation uses GFP_KERNEL internally. On return, the
+ *    bio is marked BIO_PERCPU_CACHE, and the final put of the bio
+ *    MUST be done from process context, not hard/soft IRQ.
+ */
+struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
+                           struct bio_set *bs)
+{
+       struct bio_alloc_cache *cache;
+       struct bio *bio;
+
+       if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
+               return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
+
+       cache = per_cpu_ptr(bs->cache, get_cpu());
+       bio = bio_list_pop(&cache->free_list);
+       if (bio) {
+               cache->nr--;
+               put_cpu();
+               bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
+               bio->bi_pool = bs;
+               bio_set_flag(bio, BIO_PERCPU_CACHE);
+               return bio;
+       }
+       put_cpu();
+       bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
+       bio_set_flag(bio, BIO_PERCPU_CACHE);
+       return bio;
+}
+EXPORT_SYMBOL_GPL(bio_alloc_kiocb);
+
 static int __init init_bio(void)
 {
        int i;
@@ -1609,6 +1739,9 @@ static int __init init_bio(void)
                                SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
        }
 
+       cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
+                                       bio_cpu_dead);
+
        if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
                panic("bio: can't allocate bios\n");
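
The per-cpu bio cache added above only engages when both the bio_set and the
kiocb opt in. A minimal sketch of a driver-side user, assuming the
BIOSET_PERCPU_CACHE bioset flag and IOCB_ALLOC_CACHE kiocb flag introduced
alongside this code; the example_* names are illustrative, not from the tree:

	static struct bio_set example_bio_set;

	static int example_init(void)
	{
		/* opt this bio_set in to the per-cpu free_list cache */
		return bioset_init(&example_bio_set, BIO_POOL_SIZE, 0,
				   BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
	}

	static void example_read(struct kiocb *kiocb, struct block_device *bdev,
				 struct page *page, sector_t sector)
	{
		struct bio *bio;

		/* only kiocbs flagged IOCB_ALLOC_CACHE dip into the cache */
		bio = bio_alloc_kiocb(kiocb, 1, &example_bio_set);
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_READ;
		bio->bi_iter.bi_sector = sector;
		__bio_add_page(bio, page, PAGE_SIZE, 0);
		submit_bio(bio);
	}

As the kernel-doc above notes, the final bio_put() of such a bio must happen
from process context, since returning it to the cache is not IRQ safe.
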
 
index 575d7a2..3c88a79 100644 (file)
@@ -489,10 +489,9 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 
 const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
-       /* some drivers (floppy) instantiate a queue w/o disk registered */
-       if (blkg->q->backing_dev_info->dev)
-               return bdi_dev_name(blkg->q->backing_dev_info);
-       return NULL;
+       if (!blkg->q->disk || !blkg->q->disk->bdi->dev)
+               return NULL;
+       return bdi_dev_name(blkg->q->disk->bdi);
 }
 
 /**
@@ -790,6 +789,7 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
                struct blkcg_gq *parent = blkg->parent;
                struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
                struct blkg_iostat cur, delta;
+               unsigned long flags;
                unsigned int seq;
 
                /* fetch the current per-cpu values */
@@ -799,21 +799,21 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
                } while (u64_stats_fetch_retry(&bisc->sync, seq));
 
                /* propagate percpu delta to global */
-               u64_stats_update_begin(&blkg->iostat.sync);
+               flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
                blkg_iostat_set(&delta, &cur);
                blkg_iostat_sub(&delta, &bisc->last);
                blkg_iostat_add(&blkg->iostat.cur, &delta);
                blkg_iostat_add(&bisc->last, &delta);
-               u64_stats_update_end(&blkg->iostat.sync);
+               u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
 
                /* propagate global delta to parent (unless that's root) */
                if (parent && parent->parent) {
-                       u64_stats_update_begin(&parent->iostat.sync);
+                       flags = u64_stats_update_begin_irqsave(&parent->iostat.sync);
                        blkg_iostat_set(&delta, &blkg->iostat.cur);
                        blkg_iostat_sub(&delta, &blkg->iostat.last);
                        blkg_iostat_add(&parent->iostat.cur, &delta);
                        blkg_iostat_add(&blkg->iostat.last, &delta);
-                       u64_stats_update_end(&parent->iostat.sync);
+                       u64_stats_update_end_irqrestore(&parent->iostat.sync, flags);
                }
        }
 
@@ -848,6 +848,7 @@ static void blkcg_fill_root_iostats(void)
                memset(&tmp, 0, sizeof(tmp));
                for_each_possible_cpu(cpu) {
                        struct disk_stats *cpu_dkstats;
+                       unsigned long flags;
 
                        cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
                        tmp.ios[BLKG_IOSTAT_READ] +=
@@ -864,104 +865,86 @@ static void blkcg_fill_root_iostats(void)
                        tmp.bytes[BLKG_IOSTAT_DISCARD] +=
                                cpu_dkstats->sectors[STAT_DISCARD] << 9;
 
-                       u64_stats_update_begin(&blkg->iostat.sync);
+                       flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
                        blkg_iostat_set(&blkg->iostat.cur, &tmp);
-                       u64_stats_update_end(&blkg->iostat.sync);
+                       u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
                }
        }
 }
 
-static int blkcg_print_stat(struct seq_file *sf, void *v)
+static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
 {
-       struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
-       struct blkcg_gq *blkg;
-
-       if (!seq_css(sf)->parent)
-               blkcg_fill_root_iostats();
-       else
-               cgroup_rstat_flush(blkcg->css.cgroup);
-
-       rcu_read_lock();
-
-       hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-               struct blkg_iostat_set *bis = &blkg->iostat;
-               const char *dname;
-               char *buf;
-               u64 rbytes, wbytes, rios, wios, dbytes, dios;
-               size_t size = seq_get_buf(sf, &buf), off = 0;
-               int i;
-               bool has_stats = false;
-               unsigned seq;
+       struct blkg_iostat_set *bis = &blkg->iostat;
+       u64 rbytes, wbytes, rios, wios, dbytes, dios;
+       bool has_stats = false;
+       const char *dname;
+       unsigned seq;
+       int i;
 
-               spin_lock_irq(&blkg->q->queue_lock);
+       if (!blkg->online)
+               return;
 
-               if (!blkg->online)
-                       goto skip;
+       dname = blkg_dev_name(blkg);
+       if (!dname)
+               return;
 
-               dname = blkg_dev_name(blkg);
-               if (!dname)
-                       goto skip;
+       seq_printf(s, "%s ", dname);
 
-               /*
-                * Hooray string manipulation, count is the size written NOT
-                * INCLUDING THE \0, so size is now count+1 less than what we
-                * had before, but we want to start writing the next bit from
-                * the \0 so we only add count to buf.
-                */
-               off += scnprintf(buf+off, size-off, "%s ", dname);
+       do {
+               seq = u64_stats_fetch_begin(&bis->sync);
+
+               rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
+               wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
+               dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
+               rios = bis->cur.ios[BLKG_IOSTAT_READ];
+               wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
+               dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
+       } while (u64_stats_fetch_retry(&bis->sync, seq));
+
+       if (rbytes || wbytes || rios || wios) {
+               has_stats = true;
+               seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
+                       rbytes, wbytes, rios, wios,
+                       dbytes, dios);
+       }
 
-               do {
-                       seq = u64_stats_fetch_begin(&bis->sync);
+       if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
+               has_stats = true;
+               seq_printf(s, " use_delay=%d delay_nsec=%llu",
+                       atomic_read(&blkg->use_delay),
+                       atomic64_read(&blkg->delay_nsec));
+       }
 
-                       rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
-                       wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
-                       dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
-                       rios = bis->cur.ios[BLKG_IOSTAT_READ];
-                       wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
-                       dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
-               } while (u64_stats_fetch_retry(&bis->sync, seq));
+       for (i = 0; i < BLKCG_MAX_POLS; i++) {
+               struct blkcg_policy *pol = blkcg_policy[i];
 
-               if (rbytes || wbytes || rios || wios) {
-                       has_stats = true;
-                       off += scnprintf(buf+off, size-off,
-                                        "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
-                                        rbytes, wbytes, rios, wios,
-                                        dbytes, dios);
-               }
+               if (!blkg->pd[i] || !pol->pd_stat_fn)
+                       continue;
 
-               if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
+               if (pol->pd_stat_fn(blkg->pd[i], s))
                        has_stats = true;
-                       off += scnprintf(buf+off, size-off,
-                                        " use_delay=%d delay_nsec=%llu",
-                                        atomic_read(&blkg->use_delay),
-                                       (unsigned long long)atomic64_read(&blkg->delay_nsec));
-               }
+       }
 
-               for (i = 0; i < BLKCG_MAX_POLS; i++) {
-                       struct blkcg_policy *pol = blkcg_policy[i];
-                       size_t written;
+       if (has_stats)
+               seq_printf(s, "\n");
+}
 
-                       if (!blkg->pd[i] || !pol->pd_stat_fn)
-                               continue;
+static int blkcg_print_stat(struct seq_file *sf, void *v)
+{
+       struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+       struct blkcg_gq *blkg;
 
-                       written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
-                       if (written)
-                               has_stats = true;
-                       off += written;
-               }
+       if (!seq_css(sf)->parent)
+               blkcg_fill_root_iostats();
+       else
+               cgroup_rstat_flush(blkcg->css.cgroup);
 
-               if (has_stats) {
-                       if (off < size - 1) {
-                               off += scnprintf(buf+off, size-off, "\n");
-                               seq_commit(sf, off);
-                       } else {
-                               seq_commit(sf, -1);
-                       }
-               }
-       skip:
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+               spin_lock_irq(&blkg->q->queue_lock);
+               blkcg_print_one_stat(blkg, sf);
                spin_unlock_irq(&blkg->q->queue_lock);
        }
-
        rcu_read_unlock();
        return 0;
 }
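
With this conversion a policy's ->pd_stat_fn prints directly into the seq_file
and returns whether it emitted anything, instead of scnprintf()ing into a
caller-supplied buffer. A sketch of the new callback shape, with illustrative
example_* names that are not in the tree:

	static bool example_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
	{
		struct example_grp *grp = pd_to_example(pd);	/* hypothetical helper */

		if (!grp->enabled)
			return false;

		/* appended to the line holding the device name, as iocost/iolatency do */
		seq_printf(s, " example.queued=%u example.bytes=%llu",
			   grp->nr_queued, grp->bytes);
		return true;
	}

	static struct blkcg_policy example_policy = {
		/* other callbacks omitted */
		.pd_stat_fn	= example_pd_stat,
	};
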
index 0447769..5454db2 100644 (file)
@@ -14,7 +14,6 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/backing-dev.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
@@ -122,7 +121,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        rq->internal_tag = BLK_MQ_NO_TAG;
        rq->start_time_ns = ktime_get_ns();
        rq->part = NULL;
-       refcount_set(&rq->ref, 1);
        blk_crypto_rq_set_defaults(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);
@@ -394,10 +392,7 @@ void blk_cleanup_queue(struct request_queue *q)
        /* for synchronous bio-based driver finish in-flight integrity i/o */
        blk_flush_integrity();
 
-       /* @q won't process any more request, flush async actions */
-       del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
        blk_sync_queue(q);
-
        if (queue_is_mq(q))
                blk_mq_exit_queue(q);
 
@@ -534,20 +529,14 @@ struct request_queue *blk_alloc_queue(int node_id)
        if (ret)
                goto fail_id;
 
-       q->backing_dev_info = bdi_alloc(node_id);
-       if (!q->backing_dev_info)
-               goto fail_split;
-
        q->stats = blk_alloc_queue_stats();
        if (!q->stats)
-               goto fail_stats;
+               goto fail_split;
 
        q->node = node_id;
 
        atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
 
-       timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
-                   laptop_mode_timer_fn, 0);
        timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
        INIT_WORK(&q->timeout_work, blk_timeout_work);
        INIT_LIST_HEAD(&q->icq_list);
@@ -572,7 +561,7 @@ struct request_queue *blk_alloc_queue(int node_id)
        if (percpu_ref_init(&q->q_usage_counter,
                                blk_queue_usage_counter_release,
                                PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto fail_bdi;
+               goto fail_stats;
 
        if (blkcg_init_queue(q))
                goto fail_ref;
@@ -585,10 +574,8 @@ struct request_queue *blk_alloc_queue(int node_id)
 
 fail_ref:
        percpu_ref_exit(&q->q_usage_counter);
-fail_bdi:
-       blk_free_queue_stats(q->stats);
 fail_stats:
-       bdi_put(q->backing_dev_info);
+       blk_free_queue_stats(q->stats);
 fail_split:
        bioset_exit(&q->bio_split);
 fail_id:
@@ -834,7 +821,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
        }
 
        if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
-               bio->bi_opf &= ~REQ_HIPRI;
+               bio_clear_hipri(bio);
 
        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
index c5bdaaf..103c2e2 100644 (file)
@@ -332,7 +332,7 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
        if (mode->keysize == 0)
                return -EINVAL;
 
-       if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
+       if (dun_bytes == 0 || dun_bytes > mode->ivsize)
                return -EINVAL;
 
        if (!is_power_of_2(data_unit_size))
index 1002f6c..4201728 100644 (file)
@@ -262,6 +262,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
+bool is_flush_rq(struct request *rq)
+{
+       return rq->end_io == flush_end_io;
+}
+
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
@@ -329,6 +334,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;
+       /*
+        * Order the WRITE of ->end_io before the WRITE of rq->ref. The
+        * pairing barrier is implied by refcount_inc_not_zero() called from
+        * blk_mq_find_and_get_req(), which orders the WRITE/READ of
+        * flush_rq->ref against the READ of flush_rq->end_io.
+        */
+       smp_wmb();
+       refcount_set(&flush_rq->ref, 1);
 
        blk_flush_queue_rq(flush_rq, false);
 }
index 410da06..69a1217 100644 (file)
@@ -431,13 +431,15 @@ void blk_integrity_unregister(struct gendisk *disk)
 }
 EXPORT_SYMBOL(blk_integrity_unregister);
 
-void blk_integrity_add(struct gendisk *disk)
+int blk_integrity_add(struct gendisk *disk)
 {
-       if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
-                                &disk_to_dev(disk)->kobj, "%s", "integrity"))
-               return;
+       int ret;
 
-       kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
+       ret = kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
+                                  &disk_to_dev(disk)->kobj, "%s", "integrity");
+       if (!ret)
+               kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
+       return ret;
 }
 
 void blk_integrity_del(struct gendisk *disk)
index 5fac375..b3880e4 100644 (file)
@@ -2988,34 +2988,29 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
        kfree(iocg);
 }
 
-static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
+static bool ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
 {
        struct ioc_gq *iocg = pd_to_iocg(pd);
        struct ioc *ioc = iocg->ioc;
-       size_t pos = 0;
 
        if (!ioc->enabled)
-               return 0;
+               return false;
 
        if (iocg->level == 0) {
                unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
                        ioc->vtime_base_rate * 10000,
                        VTIME_PER_USEC);
-               pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
-                                 vp10k / 100, vp10k % 100);
+               seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
        }
 
-       pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
-                        iocg->last_stat.usage_us);
+       seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
 
        if (blkcg_debug_stats)
-               pos += scnprintf(buf + pos, size - pos,
-                                " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
-                                iocg->last_stat.wait_us,
-                                iocg->last_stat.indebt_us,
-                                iocg->last_stat.indelay_us);
-
-       return pos;
+               seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
+                       iocg->last_stat.wait_us,
+                       iocg->last_stat.indebt_us,
+                       iocg->last_stat.indelay_us);
+       return true;
 }
 
 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
@@ -3061,19 +3056,19 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
                if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
                        return -EINVAL;
 
-               spin_lock(&blkcg->lock);
+               spin_lock_irq(&blkcg->lock);
                iocc->dfl_weight = v * WEIGHT_ONE;
                hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                        struct ioc_gq *iocg = blkg_to_iocg(blkg);
 
                        if (iocg) {
-                               spin_lock_irq(&iocg->ioc->lock);
+                               spin_lock(&iocg->ioc->lock);
                                ioc_now(iocg->ioc, &now);
                                weight_updated(iocg, &now);
-                               spin_unlock_irq(&iocg->ioc->lock);
+                               spin_unlock(&iocg->ioc->lock);
                        }
                }
-               spin_unlock(&blkcg->lock);
+               spin_unlock_irq(&blkcg->lock);
 
                return nbytes;
        }
index 81be009..c0545f9 100644 (file)
@@ -833,7 +833,11 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
 
        enable = iolatency_set_min_lat_nsec(blkg, lat_val);
        if (enable) {
-               WARN_ON_ONCE(!blk_get_queue(blkg->q));
+               if (!blk_get_queue(blkg->q)) {
+                       ret = -ENODEV;
+                       goto out;
+               }
+
                blkg_get(blkg);
        }
 
@@ -886,8 +890,7 @@ static int iolatency_print_limit(struct seq_file *sf, void *v)
        return 0;
 }
 
-static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
-                                size_t size)
+static bool iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
 {
        struct latency_stat stat;
        int cpu;
@@ -902,39 +905,40 @@ static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
        preempt_enable();
 
        if (iolat->rq_depth.max_depth == UINT_MAX)
-               return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
-                                (unsigned long long)stat.ps.missed,
-                                (unsigned long long)stat.ps.total);
-       return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
-                        (unsigned long long)stat.ps.missed,
-                        (unsigned long long)stat.ps.total,
-                        iolat->rq_depth.max_depth);
+               seq_printf(s, " missed=%llu total=%llu depth=max",
+                       (unsigned long long)stat.ps.missed,
+                       (unsigned long long)stat.ps.total);
+       else
+               seq_printf(s, " missed=%llu total=%llu depth=%u",
+                       (unsigned long long)stat.ps.missed,
+                       (unsigned long long)stat.ps.total,
+                       iolat->rq_depth.max_depth);
+       return true;
 }
 
-static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
-                               size_t size)
+static bool iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
 {
        struct iolatency_grp *iolat = pd_to_lat(pd);
        unsigned long long avg_lat;
        unsigned long long cur_win;
 
        if (!blkcg_debug_stats)
-               return 0;
+               return false;
 
        if (iolat->ssd)
-               return iolatency_ssd_stat(iolat, buf, size);
+               return iolatency_ssd_stat(iolat, s);
 
        avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
        cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
        if (iolat->rq_depth.max_depth == UINT_MAX)
-               return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
-                                avg_lat, cur_win);
-
-       return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
-                        iolat->rq_depth.max_depth, avg_lat, cur_win);
+               seq_printf(s, " depth=max avg_lat=%llu win=%llu",
+                       avg_lat, cur_win);
+       else
+               seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
+                       iolat->rq_depth.max_depth, avg_lat, cur_win);
+       return true;
 }
 
-
 static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
                                                   struct request_queue *q,
                                                   struct blkcg *blkcg)
index 3743158..d1448aa 100644 (file)
@@ -400,7 +400,7 @@ static void bio_copy_kern_endio_read(struct bio *bio)
        struct bvec_iter_all iter_all;
 
        bio_for_each_segment_all(bvec, bio, iter_all) {
-               memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
+               memcpy_from_bvec(p, bvec);
                p += bvec->bv_len;
        }
 
index a11b3b5..7a5c81c 100644 (file)
@@ -285,7 +285,7 @@ split:
         * iopoll in direct IO routine. Given that the performance gain of
         * iopoll for big IO can be trivial, disable iopoll when a split is needed.
         */
-       bio->bi_opf &= ~REQ_HIPRI;
+       bio_clear_hipri(bio);
 
        return bio_split(bio, sectors, GFP_NOIO, bs);
 }
@@ -348,6 +348,8 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
                trace_block_split(split, (*bio)->bi_iter.bi_sector);
                submit_bio_noacct(*bio);
                *bio = split;
+
+               blk_throtl_charge_bio_split(*bio);
        }
 }
 
@@ -705,22 +707,6 @@ static void blk_account_io_merge_request(struct request *req)
        }
 }
 
-/*
- * Two cases of handling DISCARD merge:
- * If max_discard_segments > 1, the driver takes every bio
- * as a range and send them to controller together. The ranges
- * needn't to be contiguous.
- * Otherwise, the bios/requests will be handled as same as
- * others which should be contiguous.
- */
-static inline bool blk_discard_mergable(struct request *req)
-{
-       if (req_op(req) == REQ_OP_DISCARD &&
-           queue_max_discard_segments(req->q) > 1)
-               return true;
-       return false;
-}
-
 static enum elv_merge blk_try_req_merge(struct request *req,
                                        struct request *next)
 {
index 7b52e76..253c857 100644 (file)
@@ -45,60 +45,12 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
        kfree(hctx);
 }
 
-struct blk_mq_ctx_sysfs_entry {
-       struct attribute attr;
-       ssize_t (*show)(struct blk_mq_ctx *, char *);
-       ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
-};
-
 struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
 };
 
-static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
-                                char *page)
-{
-       struct blk_mq_ctx_sysfs_entry *entry;
-       struct blk_mq_ctx *ctx;
-       struct request_queue *q;
-       ssize_t res;
-
-       entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
-       ctx = container_of(kobj, struct blk_mq_ctx, kobj);
-       q = ctx->queue;
-
-       if (!entry->show)
-               return -EIO;
-
-       mutex_lock(&q->sysfs_lock);
-       res = entry->show(ctx, page);
-       mutex_unlock(&q->sysfs_lock);
-       return res;
-}
-
-static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
-                                 const char *page, size_t length)
-{
-       struct blk_mq_ctx_sysfs_entry *entry;
-       struct blk_mq_ctx *ctx;
-       struct request_queue *q;
-       ssize_t res;
-
-       entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
-       ctx = container_of(kobj, struct blk_mq_ctx, kobj);
-       q = ctx->queue;
-
-       if (!entry->store)
-               return -EIO;
-
-       mutex_lock(&q->sysfs_lock);
-       res = entry->store(ctx, page, length);
-       mutex_unlock(&q->sysfs_lock);
-       return res;
-}
-
 static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
 {
@@ -198,23 +150,16 @@ static struct attribute *default_hw_ctx_attrs[] = {
 };
 ATTRIBUTE_GROUPS(default_hw_ctx);
 
-static const struct sysfs_ops blk_mq_sysfs_ops = {
-       .show   = blk_mq_sysfs_show,
-       .store  = blk_mq_sysfs_store,
-};
-
 static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
 };
 
 static struct kobj_type blk_mq_ktype = {
-       .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
 };
 
 static struct kobj_type blk_mq_ctx_ktype = {
-       .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_ctx_sysfs_release,
 };
 
index 2c4ac51..9440499 100644 (file)
@@ -525,7 +525,7 @@ void blk_mq_free_request(struct request *rq)
                __blk_mq_dec_active_requests(hctx);
 
        if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
-               laptop_io_completion(q->backing_dev_info);
+               laptop_io_completion(q->disk->bdi);
 
        rq_qos_done(q, rq);
 
@@ -606,7 +606,7 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
         * This is probably worse than completing the request on a different
         * cache domain.
         */
-       if (force_irqthreads)
+       if (force_irqthreads())
                return false;
 
        /* same CPU or cache domain?  Complete locally */
@@ -911,7 +911,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-       if (is_flush_rq(rq, rq->mq_hctx))
+       if (is_flush_rq(rq))
                rq->end_io(rq, 0);
        else if (refcount_dec_and_test(&rq->ref))
                __blk_mq_free_request(rq);
@@ -923,34 +923,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
        unsigned long *next = priv;
 
        /*
-        * Just do a quick check if it is expired before locking the request in
-        * so we're not unnecessarilly synchronizing across CPUs.
-        */
-       if (!blk_mq_req_expired(rq, next))
-               return true;
-
-       /*
-        * We have reason to believe the request may be expired. Take a
-        * reference on the request to lock this request lifetime into its
-        * currently allocated context to prevent it from being reallocated in
-        * the event the completion by-passes this timeout handler.
-        *
-        * If the reference was already released, then the driver beat the
-        * timeout handler to posting a natural completion.
-        */
-       if (!refcount_inc_not_zero(&rq->ref))
-               return true;
-
-       /*
-        * The request is now locked and cannot be reallocated underneath the
-        * timeout handler's processing. Re-verify this exact request is truly
-        * expired; if it is not expired, then the request was completed and
-        * reallocated as a new request.
+        * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
+        * be reallocated underneath the timeout handler's processing, and
+        * the expire check is reliable. If the request is not expired, it
+        * was completed and reallocated as a new request after returning
+        * from blk_mq_check_expired().
         */
        if (blk_mq_req_expired(rq, next))
                blk_mq_rq_timed_out(rq, reserved);
-
-       blk_mq_put_rq_ref(rq);
        return true;
 }
 
@@ -2994,10 +2974,12 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               if (shared)
+               if (shared) {
                        hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
-               else
+               } else {
+                       blk_mq_tag_idle(hctx);
                        hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
+               }
        }
 }
 
@@ -3133,7 +3115,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_init_queue);
 
-struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata)
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
+               struct lock_class_key *lkclass)
 {
        struct request_queue *q;
        struct gendisk *disk;
@@ -3142,12 +3125,11 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata)
        if (IS_ERR(q))
                return ERR_CAST(q);
 
-       disk = __alloc_disk_node(0, set->numa_node);
+       disk = __alloc_disk_node(q, set->numa_node, lkclass);
        if (!disk) {
                blk_cleanup_queue(q);
                return ERR_PTR(-ENOMEM);
        }
-       disk->queue = q;
        return disk;
 }
 EXPORT_SYMBOL(__blk_mq_alloc_disk);
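
Drivers are not expected to call __blk_mq_alloc_disk() directly; the
blk_mq_alloc_disk() wrapper in blk-mq.h supplies a per-callsite static
lock_class_key. A sketch of the usual call pattern, assuming that wrapper; the
example_* and dev-> names are illustrative only:

	struct gendisk *disk;

	disk = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	disk->fops = &example_fops;
	sprintf(disk->disk_name, "example0");
	set_capacity(disk, nr_sectors);
	/* disk->queue is already allocated and bound; register with add_disk() */
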
index 902c40d..a7c857a 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/pagemap.h>
+#include <linux/backing-dev-defs.h>
 #include <linux/gcd.h>
 #include <linux/lcm.h>
 #include <linux/jiffies.h>
@@ -140,7 +141,9 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
                                 limits->logical_block_size >> SECTOR_SHIFT);
        limits->max_sectors = max_sectors;
 
-       q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
+       if (!q->disk)
+               return;
+       q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -380,18 +383,19 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
 }
 EXPORT_SYMBOL(blk_queue_alignment_offset);
 
-void blk_queue_update_readahead(struct request_queue *q)
+void disk_update_readahead(struct gendisk *disk)
 {
+       struct request_queue *q = disk->queue;
+
        /*
         * For read-ahead of large files to be effective, we need to read ahead
         * at least twice the optimal I/O size.
         */
-       q->backing_dev_info->ra_pages =
+       disk->bdi->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
-       q->backing_dev_info->io_pages =
-               queue_max_sectors(q) >> (PAGE_SHIFT - 9);
+       disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
 }
-EXPORT_SYMBOL_GPL(blk_queue_update_readahead);
+EXPORT_SYMBOL_GPL(disk_update_readahead);
 
 /**
  * blk_limits_io_min - set minimum request size for a device
@@ -471,7 +475,9 @@ EXPORT_SYMBOL(blk_limits_io_opt);
 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 {
        blk_limits_io_opt(&q->limits, opt);
-       q->backing_dev_info->ra_pages =
+       if (!q->disk)
+               return;
+       q->disk->bdi->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
 }
 EXPORT_SYMBOL(blk_queue_io_opt);
@@ -661,17 +667,11 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
        struct request_queue *t = disk->queue;
 
        if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
-                       get_start_sect(bdev) + (offset >> 9)) < 0) {
-               char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
-
-               disk_name(disk, 0, top);
-               bdevname(bdev, bottom);
-
-               printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
-                      top, bottom);
-       }
+                       get_start_sect(bdev) + (offset >> 9)) < 0)
+               pr_notice("%s: Warning: Device %pg is misaligned\n",
+                       disk->disk_name, bdev);
 
-       blk_queue_update_readahead(disk->queue);
+       disk_update_readahead(disk);
 }
 EXPORT_SYMBOL(disk_stack_limits);
 
index 370d83c..614d9d4 100644 (file)
@@ -88,9 +88,11 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-       unsigned long ra_kb = q->backing_dev_info->ra_pages <<
-                                       (PAGE_SHIFT - 10);
+       unsigned long ra_kb;
 
+       if (!q->disk)
+               return -EINVAL;
+       ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
        return queue_var_show(ra_kb, page);
 }
 
@@ -98,13 +100,14 @@ static ssize_t
 queue_ra_store(struct request_queue *q, const char *page, size_t count)
 {
        unsigned long ra_kb;
-       ssize_t ret = queue_var_store(&ra_kb, page, count);
+       ssize_t ret;
 
+       if (!q->disk)
+               return -EINVAL;
+       ret = queue_var_store(&ra_kb, page, count);
        if (ret < 0)
                return ret;
-
-       q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
-
+       q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
        return ret;
 }
 
@@ -251,7 +254,8 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 
        spin_lock_irq(&q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
-       q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
+       if (q->disk)
+               q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(&q->queue_lock);
 
        return ret;
@@ -766,13 +770,6 @@ static void blk_exit_queue(struct request_queue *q)
         * e.g. blkcg_print_blkgs() to crash.
         */
        blkcg_exit_queue(q);
-
-       /*
-        * Since the cgroup code may dereference the @q->backing_dev_info
-        * pointer, only decrease its reference count after having removed the
-        * association with the block cgroup controller.
-        */
-       bdi_put(q->backing_dev_info);
 }
 
 /**
@@ -859,15 +856,6 @@ int blk_register_queue(struct gendisk *disk)
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;
 
-       if (WARN_ON(!q))
-               return -ENXIO;
-
-       WARN_ONCE(blk_queue_registered(q),
-                 "%s is registering an already registered queue\n",
-                 kobject_name(&dev->kobj));
-
-       blk_queue_update_readahead(q);
-
        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;
@@ -941,7 +929,6 @@ unlock:
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(blk_register_queue);
 
 /**
  * blk_unregister_queue - counterpart of blk_register_queue()
index b1b22d8..55c4901 100644 (file)
@@ -178,6 +178,9 @@ struct throtl_grp {
        unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
        unsigned long bio_cnt_reset_time;
 
+       atomic_t io_split_cnt[2];
+       atomic_t last_io_split_cnt[2];
+
        struct blkg_rwstat stat_bytes;
        struct blkg_rwstat stat_ios;
 };
@@ -777,6 +780,8 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
 
+       atomic_set(&tg->io_split_cnt[rw], 0);
+
        /*
         * Previous slice has expired. We must have trimmed it after last
         * bio dispatch. That means since start of last slice, we never used
@@ -799,6 +804,9 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
        tg->io_disp[rw] = 0;
        tg->slice_start[rw] = jiffies;
        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
+
+       atomic_set(&tg->io_split_cnt[rw], 0);
+
        throtl_log(&tg->service_queue,
                   "[%c] new slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
@@ -1031,6 +1039,9 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
                                jiffies + tg->td->throtl_slice);
        }
 
+       if (iops_limit != UINT_MAX)
+               tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
+
        if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
            tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
                if (wait)
@@ -2052,12 +2063,14 @@ static void throtl_downgrade_check(struct throtl_grp *tg)
        }
 
        if (tg->iops[READ][LIMIT_LOW]) {
+               tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
                iops = tg->last_io_disp[READ] * HZ / elapsed_time;
                if (iops >= tg->iops[READ][LIMIT_LOW])
                        tg->last_low_overflow_time[READ] = now;
        }
 
        if (tg->iops[WRITE][LIMIT_LOW]) {
+               tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
                iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
                if (iops >= tg->iops[WRITE][LIMIT_LOW])
                        tg->last_low_overflow_time[WRITE] = now;
@@ -2176,6 +2189,25 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
 }
 #endif
 
+void blk_throtl_charge_bio_split(struct bio *bio)
+{
+       struct blkcg_gq *blkg = bio->bi_blkg;
+       struct throtl_grp *parent = blkg_to_tg(blkg);
+       struct throtl_service_queue *parent_sq;
+       bool rw = bio_data_dir(bio);
+
+       do {
+               if (!parent->has_rules[rw])
+                       break;
+
+               atomic_inc(&parent->io_split_cnt[rw]);
+               atomic_inc(&parent->last_io_split_cnt[rw]);
+
+               parent_sq = parent->service_queue.parent_sq;
+               parent = sq_to_tg(parent_sq);
+       } while (parent);
+}
+
 bool blk_throtl_bio(struct bio *bio)
 {
        struct request_queue *q = bio->bi_bdev->bd_disk->queue;
index 3ed71b8..874c1c3 100644 (file)
@@ -97,7 +97,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
  */
 static bool wb_recent_wait(struct rq_wb *rwb)
 {
-       struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
+       struct bdi_writeback *wb = &rwb->rqos.q->disk->bdi->wb;
 
        return time_before(jiffies, wb->dirty_sleep + HZ);
 }
@@ -234,7 +234,7 @@ enum {
 
 static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 {
-       struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+       struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
        struct rq_depth *rqd = &rwb->rq_depth;
        u64 thislat;
 
@@ -287,7 +287,7 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 
 static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
 {
-       struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+       struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
        struct rq_depth *rqd = &rwb->rq_depth;
 
        trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
@@ -359,7 +359,7 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
 
        status = latency_exceeded(rwb, cb->stat);
 
-       trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
+       trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step,
                        inflight);
 
        /*
index 86fce75..1d0c76c 100644 (file)
@@ -360,9 +360,6 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
        if (!blk_queue_is_zoned(q))
                return -ENOTTY;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
-
        if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
                return -EFAULT;
 
@@ -421,9 +418,6 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
        if (!blk_queue_is_zoned(q))
                return -ENOTTY;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
-
        if (!(mode & FMODE_WRITE))
                return -EBADF;
 
index 4b885c0..8c96b0c 100644 (file)
@@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q)
        kobject_get(&q->kobj);
 }
 
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
-       return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
 
 struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                              gfp_t flags);
@@ -132,7 +128,7 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
                                bip_next->bip_vec[0].bv_offset);
 }
 
-void blk_integrity_add(struct gendisk *);
+int blk_integrity_add(struct gendisk *disk);
 void blk_integrity_del(struct gendisk *);
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 static inline bool blk_integrity_merge_rq(struct request_queue *rq,
@@ -166,8 +162,9 @@ static inline bool bio_integrity_endio(struct bio *bio)
 static inline void bio_integrity_free(struct bio *bio)
 {
 }
-static inline void blk_integrity_add(struct gendisk *disk)
+static inline int blk_integrity_add(struct gendisk *disk)
 {
+       return 0;
 }
 static inline void blk_integrity_del(struct gendisk *disk)
 {
@@ -293,11 +290,13 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern void blk_throtl_register_queue(struct request_queue *q);
+extern void blk_throtl_charge_bio_split(struct bio *bio);
 bool blk_throtl_bio(struct bio *bio);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
 static inline void blk_throtl_register_queue(struct request_queue *q) { }
+static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
 static inline bool blk_throtl_bio(struct bio *bio) { return false; }
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
@@ -344,15 +343,14 @@ static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
 
 int blk_alloc_ext_minor(void);
 void blk_free_ext_minor(unsigned int minor);
-char *disk_name(struct gendisk *hd, int partno, char *buf);
 #define ADDPART_FLAG_NONE      0
 #define ADDPART_FLAG_RAID      1
 #define ADDPART_FLAG_WHOLEDISK 2
-int bdev_add_partition(struct block_device *bdev, int partno,
-               sector_t start, sector_t length);
-int bdev_del_partition(struct block_device *bdev, int partno);
-int bdev_resize_partition(struct block_device *bdev, int partno,
-               sector_t start, sector_t length);
+int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
+               sector_t length);
+int bdev_del_partition(struct gendisk *disk, int partno);
+int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
+               sector_t length);
 
 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
                struct page *page, unsigned int len, unsigned int offset,
@@ -360,7 +358,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
 
 struct request_queue *blk_alloc_queue(int node_id);
 
-void disk_alloc_events(struct gendisk *disk);
+int disk_alloc_events(struct gendisk *disk);
 void disk_add_events(struct gendisk *disk);
 void disk_del_events(struct gendisk *disk);
 void disk_release_events(struct gendisk *disk);
@@ -368,4 +366,11 @@ extern struct device_attribute dev_attr_events;
 extern struct device_attribute dev_attr_events_async;
 extern struct device_attribute dev_attr_events_poll_msecs;
 
+static inline void bio_clear_hipri(struct bio *bio)
+{
+       /* can't support alloc cache if we turn off polling */
+       bio_clear_flag(bio, BIO_PERCPU_CACHE);
+       bio->bi_opf &= ~REQ_HIPRI;
+}
+
 #endif /* BLK_INTERNAL_H */
index 94081e0..05fc714 100644 (file)
@@ -67,18 +67,6 @@ static __init int init_emergency_pool(void)
 
 __initcall(init_emergency_pool);
 
-/*
- * highmem version, map in to vec
- */
-static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
-{
-       unsigned char *vto;
-
-       vto = kmap_atomic(to->bv_page);
-       memcpy(vto + to->bv_offset, vfrom, to->bv_len);
-       kunmap_atomic(vto);
-}
-
 /*
  * Simple bounce buffer support for highmem pages. Depending on the
  * queue gfp mask set, *to may or may not be a highmem page. kmap it
@@ -86,7 +74,6 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
  */
 static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 {
-       unsigned char *vfrom;
        struct bio_vec tovec, fromvec;
        struct bvec_iter iter;
        /*
@@ -104,11 +91,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
                         * been modified by the block layer, so use the original
                         * copy, bounce_copy_vec already uses tovec->bv_len
                         */
-                       vfrom = page_address(fromvec.bv_page) +
-                               tovec.bv_offset;
-
-                       bounce_copy_vec(&tovec, vfrom);
-                       flush_dcache_page(tovec.bv_page);
+                       memcpy_to_bvec(&tovec, page_address(fromvec.bv_page) +
+                                      tovec.bv_offset);
                }
                bio_advance_iter(from, &from_iter, tovec.bv_len);
        }
@@ -255,24 +239,19 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
         * because the 'bio' is single-page bvec.
         */
        for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
-               struct page *page = to->bv_page;
+               struct page *bounce_page;
 
-               if (!PageHighMem(page))
+               if (!PageHighMem(to->bv_page))
                        continue;
 
-               to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
-               inc_zone_page_state(to->bv_page, NR_BOUNCE);
+               bounce_page = mempool_alloc(&page_pool, GFP_NOIO);
+               inc_zone_page_state(bounce_page, NR_BOUNCE);
 
                if (rw == WRITE) {
-                       char *vto, *vfrom;
-
-                       flush_dcache_page(page);
-
-                       vto = page_address(to->bv_page) + to->bv_offset;
-                       vfrom = kmap_atomic(page) + to->bv_offset;
-                       memcpy(vto, vfrom, to->bv_len);
-                       kunmap_atomic(vfrom);
+                       flush_dcache_page(to->bv_page);
+                       memcpy_from_bvec(page_address(bounce_page), to);
                }
+               to->bv_page = bounce_page;
        }
 
        trace_block_bio_bounce(*bio_orig);
diff --git a/block/cmdline-parser.c b/block/cmdline-parser.c
deleted file mode 100644 (file)
index f2a1457..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Parse command line, get partition information
- *
- * Written by Cai Zhiyong <caizhiyong@huawei.com>
- *
- */
-#include <linux/export.h>
-#include <linux/cmdline-parser.h>
-
-static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
-{
-       int ret = 0;
-       struct cmdline_subpart *new_subpart;
-
-       *subpart = NULL;
-
-       new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL);
-       if (!new_subpart)
-               return -ENOMEM;
-
-       if (*partdef == '-') {
-               new_subpart->size = (sector_t)(~0ULL);
-               partdef++;
-       } else {
-               new_subpart->size = (sector_t)memparse(partdef, &partdef);
-               if (new_subpart->size < (sector_t)PAGE_SIZE) {
-                       pr_warn("cmdline partition size is invalid.");
-                       ret = -EINVAL;
-                       goto fail;
-               }
-       }
-
-       if (*partdef == '@') {
-               partdef++;
-               new_subpart->from = (sector_t)memparse(partdef, &partdef);
-       } else {
-               new_subpart->from = (sector_t)(~0ULL);
-       }
-
-       if (*partdef == '(') {
-               int length;
-               char *next = strchr(++partdef, ')');
-
-               if (!next) {
-                       pr_warn("cmdline partition format is invalid.");
-                       ret = -EINVAL;
-                       goto fail;
-               }
-
-               length = min_t(int, next - partdef,
-                              sizeof(new_subpart->name) - 1);
-               strncpy(new_subpart->name, partdef, length);
-               new_subpart->name[length] = '\0';
-
-               partdef = ++next;
-       } else
-               new_subpart->name[0] = '\0';
-
-       new_subpart->flags = 0;
-
-       if (!strncmp(partdef, "ro", 2)) {
-               new_subpart->flags |= PF_RDONLY;
-               partdef += 2;
-       }
-
-       if (!strncmp(partdef, "lk", 2)) {
-               new_subpart->flags |= PF_POWERUP_LOCK;
-               partdef += 2;
-       }
-
-       *subpart = new_subpart;
-       return 0;
-fail:
-       kfree(new_subpart);
-       return ret;
-}
-
-static void free_subpart(struct cmdline_parts *parts)
-{
-       struct cmdline_subpart *subpart;
-
-       while (parts->subpart) {
-               subpart = parts->subpart;
-               parts->subpart = subpart->next_subpart;
-               kfree(subpart);
-       }
-}
-
-static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
-{
-       int ret = -EINVAL;
-       char *next;
-       int length;
-       struct cmdline_subpart **next_subpart;
-       struct cmdline_parts *newparts;
-       char buf[BDEVNAME_SIZE + 32 + 4];
-
-       *parts = NULL;
-
-       newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL);
-       if (!newparts)
-               return -ENOMEM;
-
-       next = strchr(bdevdef, ':');
-       if (!next) {
-               pr_warn("cmdline partition has no block device.");
-               goto fail;
-       }
-
-       length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
-       strncpy(newparts->name, bdevdef, length);
-       newparts->name[length] = '\0';
-       newparts->nr_subparts = 0;
-
-       next_subpart = &newparts->subpart;
-
-       while (next && *(++next)) {
-               bdevdef = next;
-               next = strchr(bdevdef, ',');
-
-               length = (!next) ? (sizeof(buf) - 1) :
-                       min_t(int, next - bdevdef, sizeof(buf) - 1);
-
-               strncpy(buf, bdevdef, length);
-               buf[length] = '\0';
-
-               ret = parse_subpart(next_subpart, buf);
-               if (ret)
-                       goto fail;
-
-               newparts->nr_subparts++;
-               next_subpart = &(*next_subpart)->next_subpart;
-       }
-
-       if (!newparts->subpart) {
-               pr_warn("cmdline partition has no valid partition.");
-               ret = -EINVAL;
-               goto fail;
-       }
-
-       *parts = newparts;
-
-       return 0;
-fail:
-       free_subpart(newparts);
-       kfree(newparts);
-       return ret;
-}
-
-void cmdline_parts_free(struct cmdline_parts **parts)
-{
-       struct cmdline_parts *next_parts;
-
-       while (*parts) {
-               next_parts = (*parts)->next_parts;
-               free_subpart(*parts);
-               kfree(*parts);
-               *parts = next_parts;
-       }
-}
-EXPORT_SYMBOL(cmdline_parts_free);
-
-int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
-{
-       int ret;
-       char *buf;
-       char *pbuf;
-       char *next;
-       struct cmdline_parts **next_parts;
-
-       *parts = NULL;
-
-       next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       next_parts = parts;
-
-       while (next && *pbuf) {
-               next = strchr(pbuf, ';');
-               if (next)
-                       *next = '\0';
-
-               ret = parse_parts(next_parts, pbuf);
-               if (ret)
-                       goto fail;
-
-               if (next)
-                       pbuf = ++next;
-
-               next_parts = &(*next_parts)->next_parts;
-       }
-
-       if (!*parts) {
-               pr_warn("cmdline partition has no valid partition.");
-               ret = -EINVAL;
-               goto fail;
-       }
-
-       ret = 0;
-done:
-       kfree(buf);
-       return ret;
-
-fail:
-       cmdline_parts_free(parts);
-       goto done;
-}
-EXPORT_SYMBOL(cmdline_parts_parse);
-
-struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
-                                        const char *bdev)
-{
-       while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
-               parts = parts->next_parts;
-       return parts;
-}
-EXPORT_SYMBOL(cmdline_parts_find);
-
-/*
- *  add_part()
- *    0 success.
- *    1 can not add so many partitions.
- */
-int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-                     int slot,
-                     int (*add_part)(int, struct cmdline_subpart *, void *),
-                     void *param)
-{
-       sector_t from = 0;
-       struct cmdline_subpart *subpart;
-
-       for (subpart = parts->subpart; subpart;
-            subpart = subpart->next_subpart, slot++) {
-               if (subpart->from == (sector_t)(~0ULL))
-                       subpart->from = from;
-               else
-                       from = subpart->from;
-
-               if (from >= disk_size)
-                       break;
-
-               if (subpart->size > (disk_size - from))
-                       subpart->size = disk_size - from;
-
-               from += subpart->size;
-
-               if (add_part(slot, subpart, param))
-                       break;
-       }
-
-       return slot;
-}
-EXPORT_SYMBOL(cmdline_parts_set);
index a75931f..8d5496e 100644 (file)
@@ -163,15 +163,31 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
        spin_unlock_irq(&ev->lock);
 }
 
+/*
+ * Tell userland about new events.  Only the events listed in @disk->events are
+ * reported, and only if DISK_EVENT_FLAG_UEVENT is set.  Otherwise, events are
+ * processed internally but never get reported to userland.
+ */
+static void disk_event_uevent(struct gendisk *disk, unsigned int events)
+{
+       char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
+       int nr_events = 0, i;
+
+       for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
+               if (events & disk->events & (1 << i))
+                       envp[nr_events++] = disk_uevents[i];
+
+       if (nr_events)
+               kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
+}
+
 static void disk_check_events(struct disk_events *ev,
                              unsigned int *clearing_ptr)
 {
        struct gendisk *disk = ev->disk;
-       char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
        unsigned int clearing = *clearing_ptr;
        unsigned int events;
        unsigned long intv;
-       int nr_events = 0, i;
 
        /* check events */
        events = disk->fops->check_events(disk, clearing);
@@ -190,19 +206,11 @@ static void disk_check_events(struct disk_events *ev,
 
        spin_unlock_irq(&ev->lock);
 
-       /*
-        * Tell userland about new events.  Only the events listed in
-        * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
-        * is set. Otherwise, events are processed internally but never
-        * get reported to userland.
-        */
-       for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
-               if ((events & disk->events & (1 << i)) &&
-                   (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
-                       envp[nr_events++] = disk_uevents[i];
+       if (events & DISK_EVENT_MEDIA_CHANGE)
+               inc_diskseq(disk);
 
-       if (nr_events)
-               kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
+       if (disk->event_flags & DISK_EVENT_FLAG_UEVENT)
+               disk_event_uevent(disk, events);
 }
 
 /**
@@ -281,6 +289,32 @@ bool bdev_check_media_change(struct block_device *bdev)
 }
 EXPORT_SYMBOL(bdev_check_media_change);
 
+/**
+ * disk_force_media_change - force a media change event
+ * @disk: the disk which will raise the event
+ * @events: the events to raise
+ *
+ * Generate uevents for the disk. If DISK_EVENT_MEDIA_CHANGE is present in
+ * @events, also attempt to free all dentries and inodes and invalidate all
+ * block device page cache entries for the disk.
+ *
+ * Returns %true if DISK_EVENT_MEDIA_CHANGE was raised, or %false if not.
+ */
+bool disk_force_media_change(struct gendisk *disk, unsigned int events)
+{
+       disk_event_uevent(disk, events);
+
+       if (!(events & DISK_EVENT_MEDIA_CHANGE))
+               return false;
+
+       if (__invalidate_device(disk->part0, true))
+               pr_warn("VFS: busy inodes on changed media %s\n",
+                       disk->disk_name);
+       set_bit(GD_NEED_PART_SCAN, &disk->state);
+       return true;
+}
+EXPORT_SYMBOL_GPL(disk_force_media_change);
+
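
Drivers that learn about a medium swap outside of their check_events() callback can use the helper above to notify userspace and schedule a partition rescan. A one-line sketch in a hypothetical driver context (the surrounding driver code is assumed, not shown here):

	/* Hypothetical: the medium was swapped behind our back; tell userspace
	 * and make sure stale dentries/page cache for the old medium are dropped.
	 */
	disk_force_media_change(disk, DISK_EVENT_MEDIA_CHANGE);
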
 /*
  * Separate this part out so that a different pointer for clearing_ptr can be
  * passed in for disk_clear_events.
@@ -410,17 +444,17 @@ module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
 /*
  * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
  */
-void disk_alloc_events(struct gendisk *disk)
+int disk_alloc_events(struct gendisk *disk)
 {
        struct disk_events *ev;
 
        if (!disk->fops->check_events || !disk->events)
-               return;
+               return 0;
 
        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev) {
                pr_warn("%s: failed to initialize events\n", disk->disk_name);
-               return;
+               return -ENOMEM;
        }
 
        INIT_LIST_HEAD(&ev->node);
@@ -432,6 +466,7 @@ void disk_alloc_events(struct gendisk *disk)
        INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
 
        disk->ev = ev;
+       return 0;
 }
 
 void disk_add_events(struct gendisk *disk)
index 52ada14..ff45d83 100644 (file)
@@ -336,6 +336,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
+
+               if (blk_discard_mergable(__rq))
+                       return ELEVATOR_DISCARD_MERGE;
                return ELEVATOR_BACK_MERGE;
        }
 
@@ -630,6 +633,9 @@ static inline bool elv_support_iosched(struct request_queue *q)
  */
 static struct elevator_type *elevator_get_default(struct request_queue *q)
 {
+       if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
+               return NULL;
+
        if (q->nr_hw_queues != 1 &&
                        !blk_mq_is_sbitmap_shared(q->tag_set->flags))
                return NULL;
@@ -702,7 +708,6 @@ void elevator_init_mq(struct request_queue *q)
                elevator_put(e);
        }
 }
-EXPORT_SYMBOL_GPL(elevator_init_mq); /* only for dm-rq */
 
 /*
  * switch to new_e io scheduler. be careful not to introduce deadlocks -
index 298ee78..567549a 100644 (file)
 
 static struct kobject *block_depr;
 
+/*
+ * Unique, monotonically increasing sequential number associated with block
+ * device instances (i.e. incremented each time a device is attached).
+ * Associating uevents with block devices in userspace is difficult and racy:
+ * the uevent netlink socket is lossy, and on slow and overloaded systems has
+ * a very high latency.
+ * Block devices do not have exclusive owners in userspace; any process can set
+ * one up (e.g. loop devices). Moreover, device names can be reused (e.g. loop0
+ * can be reused again and again).
+ * A userspace process setting up a block device and watching for its events
+ * thus cannot reliably tell whether an event relates to the device it just set
+ * up or to an earlier instance with the same name.
+ * This sequential number allows userspace processes to solve this problem, and
+ * uniquely associate an uevent with the lifetime of a device.
+ */
+static atomic64_t diskseq;
+
 /* for extended dynamic devt allocation, currently only one major is used */
 #define NR_EXT_DEVT            (1 << MINORBITS)
 static DEFINE_IDA(ext_devt_ida);
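
The sequence number is exported to userspace as a DISKSEQ uevent variable, as a diskseq sysfs attribute, and through the BLKGETDISKSEQ ioctl added further down in this series. A minimal userspace sketch, assuming BLKGETDISKSEQ is visible via <linux/fs.h> on a kernel with this change; the device path is only an example:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(void)
	{
		uint64_t seq = 0;
		int fd = open("/dev/loop0", O_RDONLY);	/* example device only */

		if (fd < 0)
			return 1;
		if (ioctl(fd, BLKGETDISKSEQ, &seq) == 0)
			printf("diskseq: %llu\n", (unsigned long long)seq);
		close(fd);
		return 0;
	}

A process that set up the device can then compare this value against the DISKSEQ=<n> variable carried by later uevents and ignore events that belong to an older instance reusing the same name.
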
@@ -60,7 +77,8 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
         * initial capacity during probing.
         */
        if (size == capacity ||
-           (disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
+           !disk_live(disk) ||
+           (disk->flags & GENHD_FL_HIDDEN))
                return false;
 
        pr_info("%s: detected capacity change from %lld to %lld\n",
@@ -78,11 +96,17 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
 EXPORT_SYMBOL_GPL(set_capacity_and_notify);
 
 /*
- * Format the device name of the indicated disk into the supplied buffer and
- * return a pointer to that same buffer for convenience.
+ * Format the device name of the indicated block device into the supplied buffer
+ * and return a pointer to that same buffer for convenience.
+ *
+ * Note: do not use this in new code; use the %pg specifier with sprintf and
+ * printk instead.
  */
-char *disk_name(struct gendisk *hd, int partno, char *buf)
+const char *bdevname(struct block_device *bdev, char *buf)
 {
+       struct gendisk *hd = bdev->bd_disk;
+       int partno = bdev->bd_partno;
+
        if (!partno)
                snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
        else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
@@ -92,11 +116,6 @@ char *disk_name(struct gendisk *hd, int partno, char *buf)
 
        return buf;
 }
-
-const char *bdevname(struct block_device *bdev, char *buf)
-{
-       return disk_name(bdev->bd_disk, bdev->bd_partno, buf);
-}
 EXPORT_SYMBOL(bdevname);
 
 static void part_stat_read_all(struct block_device *part,
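
As the note above says, new code should let printk format the name via the %pg specifier instead of filling a BDEVNAME_SIZE buffer with bdevname(). A hedged sketch of the preferred style; the function name and message are illustrative only:

	/* Illustrative only: %pg formats a struct block_device directly, e.g.
	 * "sda", "sda1" or "loop0p1", so no on-stack name buffer is needed.
	 */
	static void example_report_bdev(struct block_device *bdev)
	{
		pr_info("resetting %pg\n", bdev);
	}
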
@@ -294,54 +313,19 @@ void unregister_blkdev(unsigned int major, const char *name)
 
 EXPORT_SYMBOL(unregister_blkdev);
 
-/**
- * blk_mangle_minor - scatter minor numbers apart
- * @minor: minor number to mangle
- *
- * Scatter consecutively allocated @minor number apart if MANGLE_DEVT
- * is enabled.  Mangling twice gives the original value.
- *
- * RETURNS:
- * Mangled value.
- *
- * CONTEXT:
- * Don't care.
- */
-static int blk_mangle_minor(int minor)
-{
-#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
-       int i;
-
-       for (i = 0; i < MINORBITS / 2; i++) {
-               int low = minor & (1 << i);
-               int high = minor & (1 << (MINORBITS - 1 - i));
-               int distance = MINORBITS - 1 - 2 * i;
-
-               minor ^= low | high;    /* clear both bits */
-               low <<= distance;       /* swap the positions */
-               high >>= distance;
-               minor |= low | high;    /* and set */
-       }
-#endif
-       return minor;
-}
-
 int blk_alloc_ext_minor(void)
 {
        int idx;
 
        idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
-       if (idx < 0) {
-               if (idx == -ENOSPC)
-                       return -EBUSY;
-               return idx;
-       }
-       return blk_mangle_minor(idx);
+       if (idx == -ENOSPC)
+               return -EBUSY;
+       return idx;
 }
 
 void blk_free_ext_minor(unsigned int minor)
 {
-       ida_free(&ext_devt_ida, blk_mangle_minor(minor));
+       ida_free(&ext_devt_ida, minor);
 }
 
 static char *bdevt_str(dev_t devt, char *buf)
@@ -390,78 +374,20 @@ static void disk_scan_partitions(struct gendisk *disk)
                blkdev_put(bdev, FMODE_READ);
 }
 
-static void register_disk(struct device *parent, struct gendisk *disk,
-                         const struct attribute_group **groups)
-{
-       struct device *ddev = disk_to_dev(disk);
-       int err;
-
-       ddev->parent = parent;
-
-       dev_set_name(ddev, "%s", disk->disk_name);
-
-       /* delay uevents, until we scanned partition table */
-       dev_set_uevent_suppress(ddev, 1);
-
-       if (groups) {
-               WARN_ON(ddev->groups);
-               ddev->groups = groups;
-       }
-       if (device_add(ddev))
-               return;
-       if (!sysfs_deprecated) {
-               err = sysfs_create_link(block_depr, &ddev->kobj,
-                                       kobject_name(&ddev->kobj));
-               if (err) {
-                       device_del(ddev);
-                       return;
-               }
-       }
-
-       /*
-        * avoid probable deadlock caused by allocating memory with
-        * GFP_KERNEL in runtime_resume callback of its all ancestor
-        * devices
-        */
-       pm_runtime_set_memalloc_noio(ddev, true);
-
-       disk->part0->bd_holder_dir =
-               kobject_create_and_add("holders", &ddev->kobj);
-       disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
-
-       if (disk->flags & GENHD_FL_HIDDEN)
-               return;
-
-       disk_scan_partitions(disk);
-
-       /* announce the disk and partitions after all partitions are created */
-       dev_set_uevent_suppress(ddev, 0);
-       disk_uevent(disk, KOBJ_ADD);
-
-       if (disk->queue->backing_dev_info->dev) {
-               err = sysfs_create_link(&ddev->kobj,
-                         &disk->queue->backing_dev_info->dev->kobj,
-                         "bdi");
-               WARN_ON(err);
-       }
-}
-
 /**
- * __device_add_disk - add disk information to kernel list
+ * device_add_disk - add disk information to kernel list
  * @parent: parent device for the disk
  * @disk: per-device partitioning information
  * @groups: Additional per-device sysfs groups
- * @register_queue: register the queue if set to true
  *
  * This function registers the partitioning information in @disk
  * with the kernel.
- *
- * FIXME: error handling
  */
-static void __device_add_disk(struct device *parent, struct gendisk *disk,
-                             const struct attribute_group **groups,
-                             bool register_queue)
+int device_add_disk(struct device *parent, struct gendisk *disk,
+                    const struct attribute_group **groups)
+
 {
+       struct device *ddev = disk_to_dev(disk);
        int ret;
 
        /*
@@ -470,8 +396,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
         * elevator if one is needed, that is, for devices requesting queue
         * registration.
         */
-       if (register_queue)
-               elevator_init_mq(disk->queue);
+       elevator_init_mq(disk->queue);
 
        /*
         * If the driver provides an explicit major number it also must provide
@@ -481,7 +406,8 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
         * and all partitions from the extended dev_t space.
         */
        if (disk->major) {
-               WARN_ON(!disk->minors);
+               if (WARN_ON(!disk->minors))
+                       return -EINVAL;
 
                if (disk->minors > DISK_MAX_PARTS) {
                        pr_err("block: can't allocate more than %d partitions\n",
@@ -489,21 +415,65 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
                        disk->minors = DISK_MAX_PARTS;
                }
        } else {
-               WARN_ON(disk->minors);
+               if (WARN_ON(disk->minors))
+                       return -EINVAL;
 
                ret = blk_alloc_ext_minor();
-               if (ret < 0) {
-                       WARN_ON(1);
-                       return;
-               }
+               if (ret < 0)
+                       return ret;
                disk->major = BLOCK_EXT_MAJOR;
-               disk->first_minor = MINOR(ret);
+               disk->first_minor = ret;
                disk->flags |= GENHD_FL_EXT_DEVT;
        }
 
-       disk->flags |= GENHD_FL_UP;
+       ret = disk_alloc_events(disk);
+       if (ret)
+               goto out_free_ext_minor;
 
-       disk_alloc_events(disk);
+       /* delay uevents, until we scanned partition table */
+       dev_set_uevent_suppress(ddev, 1);
+
+       ddev->parent = parent;
+       ddev->groups = groups;
+       dev_set_name(ddev, "%s", disk->disk_name);
+       if (!(disk->flags & GENHD_FL_HIDDEN))
+               ddev->devt = MKDEV(disk->major, disk->first_minor);
+       ret = device_add(ddev);
+       if (ret)
+               goto out_disk_release_events;
+       if (!sysfs_deprecated) {
+               ret = sysfs_create_link(block_depr, &ddev->kobj,
+                                       kobject_name(&ddev->kobj));
+               if (ret)
+                       goto out_device_del;
+       }
+
+       /*
+        * avoid probable deadlock caused by allocating memory with
+        * GFP_KERNEL in the runtime_resume callback of any of its ancestor
+        * devices
+        */
+       pm_runtime_set_memalloc_noio(ddev, true);
+
+       ret = blk_integrity_add(disk);
+       if (ret)
+               goto out_del_block_link;
+
+       disk->part0->bd_holder_dir =
+               kobject_create_and_add("holders", &ddev->kobj);
+       if (!disk->part0->bd_holder_dir)
+               goto out_del_integrity;
+       disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
+       if (!disk->slave_dir)
+               goto out_put_holder_dir;
+
+       ret = bd_register_pending_holders(disk);
+       if (ret < 0)
+               goto out_put_slave_dir;
+
+       ret = blk_register_queue(disk);
+       if (ret)
+               goto out_put_slave_dir;
 
        if (disk->flags & GENHD_FL_HIDDEN) {
                /*
@@ -513,48 +483,56 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
                disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
                disk->flags |= GENHD_FL_NO_PART_SCAN;
        } else {
-               struct backing_dev_info *bdi = disk->queue->backing_dev_info;
-               struct device *dev = disk_to_dev(disk);
-
-               /* Register BDI before referencing it from bdev */
-               dev->devt = MKDEV(disk->major, disk->first_minor);
-               ret = bdi_register(bdi, "%u:%u",
+               ret = bdi_register(disk->bdi, "%u:%u",
                                   disk->major, disk->first_minor);
-               WARN_ON(ret);
-               bdi_set_owner(bdi, dev);
-               bdev_add(disk->part0, dev->devt);
-       }
-       register_disk(parent, disk, groups);
-       if (register_queue)
-               blk_register_queue(disk);
+               if (ret)
+                       goto out_unregister_queue;
+               bdi_set_owner(disk->bdi, ddev);
+               ret = sysfs_create_link(&ddev->kobj,
+                                       &disk->bdi->dev->kobj, "bdi");
+               if (ret)
+                       goto out_unregister_bdi;
 
-       /*
-        * Take an extra ref on queue which will be put on disk_release()
-        * so that it sticks around as long as @disk is there.
-        */
-       if (blk_get_queue(disk->queue))
-               set_bit(GD_QUEUE_REF, &disk->state);
-       else
-               WARN_ON_ONCE(1);
+               bdev_add(disk->part0, ddev->devt);
+               disk_scan_partitions(disk);
 
-       disk_add_events(disk);
-       blk_integrity_add(disk);
-}
+               /*
+                * Announce the disk and partitions after all partitions are
+                * created. (for hidden disks uevents remain suppressed forever)
+                */
+               dev_set_uevent_suppress(ddev, 0);
+               disk_uevent(disk, KOBJ_ADD);
+       }
 
-void device_add_disk(struct device *parent, struct gendisk *disk,
-                    const struct attribute_group **groups)
+       disk_update_readahead(disk);
+       disk_add_events(disk);
+       return 0;
 
-{
-       __device_add_disk(parent, disk, groups, true);
+out_unregister_bdi:
+       if (!(disk->flags & GENHD_FL_HIDDEN))
+               bdi_unregister(disk->bdi);
+out_unregister_queue:
+       blk_unregister_queue(disk);
+out_put_slave_dir:
+       kobject_put(disk->slave_dir);
+out_put_holder_dir:
+       kobject_put(disk->part0->bd_holder_dir);
+out_del_integrity:
+       blk_integrity_del(disk);
+out_del_block_link:
+       if (!sysfs_deprecated)
+               sysfs_remove_link(block_depr, dev_name(ddev));
+out_device_del:
+       device_del(ddev);
+out_disk_release_events:
+       disk_release_events(disk);
+out_free_ext_minor:
+       if (disk->major == BLOCK_EXT_MAJOR)
+               blk_free_ext_minor(disk->first_minor);
+       return WARN_ON_ONCE(ret); /* keep until all callers handle errors */
 }
 EXPORT_SYMBOL(device_add_disk);
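
Since device_add_disk() now unwinds on failure and returns an errno (the trailing WARN_ON_ONCE() stays only until every caller checks the result), converted drivers are expected to handle the error. A minimal sketch with hypothetical driver names, assuming the disk came from blk_alloc_disk():

	/* Hypothetical probe fragment: device_add_disk() can fail now, so the
	 * caller must undo its own allocations instead of ignoring the error.
	 */
	static int mydrv_add_disk(struct device *parent, struct gendisk *disk)
	{
		int ret;

		ret = device_add_disk(parent, disk, NULL);
		if (ret) {
			blk_cleanup_disk(disk);	/* undo blk_alloc_disk() */
			return ret;
		}
		return 0;
	}
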
 
-void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
-{
-       __device_add_disk(parent, disk, NULL, false);
-}
-EXPORT_SYMBOL(device_add_disk_no_queue_reg);
-
 /**
  * del_gendisk - remove the gendisk
  * @disk: the struct gendisk to remove
@@ -578,26 +556,20 @@ void del_gendisk(struct gendisk *disk)
 {
        might_sleep();
 
-       if (WARN_ON_ONCE(!disk->queue))
+       if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
                return;
 
        blk_integrity_del(disk);
        disk_del_events(disk);
 
        mutex_lock(&disk->open_mutex);
-       disk->flags &= ~GENHD_FL_UP;
+       remove_inode_hash(disk->part0->bd_inode);
        blk_drop_partitions(disk);
        mutex_unlock(&disk->open_mutex);
 
        fsync_bdev(disk->part0);
        __invalidate_device(disk->part0, true);
 
-       /*
-        * Unhash the bdev inode for this device so that it can't be looked
-        * up any more even if openers still hold references to it.
-        */
-       remove_inode_hash(disk->part0->bd_inode);
-
        set_capacity(disk, 0);
 
        if (!(disk->flags & GENHD_FL_HIDDEN)) {
@@ -607,7 +579,7 @@ void del_gendisk(struct gendisk *disk)
                 * Unregister bdi before releasing device numbers (as they can
                 * get reused and we'd get clashes in sysfs).
                 */
-               bdi_unregister(disk->queue->backing_dev_info);
+               bdi_unregister(disk->bdi);
        }
 
        blk_unregister_queue(disk);
@@ -683,7 +655,6 @@ void __init printk_all_partitions(void)
        while ((dev = class_dev_iter_next(&iter))) {
                struct gendisk *disk = dev_to_disk(dev);
                struct block_device *part;
-               char name_buf[BDEVNAME_SIZE];
                char devt_buf[BDEVT_SIZE];
                unsigned long idx;
 
@@ -703,11 +674,10 @@ void __init printk_all_partitions(void)
                xa_for_each(&disk->part_tbl, idx, part) {
                        if (!bdev_nr_sectors(part))
                                continue;
-                       printk("%s%s %10llu %s %s",
+                       printk("%s%s %10llu %pg %s",
                               bdev_is_partition(part) ? "  " : "",
                               bdevt_str(part->bd_dev, devt_buf),
-                              bdev_nr_sectors(part) >> 1,
-                              disk_name(disk, part->bd_partno, name_buf),
+                              bdev_nr_sectors(part) >> 1, part,
                               part->bd_meta_info ?
                                        part->bd_meta_info->uuid : "");
                        if (bdev_is_partition(part))
@@ -785,7 +755,6 @@ static int show_partition(struct seq_file *seqf, void *v)
        struct gendisk *sgp = v;
        struct block_device *part;
        unsigned long idx;
-       char buf[BDEVNAME_SIZE];
 
        /* Don't show non-partitionable removable devices or empty devices */
        if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
@@ -798,10 +767,9 @@ static int show_partition(struct seq_file *seqf, void *v)
        xa_for_each(&sgp->part_tbl, idx, part) {
                if (!bdev_nr_sectors(part))
                        continue;
-               seq_printf(seqf, "%4d  %7d %10llu %s\n",
+               seq_printf(seqf, "%4d  %7d %10llu %pg\n",
                           MAJOR(part->bd_dev), MINOR(part->bd_dev),
-                          bdev_nr_sectors(part) >> 1,
-                          disk_name(sgp, part->bd_partno, buf));
+                          bdev_nr_sectors(part) >> 1, part);
        }
        rcu_read_unlock();
        return 0;
@@ -968,6 +936,14 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
        return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
 }
 
+static ssize_t diskseq_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+
+       return sprintf(buf, "%llu\n", disk->diskseq);
+}
+
 static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
 static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
 static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
@@ -980,6 +956,7 @@ static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
 static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
 static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);
+static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL);
 
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 ssize_t part_fail_show(struct device *dev,
@@ -1025,6 +1002,7 @@ static struct attribute *disk_attrs[] = {
        &dev_attr_events.attr,
        &dev_attr_events_async.attr,
        &dev_attr_events_poll_msecs.attr,
+       &dev_attr_diskseq.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
        &dev_attr_fail.attr,
 #endif
@@ -1074,17 +1052,24 @@ static void disk_release(struct device *dev)
 
        might_sleep();
 
-       if (MAJOR(dev->devt) == BLOCK_EXT_MAJOR)
-               blk_free_ext_minor(MINOR(dev->devt));
        disk_release_events(disk);
        kfree(disk->random);
        xa_destroy(&disk->part_tbl);
-       if (test_bit(GD_QUEUE_REF, &disk->state) && disk->queue)
-               blk_put_queue(disk->queue);
-       bdput(disk->part0);     /* frees the disk */
+       disk->queue->disk = NULL;
+       blk_put_queue(disk->queue);
+       iput(disk->part0->bd_inode);    /* frees the disk */
+}
+
+static int block_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+
+       return add_uevent_var(env, "DISKSEQ=%llu", disk->diskseq);
 }
+
 struct class block_class = {
        .name           = "block",
+       .dev_uevent     = block_uevent,
 };
 
 static char *block_devnode(struct device *dev, umode_t *mode,
@@ -1116,7 +1101,6 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 {
        struct gendisk *gp = v;
        struct block_device *hd;
-       char buf[BDEVNAME_SIZE];
        unsigned int inflight;
        struct disk_stats stat;
        unsigned long idx;
@@ -1139,15 +1123,14 @@ static int diskstats_show(struct seq_file *seqf, void *v)
                else
                        inflight = part_in_flight(hd);
 
-               seq_printf(seqf, "%4d %7d %s "
+               seq_printf(seqf, "%4d %7d %pg "
                           "%lu %lu %lu %u "
                           "%lu %lu %lu %u "
                           "%u %u %u "
                           "%lu %lu %lu %u "
                           "%lu %u"
                           "\n",
-                          MAJOR(hd->bd_dev), MINOR(hd->bd_dev),
-                          disk_name(gp, hd->bd_partno, buf),
+                          MAJOR(hd->bd_dev), MINOR(hd->bd_dev), hd,
                           stat.ios[STAT_READ],
                           stat.merges[STAT_READ],
                           stat.sectors[STAT_READ],
@@ -1239,17 +1222,25 @@ dev_t blk_lookup_devt(const char *name, int partno)
        return devt;
 }
 
-struct gendisk *__alloc_disk_node(int minors, int node_id)
+struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
+               struct lock_class_key *lkclass)
 {
        struct gendisk *disk;
 
+       if (!blk_get_queue(q))
+               return NULL;
+
        disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
        if (!disk)
-               return NULL;
+               goto out_put_queue;
+
+       disk->bdi = bdi_alloc(node_id);
+       if (!disk->bdi)
+               goto out_free_disk;
 
        disk->part0 = bdev_alloc(disk, 0);
        if (!disk->part0)
-               goto out_free_disk;
+               goto out_free_bdi;
 
        disk->node_id = node_id;
        mutex_init(&disk->open_mutex);
@@ -1257,23 +1248,33 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
        if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL))
                goto out_destroy_part_tbl;
 
-       disk->minors = minors;
        rand_initialize_disk(disk);
        disk_to_dev(disk)->class = &block_class;
        disk_to_dev(disk)->type = &disk_type;
        device_initialize(disk_to_dev(disk));
+       inc_diskseq(disk);
+       disk->queue = q;
+       q->disk = disk;
+       lockdep_init_map(&disk->lockdep_map, "(bio completion)", lkclass, 0);
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+       INIT_LIST_HEAD(&disk->slave_bdevs);
+#endif
        return disk;
 
 out_destroy_part_tbl:
        xa_destroy(&disk->part_tbl);
-       bdput(disk->part0);
+       iput(disk->part0->bd_inode);
+out_free_bdi:
+       bdi_put(disk->bdi);
 out_free_disk:
        kfree(disk);
+out_put_queue:
+       blk_put_queue(q);
        return NULL;
 }
 EXPORT_SYMBOL(__alloc_disk_node);
 
-struct gendisk *__blk_alloc_disk(int node)
+struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
 {
        struct request_queue *q;
        struct gendisk *disk;
@@ -1282,12 +1283,11 @@ struct gendisk *__blk_alloc_disk(int node)
        if (!q)
                return NULL;
 
-       disk = __alloc_disk_node(0, node);
+       disk = __alloc_disk_node(q, node, lkclass);
        if (!disk) {
                blk_cleanup_queue(q);
                return NULL;
        }
-       disk->queue = q;
        return disk;
 }
 EXPORT_SYMBOL(__blk_alloc_disk);
@@ -1362,3 +1362,8 @@ int bdev_read_only(struct block_device *bdev)
        return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
 }
 EXPORT_SYMBOL(bdev_read_only);
+
+void inc_diskseq(struct gendisk *disk)
+{
+       disk->diskseq = atomic64_inc_return(&diskseq);
+}
diff --git a/block/holder.c b/block/holder.c
new file mode 100644 (file)
index 0000000..9dc0841
--- /dev/null
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/genhd.h>
+
+struct bd_holder_disk {
+       struct list_head        list;
+       struct block_device     *bdev;
+       int                     refcnt;
+};
+
+static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
+                                                 struct gendisk *disk)
+{
+       struct bd_holder_disk *holder;
+
+       list_for_each_entry(holder, &disk->slave_bdevs, list)
+               if (holder->bdev == bdev)
+                       return holder;
+       return NULL;
+}
+
+static int add_symlink(struct kobject *from, struct kobject *to)
+{
+       return sysfs_create_link(from, to, kobject_name(to));
+}
+
+static void del_symlink(struct kobject *from, struct kobject *to)
+{
+       sysfs_remove_link(from, kobject_name(to));
+}
+
+static int __link_disk_holder(struct block_device *bdev, struct gendisk *disk)
+{
+       int ret;
+
+       ret = add_symlink(disk->slave_dir, bdev_kobj(bdev));
+       if (ret)
+               return ret;
+       ret = add_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
+       if (ret)
+               del_symlink(disk->slave_dir, bdev_kobj(bdev));
+       return ret;
+}
+
+/**
+ * bd_link_disk_holder - create symlinks between holding disk and slave bdev
+ * @bdev: the claimed slave bdev
+ * @disk: the holding disk
+ *
+ * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
+ *
+ * This function creates the following sysfs symlinks.
+ *
+ * - from "slaves" directory of the holder @disk to the claimed @bdev
+ * - from "holders" directory of the @bdev to the holder @disk
+ *
+ * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
+ * passed to bd_link_disk_holder(), then:
+ *
+ *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
+ *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
+ *
+ * The caller must have claimed @bdev before calling this function and
+ * ensure that both @bdev and @disk are valid during the creation and
+ * lifetime of these symlinks.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
+{
+       struct bd_holder_disk *holder;
+       int ret = 0;
+
+       mutex_lock(&disk->open_mutex);
+
+       WARN_ON_ONCE(!bdev->bd_holder);
+
+       /* FIXME: remove the following once add_disk() handles errors */
+       if (WARN_ON(!bdev->bd_holder_dir))
+               goto out_unlock;
+
+       holder = bd_find_holder_disk(bdev, disk);
+       if (holder) {
+               holder->refcnt++;
+               goto out_unlock;
+       }
+
+       holder = kzalloc(sizeof(*holder), GFP_KERNEL);
+       if (!holder) {
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
+
+       INIT_LIST_HEAD(&holder->list);
+       holder->bdev = bdev;
+       holder->refcnt = 1;
+       if (disk->slave_dir) {
+               ret = __link_disk_holder(bdev, disk);
+               if (ret) {
+                       kfree(holder);
+                       goto out_unlock;
+               }
+       }
+
+       list_add(&holder->list, &disk->slave_bdevs);
+       /*
+        * del_gendisk drops the initial reference to bd_holder_dir, so we need
+        * to keep our own here to allow for cleanup past that point.
+        */
+       kobject_get(bdev->bd_holder_dir);
+
+out_unlock:
+       mutex_unlock(&disk->open_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(bd_link_disk_holder);
+
+static void __unlink_disk_holder(struct block_device *bdev,
+               struct gendisk *disk)
+{
+       del_symlink(disk->slave_dir, bdev_kobj(bdev));
+       del_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
+}
+
+/**
+ * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
+ * @bdev: the claimed slave bdev
+ * @disk: the holding disk
+ *
+ * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
+{
+       struct bd_holder_disk *holder;
+
+       mutex_lock(&disk->open_mutex);
+       holder = bd_find_holder_disk(bdev, disk);
+       if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
+               if (disk->slave_dir)
+                       __unlink_disk_holder(bdev, disk);
+               kobject_put(bdev->bd_holder_dir);
+               list_del_init(&holder->list);
+               kfree(holder);
+       }
+       mutex_unlock(&disk->open_mutex);
+}
+EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
+
+int bd_register_pending_holders(struct gendisk *disk)
+{
+       struct bd_holder_disk *holder;
+       int ret;
+
+       mutex_lock(&disk->open_mutex);
+       list_for_each_entry(holder, &disk->slave_bdevs, list) {
+               ret = __link_disk_holder(holder->bdev, disk);
+               if (ret)
+                       goto out_undo;
+       }
+       mutex_unlock(&disk->open_mutex);
+       return 0;
+
+out_undo:
+       list_for_each_entry_continue_reverse(holder, &disk->slave_bdevs, list)
+               __unlink_disk_holder(holder->bdev, disk);
+       mutex_unlock(&disk->open_mutex);
+       return ret;
+}
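
The holder interface above is only for drivers that already stack block devices (dm, md and friends). A minimal sketch of the expected pairing, with hypothetical driver names; the member bdev must already have been claimed (e.g. via blkdev_get_by_dev()) before it is linked:

	/* Hypothetical stacking-driver fragments: keep /sys/block/<holder>/slaves
	 * and /sys/block/<member>/holders in sync for a claimed member device.
	 */
	static int mystack_add_member(struct gendisk *holder, struct block_device *member)
	{
		return bd_link_disk_holder(member, holder);
	}

	static void mystack_del_member(struct gendisk *holder, struct block_device *member)
	{
		bd_unlink_disk_holder(member, holder);
	}
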
index 24beec9..eb0491e 100644 (file)
@@ -16,6 +16,7 @@
 static int blkpg_do_ioctl(struct block_device *bdev,
                          struct blkpg_partition __user *upart, int op)
 {
+       struct gendisk *disk = bdev->bd_disk;
        struct blkpg_partition p;
        long long start, length;
 
@@ -30,7 +31,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
                return -EINVAL;
 
        if (op == BLKPG_DEL_PARTITION)
-               return bdev_del_partition(bdev, p.pno);
+               return bdev_del_partition(disk, p.pno);
 
        start = p.start >> SECTOR_SHIFT;
        length = p.length >> SECTOR_SHIFT;
@@ -40,9 +41,9 @@ static int blkpg_do_ioctl(struct block_device *bdev,
                /* check if partition is aligned to blocksize */
                if (p.start & (bdev_logical_block_size(bdev) - 1))
                        return -EINVAL;
-               return bdev_add_partition(bdev, p.pno, start, length);
+               return bdev_add_partition(disk, p.pno, start, length);
        case BLKPG_RESIZE_PARTITION:
-               return bdev_resize_partition(bdev, p.pno, start, length);
+               return bdev_resize_partition(disk, p.pno, start, length);
        default:
                return -EINVAL;
        }
@@ -469,6 +470,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
                                BLKDEV_DISCARD_SECURE);
        case BLKZEROOUT:
                return blk_ioctl_zeroout(bdev, mode, arg);
+       case BLKGETDISKSEQ:
+               return put_u64(argp, bdev->bd_disk->diskseq);
        case BLKREPORTZONE:
                return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
        case BLKRESETZONE:
@@ -504,7 +507,7 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
        case BLKFRASET:
                if(!capable(CAP_SYS_ADMIN))
                        return -EACCES;
-               bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
+               bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE;
                return 0;
        case BLKRRPART:
                return blkdev_reread_part(bdev, mode);
@@ -554,7 +557,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        case BLKFRAGET:
                if (!argp)
                        return -EINVAL;
-               return put_long(argp, (bdev->bd_bdi->ra_pages*PAGE_SIZE) / 512);
+               return put_long(argp,
+                       (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
        case BLKGETSIZE:
                size = i_size_read(bdev->bd_inode);
                if ((size >> 9) > ~0UL)
@@ -626,7 +630,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                if (!argp)
                        return -EINVAL;
                return compat_put_long(argp,
-                              (bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512);
+                       (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
        case BLKGETSIZE:
                size = i_size_read(bdev->bd_inode);
                if ((size >> 9) > ~0UL)
index bee628f..0e4ff24 100644 (file)
@@ -74,9 +74,8 @@ int ioprio_check_cap(int ioprio)
                        fallthrough;
                        /* rt has prio field too */
                case IOPRIO_CLASS_BE:
-                       if (data >= IOPRIO_BE_NR || data < 0)
+                       if (data >= IOPRIO_NR_LEVELS || data < 0)
                                return -EINVAL;
-
                        break;
                case IOPRIO_CLASS_IDLE:
                        break;
@@ -171,7 +170,7 @@ static int get_task_ioprio(struct task_struct *p)
        ret = security_task_getioprio(p);
        if (ret)
                goto out;
-       ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+       ret = IOPRIO_DEFAULT;
        task_lock(p);
        if (p->io_context)
                ret = p->io_context->ioprio;
@@ -183,9 +182,9 @@ out:
 int ioprio_best(unsigned short aprio, unsigned short bprio)
 {
        if (!ioprio_valid(aprio))
-               aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+               aprio = IOPRIO_DEFAULT;
        if (!ioprio_valid(bprio))
-               bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+               bprio = IOPRIO_DEFAULT;
 
        return min(aprio, bprio);
 }
index 81e3279..15a8be5 100644 (file)
@@ -596,13 +596,13 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
                struct list_head *head = &kcq->rq_list[sched_domain];
 
                spin_lock(&kcq->lock);
+               trace_block_rq_insert(rq);
                if (at_head)
                        list_move(&rq->queuelist, head);
                else
                        list_move_tail(&rq->queuelist, head);
                sbitmap_set_bit(&khd->kcq_map[sched_domain],
                                rq->mq_ctx->index_hw[hctx->type]);
-               trace_block_rq_insert(rq);
                spin_unlock(&kcq->lock);
        }
 }
diff --git a/block/mq-deadline-cgroup.c b/block/mq-deadline-cgroup.c
deleted file mode 100644 (file)
index 3b4bfdd..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/blk-cgroup.h>
-#include <linux/ioprio.h>
-
-#include "mq-deadline-cgroup.h"
-
-static struct blkcg_policy dd_blkcg_policy;
-
-static struct blkcg_policy_data *dd_cpd_alloc(gfp_t gfp)
-{
-       struct dd_blkcg *pd;
-
-       pd = kzalloc(sizeof(*pd), gfp);
-       if (!pd)
-               return NULL;
-       pd->stats = alloc_percpu_gfp(typeof(*pd->stats),
-                                    GFP_KERNEL | __GFP_ZERO);
-       if (!pd->stats) {
-               kfree(pd);
-               return NULL;
-       }
-       return &pd->cpd;
-}
-
-static void dd_cpd_free(struct blkcg_policy_data *cpd)
-{
-       struct dd_blkcg *dd_blkcg = container_of(cpd, typeof(*dd_blkcg), cpd);
-
-       free_percpu(dd_blkcg->stats);
-       kfree(dd_blkcg);
-}
-
-static struct dd_blkcg *dd_blkcg_from_pd(struct blkg_policy_data *pd)
-{
-       return container_of(blkcg_to_cpd(pd->blkg->blkcg, &dd_blkcg_policy),
-                           struct dd_blkcg, cpd);
-}
-
-/*
- * Convert an association between a block cgroup and a request queue into a
- * pointer to the mq-deadline information associated with a (blkcg, queue) pair.
- */
-struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
-{
-       struct blkg_policy_data *pd;
-
-       pd = blkg_to_pd(bio->bi_blkg, &dd_blkcg_policy);
-       if (!pd)
-               return NULL;
-
-       return dd_blkcg_from_pd(pd);
-}
-
-static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
-{
-       static const char *const prio_class_name[] = {
-               [IOPRIO_CLASS_NONE]     = "NONE",
-               [IOPRIO_CLASS_RT]       = "RT",
-               [IOPRIO_CLASS_BE]       = "BE",
-               [IOPRIO_CLASS_IDLE]     = "IDLE",
-       };
-       struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd);
-       int res = 0;
-       u8 prio;
-
-       for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++)
-               res += scnprintf(buf + res, size - res,
-                       " [%s] dispatched=%u inserted=%u merged=%u",
-                       prio_class_name[prio],
-                       ddcg_sum(blkcg, dispatched, prio) +
-                       ddcg_sum(blkcg, merged, prio) -
-                       ddcg_sum(blkcg, completed, prio),
-                       ddcg_sum(blkcg, inserted, prio) -
-                       ddcg_sum(blkcg, completed, prio),
-                       ddcg_sum(blkcg, merged, prio));
-
-       return res;
-}
-
-static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q,
-                                           struct blkcg *blkcg)
-{
-       struct dd_blkg *pd;
-
-       pd = kzalloc(sizeof(*pd), gfp);
-       if (!pd)
-               return NULL;
-       return &pd->pd;
-}
-
-static void dd_pd_free(struct blkg_policy_data *pd)
-{
-       struct dd_blkg *dd_blkg = container_of(pd, typeof(*dd_blkg), pd);
-
-       kfree(dd_blkg);
-}
-
-static struct blkcg_policy dd_blkcg_policy = {
-       .cpd_alloc_fn           = dd_cpd_alloc,
-       .cpd_free_fn            = dd_cpd_free,
-
-       .pd_alloc_fn            = dd_pd_alloc,
-       .pd_free_fn             = dd_pd_free,
-       .pd_stat_fn             = dd_pd_stat,
-};
-
-int dd_activate_policy(struct request_queue *q)
-{
-       return blkcg_activate_policy(q, &dd_blkcg_policy);
-}
-
-void dd_deactivate_policy(struct request_queue *q)
-{
-       blkcg_deactivate_policy(q, &dd_blkcg_policy);
-}
-
-int __init dd_blkcg_init(void)
-{
-       return blkcg_policy_register(&dd_blkcg_policy);
-}
-
-void __exit dd_blkcg_exit(void)
-{
-       blkcg_policy_unregister(&dd_blkcg_policy);
-}
diff --git a/block/mq-deadline-cgroup.h b/block/mq-deadline-cgroup.h
deleted file mode 100644 (file)
index 0143fd7..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#if !defined(_MQ_DEADLINE_CGROUP_H_)
-#define _MQ_DEADLINE_CGROUP_H_
-
-#include <linux/blk-cgroup.h>
-
-struct request_queue;
-
-/**
- * struct io_stats_per_prio - I/O statistics per I/O priority class.
- * @inserted: Number of inserted requests.
- * @merged: Number of merged requests.
- * @dispatched: Number of dispatched requests.
- * @completed: Number of I/O completions.
- */
-struct io_stats_per_prio {
-       local_t inserted;
-       local_t merged;
-       local_t dispatched;
-       local_t completed;
-};
-
-/* I/O statistics per I/O cgroup per I/O priority class (IOPRIO_CLASS_*). */
-struct blkcg_io_stats {
-       struct io_stats_per_prio stats[4];
-};
-
-/**
- * struct dd_blkcg - Per cgroup data.
- * @cpd: blkcg_policy_data structure.
- * @stats: I/O statistics.
- */
-struct dd_blkcg {
-       struct blkcg_policy_data cpd;   /* must be the first member */
-       struct blkcg_io_stats __percpu *stats;
-};
-
-/*
- * Count one event of type 'event_type' and with I/O priority class
- * 'prio_class'.
- */
-#define ddcg_count(ddcg, event_type, prio_class) do {                  \
-if (ddcg) {                                                            \
-       struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats);   \
-                                                                       \
-       BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *));          \
-       BUILD_BUG_ON(!__same_type((prio_class), u8));                   \
-       local_inc(&io_stats->stats[(prio_class)].event_type);           \
-       put_cpu_ptr(io_stats);                                          \
-}                                                                      \
-} while (0)
-
-/*
- * Returns the total number of ddcg_count(ddcg, event_type, prio_class) calls
- * across all CPUs. No locking or barriers since it is fine if the returned
- * sum is slightly outdated.
- */
-#define ddcg_sum(ddcg, event_type, prio) ({                            \
-       unsigned int cpu;                                               \
-       u32 sum = 0;                                                    \
-                                                                       \
-       BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *));          \
-       BUILD_BUG_ON(!__same_type((prio), u8));                         \
-       for_each_present_cpu(cpu)                                       \
-               sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)->    \
-                                 stats[(prio)].event_type);            \
-       sum;                                                            \
-})
-
-#ifdef CONFIG_BLK_CGROUP
-
-/**
- * struct dd_blkg - Per (cgroup, request queue) data.
- * @pd: blkg_policy_data structure.
- */
-struct dd_blkg {
-       struct blkg_policy_data pd;     /* must be the first member */
-};
-
-struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio);
-int dd_activate_policy(struct request_queue *q);
-void dd_deactivate_policy(struct request_queue *q);
-int __init dd_blkcg_init(void);
-void __exit dd_blkcg_exit(void);
-
-#else /* CONFIG_BLK_CGROUP */
-
-static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
-{
-       return NULL;
-}
-
-static inline int dd_activate_policy(struct request_queue *q)
-{
-       return 0;
-}
-
-static inline void dd_deactivate_policy(struct request_queue *q)
-{
-}
-
-static inline int dd_blkcg_init(void)
-{
-       return 0;
-}
-
-static inline void dd_blkcg_exit(void)
-{
-}
-
-#endif /* CONFIG_BLK_CGROUP */
-
-#endif /* _MQ_DEADLINE_CGROUP_H_ */
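
The header above keeps its statistics in per-CPU counters: ddcg_count() bumps a local_t on the current CPU without taking a lock, and ddcg_sum() tolerates a slightly stale total by simply adding up every CPU's slot. Below is a minimal standalone C sketch of that pattern, assuming made-up names (NR_SLOTS, io_stats_per_class, count_inserted, sum_inserted); it simulates per-CPU slots with a plain array and is not kernel code.

/*
 * Standalone illustration (not kernel code): per-slot event counters that
 * are bumped without locks on the fast path and summed lazily on the read
 * side, mirroring the ddcg_count()/ddcg_sum() pattern above. NR_SLOTS,
 * NR_CLASSES and all identifiers here are made up for this sketch.
 */
#include <stdio.h>

#define NR_SLOTS   4	/* stand-in for the number of CPUs */
#define NR_CLASSES 4	/* stand-in for IOPRIO_CLASS_* */

struct io_stats_per_class {
	unsigned int inserted;
	unsigned int completed;
};

/* One counter block per "CPU"; each writer only touches its own slot. */
static struct io_stats_per_class stats[NR_SLOTS][NR_CLASSES];

static void count_inserted(int slot, int prio_class)
{
	stats[slot][prio_class].inserted++;	/* plays the role of local_inc() */
}

/* Reader sums all slots; a slightly stale result is acceptable. */
static unsigned int sum_inserted(int prio_class)
{
	unsigned int sum = 0;

	for (int slot = 0; slot < NR_SLOTS; slot++)
		sum += stats[slot][prio_class].inserted;
	return sum;
}

int main(void)
{
	count_inserted(0, 1);
	count_inserted(2, 1);
	printf("inserted for class 1: %u\n", sum_inserted(1));	/* prints 2 */
	return 0;
}

The point of this layout is that writers never contend: each CPU increments only its own slot, and the reader pays the cost of walking all slots, which is acceptable because the sum is only consulted for statistics.
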
diff --git a/block/mq-deadline-main.c b/block/mq-deadline-main.c
deleted file mode 100644 (file)
index 6f612e6..0000000
+++ /dev/null
@@ -1,1175 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
- *  for the blk-mq scheduling framework
- *
- *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
- */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/rbtree.h>
-#include <linux/sbitmap.h>
-
-#include <trace/events/block.h>
-
-#include "blk.h"
-#include "blk-mq.h"
-#include "blk-mq-debugfs.h"
-#include "blk-mq-tag.h"
-#include "blk-mq-sched.h"
-#include "mq-deadline-cgroup.h"
-
-/*
- * See Documentation/block/deadline-iosched.rst
- */
-static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
-static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-/*
- * Time after which to dispatch lower priority requests even if higher
- * priority requests are pending.
- */
-static const int aging_expire = 10 * HZ;
-static const int writes_starved = 2;    /* max times reads can starve a write */
-static const int fifo_batch = 16;       /* # of sequential requests treated as one
-                                    by the above parameters. For throughput. */
-
-enum dd_data_dir {
-       DD_READ         = READ,
-       DD_WRITE        = WRITE,
-};
-
-enum { DD_DIR_COUNT = 2 };
-
-enum dd_prio {
-       DD_RT_PRIO      = 0,
-       DD_BE_PRIO      = 1,
-       DD_IDLE_PRIO    = 2,
-       DD_PRIO_MAX     = 2,
-};
-
-enum { DD_PRIO_COUNT = 3 };
-
-/* I/O statistics for all I/O priorities (enum dd_prio). */
-struct io_stats {
-       struct io_stats_per_prio stats[DD_PRIO_COUNT];
-};
-
-/*
- * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
- * present on both sort_list[] and fifo_list[].
- */
-struct dd_per_prio {
-       struct list_head dispatch;
-       struct rb_root sort_list[DD_DIR_COUNT];
-       struct list_head fifo_list[DD_DIR_COUNT];
-       /* Next request in FIFO order. Read, write or both are NULL. */
-       struct request *next_rq[DD_DIR_COUNT];
-};
-
-struct deadline_data {
-       /*
-        * run time data
-        */
-
-       /* Request queue that owns this data structure. */
-       struct request_queue *queue;
-
-       struct dd_per_prio per_prio[DD_PRIO_COUNT];
-
-       /* Data direction of latest dispatched request. */
-       enum dd_data_dir last_dir;
-       unsigned int batching;          /* number of sequential requests made */
-       unsigned int starved;           /* times reads have starved writes */
-
-       struct io_stats __percpu *stats;
-
-       /*
-        * settings that change how the i/o scheduler behaves
-        */
-       int fifo_expire[DD_DIR_COUNT];
-       int fifo_batch;
-       int writes_starved;
-       int front_merges;
-       u32 async_depth;
-       int aging_expire;
-
-       spinlock_t lock;
-       spinlock_t zone_lock;
-};
-
-/* Count one event of type 'event_type' and with I/O priority 'prio' */
-#define dd_count(dd, event_type, prio) do {                            \
-       struct io_stats *io_stats = get_cpu_ptr((dd)->stats);           \
-                                                                       \
-       BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
-       BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
-       local_inc(&io_stats->stats[(prio)].event_type);                 \
-       put_cpu_ptr(io_stats);                                          \
-} while (0)
-
-/*
- * Returns the total number of dd_count(dd, event_type, prio) calls across all
- * CPUs. No locking or barriers since it is fine if the returned sum is slightly
- * outdated.
- */
-#define dd_sum(dd, event_type, prio) ({                                        \
-       unsigned int cpu;                                               \
-       u32 sum = 0;                                                    \
-                                                                       \
-       BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
-       BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
-       for_each_present_cpu(cpu)                                       \
-               sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->      \
-                                 stats[(prio)].event_type);            \
-       sum;                                                            \
-})
-
-/* Maps an I/O priority class to a deadline scheduler priority. */
-static const enum dd_prio ioprio_class_to_prio[] = {
-       [IOPRIO_CLASS_NONE]     = DD_BE_PRIO,
-       [IOPRIO_CLASS_RT]       = DD_RT_PRIO,
-       [IOPRIO_CLASS_BE]       = DD_BE_PRIO,
-       [IOPRIO_CLASS_IDLE]     = DD_IDLE_PRIO,
-};
-
-static inline struct rb_root *
-deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
-{
-       return &per_prio->sort_list[rq_data_dir(rq)];
-}
-
-/*
- * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
- * request.
- */
-static u8 dd_rq_ioclass(struct request *rq)
-{
-       return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
-}
-
-/*
- * get the request after `rq' in sector-sorted order
- */
-static inline struct request *
-deadline_latter_request(struct request *rq)
-{
-       struct rb_node *node = rb_next(&rq->rb_node);
-
-       if (node)
-               return rb_entry_rq(node);
-
-       return NULL;
-}
-
-static void
-deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
-{
-       struct rb_root *root = deadline_rb_root(per_prio, rq);
-
-       elv_rb_add(root, rq);
-}
-
-static inline void
-deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
-{
-       const enum dd_data_dir data_dir = rq_data_dir(rq);
-
-       if (per_prio->next_rq[data_dir] == rq)
-               per_prio->next_rq[data_dir] = deadline_latter_request(rq);
-
-       elv_rb_del(deadline_rb_root(per_prio, rq), rq);
-}
-
-/*
- * remove rq from rbtree and fifo.
- */
-static void deadline_remove_request(struct request_queue *q,
-                                   struct dd_per_prio *per_prio,
-                                   struct request *rq)
-{
-       list_del_init(&rq->queuelist);
-
-       /*
-        * We might not be on the rbtree, if we are doing an insert merge
-        */
-       if (!RB_EMPTY_NODE(&rq->rb_node))
-               deadline_del_rq_rb(per_prio, rq);
-
-       elv_rqhash_del(q, rq);
-       if (q->last_merge == rq)
-               q->last_merge = NULL;
-}
-
-static void dd_request_merged(struct request_queue *q, struct request *req,
-                             enum elv_merge type)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       const u8 ioprio_class = dd_rq_ioclass(req);
-       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];
-
-       /*
-        * if the merge was a front merge, we need to reposition request
-        */
-       if (type == ELEVATOR_FRONT_MERGE) {
-               elv_rb_del(deadline_rb_root(per_prio, req), req);
-               deadline_add_rq_rb(per_prio, req);
-       }
-}
-
-/*
- * Callback function that is invoked after @next has been merged into @req.
- */
-static void dd_merged_requests(struct request_queue *q, struct request *req,
-                              struct request *next)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       const u8 ioprio_class = dd_rq_ioclass(next);
-       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
-       struct dd_blkcg *blkcg = next->elv.priv[0];
-
-       dd_count(dd, merged, prio);
-       ddcg_count(blkcg, merged, ioprio_class);
-
-       /*
-        * if next expires before rq, assign its expire time to rq
-        * and move into next position (next will be deleted) in fifo
-        */
-       if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
-               if (time_before((unsigned long)next->fifo_time,
-                               (unsigned long)req->fifo_time)) {
-                       list_move(&req->queuelist, &next->queuelist);
-                       req->fifo_time = next->fifo_time;
-               }
-       }
-
-       /*
-        * kill knowledge of next, this one is a goner
-        */
-       deadline_remove_request(q, &dd->per_prio[prio], next);
-}
-
-/*
- * move an entry to dispatch queue
- */
-static void
-deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
-                     struct request *rq)
-{
-       const enum dd_data_dir data_dir = rq_data_dir(rq);
-
-       per_prio->next_rq[data_dir] = deadline_latter_request(rq);
-
-       /*
-        * take it off the sort and fifo list
-        */
-       deadline_remove_request(rq->q, per_prio, rq);
-}
-
-/* Number of requests queued for a given priority level. */
-static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
-{
-       return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
-}
-
-/*
- * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
- * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
- */
-static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
-                                     enum dd_data_dir data_dir)
-{
-       struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
-
-       /*
-        * rq is expired!
-        */
-       if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
-               return 1;
-
-       return 0;
-}
-
-/*
- * For the specified data direction, return the next request to
- * dispatch using arrival ordered lists.
- */
-static struct request *
-deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
-                     enum dd_data_dir data_dir)
-{
-       struct request *rq;
-       unsigned long flags;
-
-       if (list_empty(&per_prio->fifo_list[data_dir]))
-               return NULL;
-
-       rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
-       if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
-               return rq;
-
-       /*
-        * Look for a write request that can be dispatched, that is one with
-        * an unlocked target zone.
-        */
-       spin_lock_irqsave(&dd->zone_lock, flags);
-       list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
-               if (blk_req_can_dispatch_to_zone(rq))
-                       goto out;
-       }
-       rq = NULL;
-out:
-       spin_unlock_irqrestore(&dd->zone_lock, flags);
-
-       return rq;
-}
-
-/*
- * For the specified data direction, return the next request to
- * dispatch using sector position sorted lists.
- */
-static struct request *
-deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
-                     enum dd_data_dir data_dir)
-{
-       struct request *rq;
-       unsigned long flags;
-
-       rq = per_prio->next_rq[data_dir];
-       if (!rq)
-               return NULL;
-
-       if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
-               return rq;
-
-       /*
-        * Look for a write request that can be dispatched, that is one with
-        * an unlocked target zone.
-        */
-       spin_lock_irqsave(&dd->zone_lock, flags);
-       while (rq) {
-               if (blk_req_can_dispatch_to_zone(rq))
-                       break;
-               rq = deadline_latter_request(rq);
-       }
-       spin_unlock_irqrestore(&dd->zone_lock, flags);
-
-       return rq;
-}
-
-/*
- * deadline_dispatch_requests selects the best request according to
- * read/write expire, fifo_batch, etc and with a start time <= @latest.
- */
-static struct request *__dd_dispatch_request(struct deadline_data *dd,
-                                            struct dd_per_prio *per_prio,
-                                            u64 latest_start_ns)
-{
-       struct request *rq, *next_rq;
-       enum dd_data_dir data_dir;
-       struct dd_blkcg *blkcg;
-       enum dd_prio prio;
-       u8 ioprio_class;
-
-       lockdep_assert_held(&dd->lock);
-
-       if (!list_empty(&per_prio->dispatch)) {
-               rq = list_first_entry(&per_prio->dispatch, struct request,
-                                     queuelist);
-               if (rq->start_time_ns > latest_start_ns)
-                       return NULL;
-               list_del_init(&rq->queuelist);
-               goto done;
-       }
-
-       /*
-        * batches are currently reads XOR writes
-        */
-       rq = deadline_next_request(dd, per_prio, dd->last_dir);
-       if (rq && dd->batching < dd->fifo_batch)
-               /* we have a next request and are still entitled to batch */
-               goto dispatch_request;
-
-       /*
-        * at this point we are not running a batch. select the appropriate
-        * data direction (read / write)
-        */
-
-       if (!list_empty(&per_prio->fifo_list[DD_READ])) {
-               BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
-
-               if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
-                   (dd->starved++ >= dd->writes_starved))
-                       goto dispatch_writes;
-
-               data_dir = DD_READ;
-
-               goto dispatch_find_request;
-       }
-
-       /*
-        * there are either no reads or writes have been starved
-        */
-
-       if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
-dispatch_writes:
-               BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
-
-               dd->starved = 0;
-
-               data_dir = DD_WRITE;
-
-               goto dispatch_find_request;
-       }
-
-       return NULL;
-
-dispatch_find_request:
-       /*
-        * we are not running a batch, find best request for selected data_dir
-        */
-       next_rq = deadline_next_request(dd, per_prio, data_dir);
-       if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
-               /*
-                * A deadline has expired, the last request was in the other
-                * direction, or we have run out of higher-sectored requests.
-                * Start again from the request with the earliest expiry time.
-                */
-               rq = deadline_fifo_request(dd, per_prio, data_dir);
-       } else {
-               /*
-                * The last req was the same dir and we have a next request in
-                * sort order. No expired requests so continue on from here.
-                */
-               rq = next_rq;
-       }
-
-       /*
-        * For a zoned block device, if we only have writes queued and none of
-        * them can be dispatched, rq will be NULL.
-        */
-       if (!rq)
-               return NULL;
-
-       dd->last_dir = data_dir;
-       dd->batching = 0;
-
-dispatch_request:
-       if (rq->start_time_ns > latest_start_ns)
-               return NULL;
-       /*
-        * rq is the selected appropriate request.
-        */
-       dd->batching++;
-       deadline_move_request(dd, per_prio, rq);
-done:
-       ioprio_class = dd_rq_ioclass(rq);
-       prio = ioprio_class_to_prio[ioprio_class];
-       dd_count(dd, dispatched, prio);
-       blkcg = rq->elv.priv[0];
-       ddcg_count(blkcg, dispatched, ioprio_class);
-       /*
-        * If the request needs its target zone locked, do it.
-        */
-       blk_req_zone_write_lock(rq);
-       rq->rq_flags |= RQF_STARTED;
-       return rq;
-}
-
-/*
- * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
- *
- * One confusing aspect here is that we get called for a specific
- * hardware queue, but we may return a request that is for a
- * different hardware queue. This is because mq-deadline has shared
- * state for all hardware queues, in terms of sorting, FIFOs, etc.
- */
-static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
-{
-       struct deadline_data *dd = hctx->queue->elevator->elevator_data;
-       const u64 now_ns = ktime_get_ns();
-       struct request *rq = NULL;
-       enum dd_prio prio;
-
-       spin_lock(&dd->lock);
-       /*
-        * Start with dispatching requests whose deadline expired more than
-        * aging_expire jiffies ago.
-        */
-       for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
-               rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
-                                          jiffies_to_nsecs(dd->aging_expire));
-               if (rq)
-                       goto unlock;
-       }
-       /*
-        * Next, dispatch requests in priority order. Ignore lower priority
-        * requests if any higher priority requests are pending.
-        */
-       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
-               rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
-               if (rq || dd_queued(dd, prio))
-                       break;
-       }
-
-unlock:
-       spin_unlock(&dd->lock);
-
-       return rq;
-}
-
-/*
- * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
- * function is used by __blk_mq_get_tag().
- */
-static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
-{
-       struct deadline_data *dd = data->q->elevator->elevator_data;
-
-       /* Do not throttle synchronous reads. */
-       if (op_is_sync(op) && !op_is_write(op))
-               return;
-
-       /*
-        * Throttle asynchronous requests and writes such that these requests
-        * do not block the allocation of synchronous requests.
-        */
-       data->shallow_depth = dd->async_depth;
-}
-
-/* Called by blk_mq_update_nr_requests(). */
-static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
-{
-       struct request_queue *q = hctx->queue;
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct blk_mq_tags *tags = hctx->sched_tags;
-
-       dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
-
-       sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
-}
-
-/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
-static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
-{
-       dd_depth_updated(hctx);
-       return 0;
-}
-
-static void dd_exit_sched(struct elevator_queue *e)
-{
-       struct deadline_data *dd = e->elevator_data;
-       enum dd_prio prio;
-
-       dd_deactivate_policy(dd->queue);
-
-       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
-               struct dd_per_prio *per_prio = &dd->per_prio[prio];
-
-               WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
-               WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
-       }
-
-       free_percpu(dd->stats);
-
-       kfree(dd);
-}
-
-/*
- * Initialize elevator private data (deadline_data) and associate with blkcg.
- */
-static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
-{
-       struct deadline_data *dd;
-       struct elevator_queue *eq;
-       enum dd_prio prio;
-       int ret = -ENOMEM;
-
-       /*
-        * Initialization would be very tricky if the queue is not frozen,
-        * hence the warning statement below.
-        */
-       WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter));
-
-       eq = elevator_alloc(q, e);
-       if (!eq)
-               return ret;
-
-       dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
-       if (!dd)
-               goto put_eq;
-
-       eq->elevator_data = dd;
-
-       dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
-                                    GFP_KERNEL | __GFP_ZERO);
-       if (!dd->stats)
-               goto free_dd;
-
-       dd->queue = q;
-
-       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
-               struct dd_per_prio *per_prio = &dd->per_prio[prio];
-
-               INIT_LIST_HEAD(&per_prio->dispatch);
-               INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
-               INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
-               per_prio->sort_list[DD_READ] = RB_ROOT;
-               per_prio->sort_list[DD_WRITE] = RB_ROOT;
-       }
-       dd->fifo_expire[DD_READ] = read_expire;
-       dd->fifo_expire[DD_WRITE] = write_expire;
-       dd->writes_starved = writes_starved;
-       dd->front_merges = 1;
-       dd->last_dir = DD_WRITE;
-       dd->fifo_batch = fifo_batch;
-       dd->aging_expire = aging_expire;
-       spin_lock_init(&dd->lock);
-       spin_lock_init(&dd->zone_lock);
-
-       ret = dd_activate_policy(q);
-       if (ret)
-               goto free_stats;
-
-       ret = 0;
-       q->elevator = eq;
-       return 0;
-
-free_stats:
-       free_percpu(dd->stats);
-
-free_dd:
-       kfree(dd);
-
-put_eq:
-       kobject_put(&eq->kobj);
-       return ret;
-}
-
-/*
- * Try to merge @bio into an existing request. If @bio has been merged into
- * an existing request, store the pointer to that request into *@rq.
- */
-static int dd_request_merge(struct request_queue *q, struct request **rq,
-                           struct bio *bio)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
-       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];
-       sector_t sector = bio_end_sector(bio);
-       struct request *__rq;
-
-       if (!dd->front_merges)
-               return ELEVATOR_NO_MERGE;
-
-       __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
-       if (__rq) {
-               BUG_ON(sector != blk_rq_pos(__rq));
-
-               if (elv_bio_merge_ok(__rq, bio)) {
-                       *rq = __rq;
-                       return ELEVATOR_FRONT_MERGE;
-               }
-       }
-
-       return ELEVATOR_NO_MERGE;
-}
-
-/*
- * Attempt to merge a bio into an existing request. This function is called
- * before @bio is associated with a request.
- */
-static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
-               unsigned int nr_segs)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct request *free = NULL;
-       bool ret;
-
-       spin_lock(&dd->lock);
-       ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
-       spin_unlock(&dd->lock);
-
-       if (free)
-               blk_mq_free_request(free);
-
-       return ret;
-}
-
-/*
- * add rq to rbtree and fifo
- */
-static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-                             bool at_head)
-{
-       struct request_queue *q = hctx->queue;
-       struct deadline_data *dd = q->elevator->elevator_data;
-       const enum dd_data_dir data_dir = rq_data_dir(rq);
-       u16 ioprio = req_get_ioprio(rq);
-       u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
-       struct dd_per_prio *per_prio;
-       enum dd_prio prio;
-       struct dd_blkcg *blkcg;
-       LIST_HEAD(free);
-
-       lockdep_assert_held(&dd->lock);
-
-       /*
-        * This may be a requeue of a write request that has locked its
-        * target zone. If it is the case, this releases the zone lock.
-        */
-       blk_req_zone_write_unlock(rq);
-
-       /*
-        * If a block cgroup has been associated with the submitter and if an
-        * I/O priority has been set in the associated block cgroup, use the
-        * lowest of the cgroup priority and the request priority for the
-        * request. If no priority has been set in the request, use the cgroup
-        * priority.
-        */
-       prio = ioprio_class_to_prio[ioprio_class];
-       dd_count(dd, inserted, prio);
-       blkcg = dd_blkcg_from_bio(rq->bio);
-       ddcg_count(blkcg, inserted, ioprio_class);
-       rq->elv.priv[0] = blkcg;
-
-       if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
-               blk_mq_free_requests(&free);
-               return;
-       }
-
-       trace_block_rq_insert(rq);
-
-       per_prio = &dd->per_prio[prio];
-       if (at_head) {
-               list_add(&rq->queuelist, &per_prio->dispatch);
-       } else {
-               deadline_add_rq_rb(per_prio, rq);
-
-               if (rq_mergeable(rq)) {
-                       elv_rqhash_add(q, rq);
-                       if (!q->last_merge)
-                               q->last_merge = rq;
-               }
-
-               /*
-                * set expire time and add to fifo list
-                */
-               rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
-               list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
-       }
-}
-
-/*
- * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
- */
-static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
-                              struct list_head *list, bool at_head)
-{
-       struct request_queue *q = hctx->queue;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       spin_lock(&dd->lock);
-       while (!list_empty(list)) {
-               struct request *rq;
-
-               rq = list_first_entry(list, struct request, queuelist);
-               list_del_init(&rq->queuelist);
-               dd_insert_request(hctx, rq, at_head);
-       }
-       spin_unlock(&dd->lock);
-}
-
-/* Callback from inside blk_mq_rq_ctx_init(). */
-static void dd_prepare_request(struct request *rq)
-{
-       rq->elv.priv[0] = NULL;
-}
-
-/*
- * Callback from inside blk_mq_free_request().
- *
- * For zoned block devices, write unlock the target zone of
- * completed write requests. Do this while holding the zone lock
- * spinlock so that the zone is never unlocked while deadline_fifo_request()
- * or deadline_next_request() are executing. This function is called for
- * all requests, whether or not these requests complete successfully.
- *
- * For a zoned block device, __dd_dispatch_request() may have stopped
- * dispatching requests if all the queued requests are write requests directed
- * at zones that are already locked due to on-going write requests. To ensure
- * write request dispatch progress in this case, mark the queue as needing a
- * restart to ensure that the queue is run again after completion of the
- * request and zones being unlocked.
- */
-static void dd_finish_request(struct request *rq)
-{
-       struct request_queue *q = rq->q;
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct dd_blkcg *blkcg = rq->elv.priv[0];
-       const u8 ioprio_class = dd_rq_ioclass(rq);
-       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];
-
-       dd_count(dd, completed, prio);
-       ddcg_count(blkcg, completed, ioprio_class);
-
-       if (blk_queue_is_zoned(q)) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&dd->zone_lock, flags);
-               blk_req_zone_write_unlock(rq);
-               if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
-                       blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
-               spin_unlock_irqrestore(&dd->zone_lock, flags);
-       }
-}
-
-static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
-{
-       return !list_empty_careful(&per_prio->dispatch) ||
-               !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
-               !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
-}
-
-static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
-{
-       struct deadline_data *dd = hctx->queue->elevator->elevator_data;
-       enum dd_prio prio;
-
-       for (prio = 0; prio <= DD_PRIO_MAX; prio++)
-               if (dd_has_work_for_prio(&dd->per_prio[prio]))
-                       return true;
-
-       return false;
-}
-
-/*
- * sysfs parts below
- */
-#define SHOW_INT(__FUNC, __VAR)                                                \
-static ssize_t __FUNC(struct elevator_queue *e, char *page)            \
-{                                                                      \
-       struct deadline_data *dd = e->elevator_data;                    \
-                                                                       \
-       return sysfs_emit(page, "%d\n", __VAR);                         \
-}
-#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
-SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
-SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
-SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
-SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
-SHOW_INT(deadline_front_merges_show, dd->front_merges);
-SHOW_INT(deadline_async_depth_show, dd->async_depth);
-SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
-#undef SHOW_INT
-#undef SHOW_JIFFIES
-
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
-static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)        \
-{                                                                      \
-       struct deadline_data *dd = e->elevator_data;                    \
-       int __data, __ret;                                              \
-                                                                       \
-       __ret = kstrtoint(page, 0, &__data);                            \
-       if (__ret < 0)                                                  \
-               return __ret;                                           \
-       if (__data < (MIN))                                             \
-               __data = (MIN);                                         \
-       else if (__data > (MAX))                                        \
-               __data = (MAX);                                         \
-       *(__PTR) = __CONV(__data);                                      \
-       return count;                                                   \
-}
-#define STORE_INT(__FUNC, __PTR, MIN, MAX)                             \
-       STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
-#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)                         \
-       STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
-STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
-STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
-STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
-STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
-STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
-STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
-STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
-#undef STORE_FUNCTION
-#undef STORE_INT
-#undef STORE_JIFFIES
-
-#define DD_ATTR(name) \
-       __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
-
-static struct elv_fs_entry deadline_attrs[] = {
-       DD_ATTR(read_expire),
-       DD_ATTR(write_expire),
-       DD_ATTR(writes_starved),
-       DD_ATTR(front_merges),
-       DD_ATTR(async_depth),
-       DD_ATTR(fifo_batch),
-       DD_ATTR(aging_expire),
-       __ATTR_NULL
-};
-
-#ifdef CONFIG_BLK_DEBUG_FS
-#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)              \
-static void *deadline_##name##_fifo_start(struct seq_file *m,          \
-                                         loff_t *pos)                  \
-       __acquires(&dd->lock)                                           \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
-                                                                       \
-       spin_lock(&dd->lock);                                           \
-       return seq_list_start(&per_prio->fifo_list[data_dir], *pos);    \
-}                                                                      \
-                                                                       \
-static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,  \
-                                        loff_t *pos)                   \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
-                                                                       \
-       return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);   \
-}                                                                      \
-                                                                       \
-static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)   \
-       __releases(&dd->lock)                                           \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-                                                                       \
-       spin_unlock(&dd->lock);                                         \
-}                                                                      \
-                                                                       \
-static const struct seq_operations deadline_##name##_fifo_seq_ops = {  \
-       .start  = deadline_##name##_fifo_start,                         \
-       .next   = deadline_##name##_fifo_next,                          \
-       .stop   = deadline_##name##_fifo_stop,                          \
-       .show   = blk_mq_debugfs_rq_show,                               \
-};                                                                     \
-                                                                       \
-static int deadline_##name##_next_rq_show(void *data,                  \
-                                         struct seq_file *m)           \
-{                                                                      \
-       struct request_queue *q = data;                                 \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
-       struct request *rq = per_prio->next_rq[data_dir];               \
-                                                                       \
-       if (rq)                                                         \
-               __blk_mq_debugfs_rq_show(m, rq);                        \
-       return 0;                                                       \
-}
-
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
-#undef DEADLINE_DEBUGFS_DDIR_ATTRS
-
-static int deadline_batching_show(void *data, struct seq_file *m)
-{
-       struct request_queue *q = data;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       seq_printf(m, "%u\n", dd->batching);
-       return 0;
-}
-
-static int deadline_starved_show(void *data, struct seq_file *m)
-{
-       struct request_queue *q = data;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       seq_printf(m, "%u\n", dd->starved);
-       return 0;
-}
-
-static int dd_async_depth_show(void *data, struct seq_file *m)
-{
-       struct request_queue *q = data;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       seq_printf(m, "%u\n", dd->async_depth);
-       return 0;
-}
-
-static int dd_queued_show(void *data, struct seq_file *m)
-{
-       struct request_queue *q = data;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
-                  dd_queued(dd, DD_BE_PRIO),
-                  dd_queued(dd, DD_IDLE_PRIO));
-       return 0;
-}
-
-/* Number of requests owned by the block driver for a given priority. */
-static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
-{
-       return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
-               - dd_sum(dd, completed, prio);
-}
-
-static int dd_owned_by_driver_show(void *data, struct seq_file *m)
-{
-       struct request_queue *q = data;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
-                  dd_owned_by_driver(dd, DD_BE_PRIO),
-                  dd_owned_by_driver(dd, DD_IDLE_PRIO));
-       return 0;
-}
-
-#define DEADLINE_DISPATCH_ATTR(prio)                                   \
-static void *deadline_dispatch##prio##_start(struct seq_file *m,       \
-                                            loff_t *pos)               \
-       __acquires(&dd->lock)                                           \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
-                                                                       \
-       spin_lock(&dd->lock);                                           \
-       return seq_list_start(&per_prio->dispatch, *pos);               \
-}                                                                      \
-                                                                       \
-static void *deadline_dispatch##prio##_next(struct seq_file *m,                \
-                                           void *v, loff_t *pos)       \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
-                                                                       \
-       return seq_list_next(v, &per_prio->dispatch, pos);              \
-}                                                                      \
-                                                                       \
-static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)        \
-       __releases(&dd->lock)                                           \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-                                                                       \
-       spin_unlock(&dd->lock);                                         \
-}                                                                      \
-                                                                       \
-static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
-       .start  = deadline_dispatch##prio##_start,                      \
-       .next   = deadline_dispatch##prio##_next,                       \
-       .stop   = deadline_dispatch##prio##_stop,                       \
-       .show   = blk_mq_debugfs_rq_show,                               \
-}
-
-DEADLINE_DISPATCH_ATTR(0);
-DEADLINE_DISPATCH_ATTR(1);
-DEADLINE_DISPATCH_ATTR(2);
-#undef DEADLINE_DISPATCH_ATTR
-
-#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                        \
-       {#name "_fifo_list", 0400,                                      \
-                       .seq_ops = &deadline_##name##_fifo_seq_ops}
-#define DEADLINE_NEXT_RQ_ATTR(name)                                    \
-       {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
-static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
-       DEADLINE_QUEUE_DDIR_ATTRS(read0),
-       DEADLINE_QUEUE_DDIR_ATTRS(write0),
-       DEADLINE_QUEUE_DDIR_ATTRS(read1),
-       DEADLINE_QUEUE_DDIR_ATTRS(write1),
-       DEADLINE_QUEUE_DDIR_ATTRS(read2),
-       DEADLINE_QUEUE_DDIR_ATTRS(write2),
-       DEADLINE_NEXT_RQ_ATTR(read0),
-       DEADLINE_NEXT_RQ_ATTR(write0),
-       DEADLINE_NEXT_RQ_ATTR(read1),
-       DEADLINE_NEXT_RQ_ATTR(write1),
-       DEADLINE_NEXT_RQ_ATTR(read2),
-       DEADLINE_NEXT_RQ_ATTR(write2),
-       {"batching", 0400, deadline_batching_show},
-       {"starved", 0400, deadline_starved_show},
-       {"async_depth", 0400, dd_async_depth_show},
-       {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
-       {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
-       {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
-       {"owned_by_driver", 0400, dd_owned_by_driver_show},
-       {"queued", 0400, dd_queued_show},
-       {},
-};
-#undef DEADLINE_QUEUE_DDIR_ATTRS
-#endif
-
-static struct elevator_type mq_deadline = {
-       .ops = {
-               .depth_updated          = dd_depth_updated,
-               .limit_depth            = dd_limit_depth,
-               .insert_requests        = dd_insert_requests,
-               .dispatch_request       = dd_dispatch_request,
-               .prepare_request        = dd_prepare_request,
-               .finish_request         = dd_finish_request,
-               .next_request           = elv_rb_latter_request,
-               .former_request         = elv_rb_former_request,
-               .bio_merge              = dd_bio_merge,
-               .request_merge          = dd_request_merge,
-               .requests_merged        = dd_merged_requests,
-               .request_merged         = dd_request_merged,
-               .has_work               = dd_has_work,
-               .init_sched             = dd_init_sched,
-               .exit_sched             = dd_exit_sched,
-               .init_hctx              = dd_init_hctx,
-       },
-
-#ifdef CONFIG_BLK_DEBUG_FS
-       .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
-#endif
-       .elevator_attrs = deadline_attrs,
-       .elevator_name = "mq-deadline",
-       .elevator_alias = "deadline",
-       .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
-       .elevator_owner = THIS_MODULE,
-};
-MODULE_ALIAS("mq-deadline-iosched");
-
-static int __init deadline_init(void)
-{
-       int ret;
-
-       ret = elv_register(&mq_deadline);
-       if (ret)
-               goto out;
-       ret = dd_blkcg_init();
-       if (ret)
-               goto unreg;
-
-out:
-       return ret;
-
-unreg:
-       elv_unregister(&mq_deadline);
-       goto out;
-}
-
-static void __exit deadline_exit(void)
-{
-       dd_blkcg_exit();
-       elv_unregister(&mq_deadline);
-}
-
-module_init(deadline_init);
-module_exit(deadline_exit);
-
-MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MQ deadline IO scheduler");
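
The scheduler body above (and the replacement file that follows) selects a dispatch direction the same way: reads are served first, but once pending writes have been passed over writes_starved times, a write is dispatched and the starvation counter resets. Here is a simplified standalone sketch of just that decision, with illustrative names (pick_dir, sched_state); it is not the kernel implementation and omits batching, FIFO expiry and zone locking.

/*
 * Standalone sketch (not the kernel code) of the direction selection in
 * __dd_dispatch_request(): reads are preferred, but a pending write is
 * dispatched once reads have starved writes writes_starved times.
 */
#include <stdio.h>

enum dir { PICK_NONE, PICK_READ, PICK_WRITE };

struct sched_state {
	int starved;		/* times reads have starved writes */
	int writes_starved;	/* starvation limit (default 2 above) */
};

static enum dir pick_dir(struct sched_state *s, int have_read, int have_write)
{
	if (have_read) {
		/* A write is waiting and has been starved long enough. */
		if (have_write && s->starved++ >= s->writes_starved)
			goto writes;
		return PICK_READ;
	}
	if (have_write) {
writes:
		s->starved = 0;
		return PICK_WRITE;
	}
	return PICK_NONE;
}

int main(void)
{
	struct sched_state s = { .starved = 0, .writes_starved = 2 };

	/* With both directions pending, the third decision goes to a write. */
	for (int i = 0; i < 4; i++)
		printf("decision %d: %s\n", i,
		       pick_dir(&s, 1, 1) == PICK_READ ? "read" : "write");
	return 0;
}

fifo_batch then controls how many consecutive requests may be taken in the chosen direction before this decision is revisited.
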
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
new file mode 100644 (file)
index 0000000..3c3693c
--- /dev/null
@@ -0,0 +1,1106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
+ *  for the blk-mq scheduling framework
+ *
+ *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <linux/sbitmap.h>
+
+#include <trace/events/block.h>
+
+#include "blk.h"
+#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
+#include "blk-mq-tag.h"
+#include "blk-mq-sched.h"
+
+/*
+ * See Documentation/block/deadline-iosched.rst
+ */
+static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
+static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
+static const int writes_starved = 2;    /* max times reads can starve a write */
+static const int fifo_batch = 16;       /* # of sequential requests treated as one
+                                    by the above parameters. For throughput. */
+
+enum dd_data_dir {
+       DD_READ         = READ,
+       DD_WRITE        = WRITE,
+};
+
+enum { DD_DIR_COUNT = 2 };
+
+enum dd_prio {
+       DD_RT_PRIO      = 0,
+       DD_BE_PRIO      = 1,
+       DD_IDLE_PRIO    = 2,
+       DD_PRIO_MAX     = 2,
+};
+
+enum { DD_PRIO_COUNT = 3 };
+
+/* I/O statistics per I/O priority. */
+struct io_stats_per_prio {
+       local_t inserted;
+       local_t merged;
+       local_t dispatched;
+       local_t completed;
+};
+
+/* I/O statistics for all I/O priorities (enum dd_prio). */
+struct io_stats {
+       struct io_stats_per_prio stats[DD_PRIO_COUNT];
+};
+
+/*
+ * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
+ * present on both sort_list[] and fifo_list[].
+ */
+struct dd_per_prio {
+       struct list_head dispatch;
+       struct rb_root sort_list[DD_DIR_COUNT];
+       struct list_head fifo_list[DD_DIR_COUNT];
+       /* Next request in FIFO order. Read, write or both are NULL. */
+       struct request *next_rq[DD_DIR_COUNT];
+};
+
+struct deadline_data {
+       /*
+        * run time data
+        */
+
+       struct dd_per_prio per_prio[DD_PRIO_COUNT];
+
+       /* Data direction of latest dispatched request. */
+       enum dd_data_dir last_dir;
+       unsigned int batching;          /* number of sequential requests made */
+       unsigned int starved;           /* times reads have starved writes */
+
+       struct io_stats __percpu *stats;
+
+       /*
+        * settings that change how the i/o scheduler behaves
+        */
+       int fifo_expire[DD_DIR_COUNT];
+       int fifo_batch;
+       int writes_starved;
+       int front_merges;
+       u32 async_depth;
+
+       spinlock_t lock;
+       spinlock_t zone_lock;
+};
+
+/* Count one event of type 'event_type' and with I/O priority 'prio' */
+#define dd_count(dd, event_type, prio) do {                            \
+       struct io_stats *io_stats = get_cpu_ptr((dd)->stats);           \
+                                                                       \
+       BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
+       BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
+       local_inc(&io_stats->stats[(prio)].event_type);                 \
+       put_cpu_ptr(io_stats);                                          \
+} while (0)
+
+/*
+ * Returns the total number of dd_count(dd, event_type, prio) calls across all
+ * CPUs. No locking or barriers since it is fine if the returned sum is slightly
+ * outdated.
+ */
+#define dd_sum(dd, event_type, prio) ({                                        \
+       unsigned int cpu;                                               \
+       u32 sum = 0;                                                    \
+                                                                       \
+       BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
+       BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
+       for_each_present_cpu(cpu)                                       \
+               sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->      \
+                                 stats[(prio)].event_type);            \
+       sum;                                                            \
+})
+
+/* Maps an I/O priority class to a deadline scheduler priority. */
+static const enum dd_prio ioprio_class_to_prio[] = {
+       [IOPRIO_CLASS_NONE]     = DD_BE_PRIO,
+       [IOPRIO_CLASS_RT]       = DD_RT_PRIO,
+       [IOPRIO_CLASS_BE]       = DD_BE_PRIO,
+       [IOPRIO_CLASS_IDLE]     = DD_IDLE_PRIO,
+};
+
+static inline struct rb_root *
+deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
+{
+       return &per_prio->sort_list[rq_data_dir(rq)];
+}
+
+/*
+ * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
+ * request.
+ */
+static u8 dd_rq_ioclass(struct request *rq)
+{
+       return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+}
+
+/*
+ * get the request after `rq' in sector-sorted order
+ */
+static inline struct request *
+deadline_latter_request(struct request *rq)
+{
+       struct rb_node *node = rb_next(&rq->rb_node);
+
+       if (node)
+               return rb_entry_rq(node);
+
+       return NULL;
+}
+
+static void
+deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
+{
+       struct rb_root *root = deadline_rb_root(per_prio, rq);
+
+       elv_rb_add(root, rq);
+}
+
+static inline void
+deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
+{
+       const enum dd_data_dir data_dir = rq_data_dir(rq);
+
+       if (per_prio->next_rq[data_dir] == rq)
+               per_prio->next_rq[data_dir] = deadline_latter_request(rq);
+
+       elv_rb_del(deadline_rb_root(per_prio, rq), rq);
+}
+
+/*
+ * remove rq from rbtree and fifo.
+ */
+static void deadline_remove_request(struct request_queue *q,
+                                   struct dd_per_prio *per_prio,
+                                   struct request *rq)
+{
+       list_del_init(&rq->queuelist);
+
+       /*
+        * We might not be on the rbtree, if we are doing an insert merge
+        */
+       if (!RB_EMPTY_NODE(&rq->rb_node))
+               deadline_del_rq_rb(per_prio, rq);
+
+       elv_rqhash_del(q, rq);
+       if (q->last_merge == rq)
+               q->last_merge = NULL;
+}
+
+static void dd_request_merged(struct request_queue *q, struct request *req,
+                             enum elv_merge type)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       const u8 ioprio_class = dd_rq_ioclass(req);
+       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+       /*
+        * if the merge was a front merge, we need to reposition request
+        */
+       if (type == ELEVATOR_FRONT_MERGE) {
+               elv_rb_del(deadline_rb_root(per_prio, req), req);
+               deadline_add_rq_rb(per_prio, req);
+       }
+}
+
+/*
+ * Callback function that is invoked after @next has been merged into @req.
+ */
+static void dd_merged_requests(struct request_queue *q, struct request *req,
+                              struct request *next)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       const u8 ioprio_class = dd_rq_ioclass(next);
+       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+
+       dd_count(dd, merged, prio);
+
+       /*
+        * if next expires before rq, assign its expire time to rq
+        * and move into next position (next will be deleted) in fifo
+        */
+       if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
+               if (time_before((unsigned long)next->fifo_time,
+                               (unsigned long)req->fifo_time)) {
+                       list_move(&req->queuelist, &next->queuelist);
+                       req->fifo_time = next->fifo_time;
+               }
+       }
+
+       /*
+        * kill knowledge of next, this one is a goner
+        */
+       deadline_remove_request(q, &dd->per_prio[prio], next);
+}
+
+/*
+ * move an entry to dispatch queue
+ */
+static void
+deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+                     struct request *rq)
+{
+       const enum dd_data_dir data_dir = rq_data_dir(rq);
+
+       per_prio->next_rq[data_dir] = deadline_latter_request(rq);
+
+       /*
+        * take it off the sort and fifo list
+        */
+       deadline_remove_request(rq->q, per_prio, rq);
+}
+
+/* Number of requests queued for a given priority level. */
+static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
+{
+       return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
+}
+
+/*
+ * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
+ * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
+ */
+static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
+                                     enum dd_data_dir data_dir)
+{
+       struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
+
+       /*
+        * rq is expired!
+        */
+       if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
+               return 1;
+
+       return 0;
+}
+
+/*
+ * For the specified data direction, return the next request to
+ * dispatch using arrival ordered lists.
+ */
+static struct request *
+deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+                     enum dd_data_dir data_dir)
+{
+       struct request *rq;
+       unsigned long flags;
+
+       if (list_empty(&per_prio->fifo_list[data_dir]))
+               return NULL;
+
+       rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
+       if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
+               return rq;
+
+       /*
+        * Look for a write request that can be dispatched, that is one with
+        * an unlocked target zone.
+        */
+       spin_lock_irqsave(&dd->zone_lock, flags);
+       list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
+               if (blk_req_can_dispatch_to_zone(rq))
+                       goto out;
+       }
+       rq = NULL;
+out:
+       spin_unlock_irqrestore(&dd->zone_lock, flags);
+
+       return rq;
+}
+
+/*
+ * For the specified data direction, return the next request to
+ * dispatch using sector position sorted lists.
+ */
+static struct request *
+deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+                     enum dd_data_dir data_dir)
+{
+       struct request *rq;
+       unsigned long flags;
+
+       rq = per_prio->next_rq[data_dir];
+       if (!rq)
+               return NULL;
+
+       if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
+               return rq;
+
+       /*
+        * Look for a write request that can be dispatched, that is one with
+        * an unlocked target zone.
+        */
+       spin_lock_irqsave(&dd->zone_lock, flags);
+       while (rq) {
+               if (blk_req_can_dispatch_to_zone(rq))
+                       break;
+               rq = deadline_latter_request(rq);
+       }
+       spin_unlock_irqrestore(&dd->zone_lock, flags);
+
+       return rq;
+}
+
+/*
+ * deadline_dispatch_requests selects the best request according to
+ * read/write expire, fifo_batch, etc
+ */
+static struct request *__dd_dispatch_request(struct deadline_data *dd,
+                                            struct dd_per_prio *per_prio)
+{
+       struct request *rq, *next_rq;
+       enum dd_data_dir data_dir;
+       enum dd_prio prio;
+       u8 ioprio_class;
+
+       lockdep_assert_held(&dd->lock);
+
+       if (!list_empty(&per_prio->dispatch)) {
+               rq = list_first_entry(&per_prio->dispatch, struct request,
+                                     queuelist);
+               list_del_init(&rq->queuelist);
+               goto done;
+       }
+
+       /*
+        * batches are currently reads XOR writes
+        */
+       rq = deadline_next_request(dd, per_prio, dd->last_dir);
+       if (rq && dd->batching < dd->fifo_batch)
+               /* we have a next request and are still entitled to batch */
+               goto dispatch_request;
+
+       /*
+        * at this point we are not running a batch. select the appropriate
+        * data direction (read / write)
+        */
+
+       if (!list_empty(&per_prio->fifo_list[DD_READ])) {
+               BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
+
+               if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
+                   (dd->starved++ >= dd->writes_starved))
+                       goto dispatch_writes;
+
+               data_dir = DD_READ;
+
+               goto dispatch_find_request;
+       }
+
+       /*
+        * Either there are no pending reads, or pending writes have been
+        * starved and must be dispatched first.
+        */
+
+       if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
+dispatch_writes:
+               BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
+
+               dd->starved = 0;
+
+               data_dir = DD_WRITE;
+
+               goto dispatch_find_request;
+       }
+
+       return NULL;
+
+dispatch_find_request:
+       /*
+        * we are not running a batch, find best request for selected data_dir
+        */
+       next_rq = deadline_next_request(dd, per_prio, data_dir);
+       if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
+               /*
+                * A deadline has expired, the last request was in the other
+                * direction, or we have run out of higher-sectored requests.
+                * Start again from the request with the earliest expiry time.
+                */
+               rq = deadline_fifo_request(dd, per_prio, data_dir);
+       } else {
+               /*
+                * The last req was the same dir and we have a next request in
+                * sort order. No expired requests so continue on from here.
+                */
+               rq = next_rq;
+       }
+
+       /*
+        * For a zoned block device, if we only have writes queued and none of
+        * them can be dispatched, rq will be NULL.
+        */
+       if (!rq)
+               return NULL;
+
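+       /* Start a new batch in the chosen direction. */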
+       dd->last_dir = data_dir;
+       dd->batching = 0;
+
+dispatch_request:
+       /*
+        * rq is the request selected for dispatch.
+        */
+       dd->batching++;
+       deadline_move_request(dd, per_prio, rq);
+done:
+       ioprio_class = dd_rq_ioclass(rq);
+       prio = ioprio_class_to_prio[ioprio_class];
+       dd_count(dd, dispatched, prio);
+       /*
+        * If the request needs its target zone locked, do it.
+        */
+       blk_req_zone_write_lock(rq);
+       rq->rq_flags |= RQF_STARTED;
+       return rq;
+}
+
+/*
+ * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
+ *
+ * One confusing aspect here is that we get called for a specific
+ * hardware queue, but we may return a request that is for a
+ * different hardware queue. This is because mq-deadline has shared
+ * state for all hardware queues, in terms of sorting, FIFOs, etc.
+ */
+static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+{
+       struct deadline_data *dd = hctx->queue->elevator->elevator_data;
+       struct request *rq;
+       enum dd_prio prio;
+
+       spin_lock(&dd->lock);
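+       /*
+        * Dispatch from the highest priority class that has requests
+        * pending: prio 0 (realtime) is tried first, DD_PRIO_MAX (idle)
+        * last.
+        */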
+       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
+               rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
+               if (rq)
+                       break;
+       }
+       spin_unlock(&dd->lock);
+
+       return rq;
+}
+
+/*
+ * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
+ * function is used by __blk_mq_get_tag().
+ */
+static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+{
+       struct deadline_data *dd = data->q->elevator->elevator_data;
+
+       /* Do not throttle synchronous reads. */
+       if (op_is_sync(op) && !op_is_write(op))
+               return;
+
+       /*
+        * Throttle asynchronous requests and writes such that these requests
+        * do not block the allocation of synchronous requests.
+        */
+       data->shallow_depth = dd->async_depth;
+}
+
+/* Called by blk_mq_update_nr_requests(). */
+static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
+{
+       struct request_queue *q = hctx->queue;
+       struct deadline_data *dd = q->elevator->elevator_data;
+       struct blk_mq_tags *tags = hctx->sched_tags;
+
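+       /*
+        * Limit asynchronous requests to ~3/4 of the scheduler tags so
+        * that roughly a quarter of the tags remains available for
+        * synchronous requests (see dd_limit_depth()).
+        */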
+       dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
+
+       sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
+}
+
+/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
+static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
+       dd_depth_updated(hctx);
+       return 0;
+}
+
+static void dd_exit_sched(struct elevator_queue *e)
+{
+       struct deadline_data *dd = e->elevator_data;
+       enum dd_prio prio;
+
+       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
+               struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+               WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
+               WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
+       }
+
+       free_percpu(dd->stats);
+
+       kfree(dd);
+}
+
+/*
+ * initialize elevator private data (deadline_data).
+ */
+static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
+{
+       struct deadline_data *dd;
+       struct elevator_queue *eq;
+       enum dd_prio prio;
+       int ret = -ENOMEM;
+
+       eq = elevator_alloc(q, e);
+       if (!eq)
+               return ret;
+
+       dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+       if (!dd)
+               goto put_eq;
+
+       eq->elevator_data = dd;
+
+       dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
+                                    GFP_KERNEL | __GFP_ZERO);
+       if (!dd->stats)
+               goto free_dd;
+
+       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
+               struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+               INIT_LIST_HEAD(&per_prio->dispatch);
+               INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
+               INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
+               per_prio->sort_list[DD_READ] = RB_ROOT;
+               per_prio->sort_list[DD_WRITE] = RB_ROOT;
+       }
+       dd->fifo_expire[DD_READ] = read_expire;
+       dd->fifo_expire[DD_WRITE] = write_expire;
+       dd->writes_starved = writes_starved;
+       dd->front_merges = 1;
+       dd->last_dir = DD_WRITE;
+       dd->fifo_batch = fifo_batch;
+       spin_lock_init(&dd->lock);
+       spin_lock_init(&dd->zone_lock);
+
+       q->elevator = eq;
+       return 0;
+
+free_dd:
+       kfree(dd);
+
+put_eq:
+       kobject_put(&eq->kobj);
+       return ret;
+}
+
+/*
+ * Try to merge @bio into an existing request. If @bio has been merged into
+ * an existing request, store the pointer to that request into *@rq.
+ */
+static int dd_request_merge(struct request_queue *q, struct request **rq,
+                           struct bio *bio)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
+       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];
+       sector_t sector = bio_end_sector(bio);
+       struct request *__rq;
+
+       if (!dd->front_merges)
+               return ELEVATOR_NO_MERGE;
+
+       __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
+       if (__rq) {
+               BUG_ON(sector != blk_rq_pos(__rq));
+
+               if (elv_bio_merge_ok(__rq, bio)) {
+                       *rq = __rq;
+                       if (blk_discard_mergable(__rq))
+                               return ELEVATOR_DISCARD_MERGE;
+                       return ELEVATOR_FRONT_MERGE;
+               }
+       }
+
+       return ELEVATOR_NO_MERGE;
+}
+
+/*
+ * Attempt to merge a bio into an existing request. This function is called
+ * before @bio is associated with a request.
+ */
+static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
+               unsigned int nr_segs)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       struct request *free = NULL;
+       bool ret;
+
+       spin_lock(&dd->lock);
+       ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
+       spin_unlock(&dd->lock);
+
+       if (free)
+               blk_mq_free_request(free);
+
+       return ret;
+}
+
+/*
+ * add rq to rbtree and fifo
+ */
+static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+                             bool at_head)
+{
+       struct request_queue *q = hctx->queue;
+       struct deadline_data *dd = q->elevator->elevator_data;
+       const enum dd_data_dir data_dir = rq_data_dir(rq);
+       u16 ioprio = req_get_ioprio(rq);
+       u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
+       struct dd_per_prio *per_prio;
+       enum dd_prio prio;
+       LIST_HEAD(free);
+
+       lockdep_assert_held(&dd->lock);
+
+       /*
+        * This may be a requeue of a write request that has locked its
+        * target zone. If it is the case, this releases the zone lock.
+        */
+       blk_req_zone_write_unlock(rq);
+
+       prio = ioprio_class_to_prio[ioprio_class];
+       dd_count(dd, inserted, prio);
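+       /*
+        * Mark the request as having been inserted through this scheduler
+        * so that dd_finish_request() knows the "inserted" statistic above
+        * has a matching "completed" update to perform.
+        */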
+       rq->elv.priv[0] = (void *)(uintptr_t)1;
+
+       if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
+               blk_mq_free_requests(&free);
+               return;
+       }
+
+       trace_block_rq_insert(rq);
+
+       per_prio = &dd->per_prio[prio];
+       if (at_head) {
+               list_add(&rq->queuelist, &per_prio->dispatch);
+       } else {
+               deadline_add_rq_rb(per_prio, rq);
+
+               if (rq_mergeable(rq)) {
+                       elv_rqhash_add(q, rq);
+                       if (!q->last_merge)
+                               q->last_merge = rq;
+               }
+
+               /*
+                * set expire time and add to fifo list
+                */
+               rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
+               list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
+       }
+}
+
+/*
+ * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
+ */
+static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
+                              struct list_head *list, bool at_head)
+{
+       struct request_queue *q = hctx->queue;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       spin_lock(&dd->lock);
+       while (!list_empty(list)) {
+               struct request *rq;
+
+               rq = list_first_entry(list, struct request, queuelist);
+               list_del_init(&rq->queuelist);
+               dd_insert_request(hctx, rq, at_head);
+       }
+       spin_unlock(&dd->lock);
+}
+
+/* Callback from inside blk_mq_rq_ctx_init(). */
+static void dd_prepare_request(struct request *rq)
+{
+       rq->elv.priv[0] = NULL;
+}
+
+/*
+ * Callback from inside blk_mq_free_request().
+ *
+ * For zoned block devices, write unlock the target zone of
+ * completed write requests. Do this while holding the zone lock
+ * spinlock so that the zone is never unlocked while deadline_fifo_request()
+ * or deadline_next_request() are executing. This function is called for
+ * all requests, whether or not these requests complete successfully.
+ *
+ * For a zoned block device, __dd_dispatch_request() may have stopped
+ * dispatching requests if all the queued requests are write requests directed
+ * at zones that are already locked due to on-going write requests. To ensure
+ * write request dispatch progress in this case, mark the queue as needing a
+ * restart to ensure that the queue is run again after completion of the
+ * request and zones being unlocked.
+ */
+static void dd_finish_request(struct request *rq)
+{
+       struct request_queue *q = rq->q;
+       struct deadline_data *dd = q->elevator->elevator_data;
+       const u8 ioprio_class = dd_rq_ioclass(rq);
+       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+       /*
+        * The block layer core may call dd_finish_request() without having
+        * called dd_insert_requests(). Hence only update statistics for
+        * requests for which dd_insert_requests() has been called. See also
+        * blk_mq_request_bypass_insert().
+        */
+       if (rq->elv.priv[0])
+               dd_count(dd, completed, prio);
+
+       if (blk_queue_is_zoned(q)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dd->zone_lock, flags);
+               blk_req_zone_write_unlock(rq);
+               if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
+                       blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
+               spin_unlock_irqrestore(&dd->zone_lock, flags);
+       }
+}
+
+static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
+{
+       return !list_empty_careful(&per_prio->dispatch) ||
+               !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
+               !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
+}
+
+static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
+{
+       struct deadline_data *dd = hctx->queue->elevator->elevator_data;
+       enum dd_prio prio;
+
+       for (prio = 0; prio <= DD_PRIO_MAX; prio++)
+               if (dd_has_work_for_prio(&dd->per_prio[prio]))
+                       return true;
+
+       return false;
+}
+
+/*
+ * sysfs parts below
+ */
+#define SHOW_INT(__FUNC, __VAR)                                                \
+static ssize_t __FUNC(struct elevator_queue *e, char *page)            \
+{                                                                      \
+       struct deadline_data *dd = e->elevator_data;                    \
+                                                                       \
+       return sysfs_emit(page, "%d\n", __VAR);                         \
+}
+#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
+SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
+SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
+SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
+SHOW_INT(deadline_front_merges_show, dd->front_merges);
+SHOW_INT(deadline_async_depth_show, dd->async_depth);
+SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
+#undef SHOW_INT
+#undef SHOW_JIFFIES
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)        \
+{                                                                      \
+       struct deadline_data *dd = e->elevator_data;                    \
+       int __data, __ret;                                              \
+                                                                       \
+       __ret = kstrtoint(page, 0, &__data);                            \
+       if (__ret < 0)                                                  \
+               return __ret;                                           \
+       if (__data < (MIN))                                             \
+               __data = (MIN);                                         \
+       else if (__data > (MAX))                                        \
+               __data = (MAX);                                         \
+       *(__PTR) = __CONV(__data);                                      \
+       return count;                                                   \
+}
+#define STORE_INT(__FUNC, __PTR, MIN, MAX)                             \
+       STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
+#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)                         \
+       STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
+STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
+STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
+STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
+STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
+STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
+STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
+#undef STORE_FUNCTION
+#undef STORE_INT
+#undef STORE_JIFFIES
+
+#define DD_ATTR(name) \
+       __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
+
+static struct elv_fs_entry deadline_attrs[] = {
+       DD_ATTR(read_expire),
+       DD_ATTR(write_expire),
+       DD_ATTR(writes_starved),
+       DD_ATTR(front_merges),
+       DD_ATTR(async_depth),
+       DD_ATTR(fifo_batch),
+       __ATTR_NULL
+};
+
+#ifdef CONFIG_BLK_DEBUG_FS
+#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)              \
+static void *deadline_##name##_fifo_start(struct seq_file *m,          \
+                                         loff_t *pos)                  \
+       __acquires(&dd->lock)                                           \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
+                                                                       \
+       spin_lock(&dd->lock);                                           \
+       return seq_list_start(&per_prio->fifo_list[data_dir], *pos);    \
+}                                                                      \
+                                                                       \
+static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,  \
+                                        loff_t *pos)                   \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
+                                                                       \
+       return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);   \
+}                                                                      \
+                                                                       \
+static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)   \
+       __releases(&dd->lock)                                           \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+                                                                       \
+       spin_unlock(&dd->lock);                                         \
+}                                                                      \
+                                                                       \
+static const struct seq_operations deadline_##name##_fifo_seq_ops = {  \
+       .start  = deadline_##name##_fifo_start,                         \
+       .next   = deadline_##name##_fifo_next,                          \
+       .stop   = deadline_##name##_fifo_stop,                          \
+       .show   = blk_mq_debugfs_rq_show,                               \
+};                                                                     \
+                                                                       \
+static int deadline_##name##_next_rq_show(void *data,                  \
+                                         struct seq_file *m)           \
+{                                                                      \
+       struct request_queue *q = data;                                 \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
+       struct request *rq = per_prio->next_rq[data_dir];               \
+                                                                       \
+       if (rq)                                                         \
+               __blk_mq_debugfs_rq_show(m, rq);                        \
+       return 0;                                                       \
+}
+
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
+#undef DEADLINE_DEBUGFS_DDIR_ATTRS
+
+static int deadline_batching_show(void *data, struct seq_file *m)
+{
+       struct request_queue *q = data;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       seq_printf(m, "%u\n", dd->batching);
+       return 0;
+}
+
+static int deadline_starved_show(void *data, struct seq_file *m)
+{
+       struct request_queue *q = data;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       seq_printf(m, "%u\n", dd->starved);
+       return 0;
+}
+
+static int dd_async_depth_show(void *data, struct seq_file *m)
+{
+       struct request_queue *q = data;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       seq_printf(m, "%u\n", dd->async_depth);
+       return 0;
+}
+
+static int dd_queued_show(void *data, struct seq_file *m)
+{
+       struct request_queue *q = data;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
+                  dd_queued(dd, DD_BE_PRIO),
+                  dd_queued(dd, DD_IDLE_PRIO));
+       return 0;
+}
+
+/* Number of requests owned by the block driver for a given priority. */
+static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
+{
+       return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
+               - dd_sum(dd, completed, prio);
+}
+
+static int dd_owned_by_driver_show(void *data, struct seq_file *m)
+{
+       struct request_queue *q = data;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
+                  dd_owned_by_driver(dd, DD_BE_PRIO),
+                  dd_owned_by_driver(dd, DD_IDLE_PRIO));
+       return 0;
+}
+
+#define DEADLINE_DISPATCH_ATTR(prio)                                   \
+static void *deadline_dispatch##prio##_start(struct seq_file *m,       \
+                                            loff_t *pos)               \
+       __acquires(&dd->lock)                                           \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
+                                                                       \
+       spin_lock(&dd->lock);                                           \
+       return seq_list_start(&per_prio->dispatch, *pos);               \
+}                                                                      \
+                                                                       \
+static void *deadline_dispatch##prio##_next(struct seq_file *m,                \
+                                           void *v, loff_t *pos)       \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
+                                                                       \
+       return seq_list_next(v, &per_prio->dispatch, pos);              \
+}                                                                      \
+                                                                       \
+static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)        \
+       __releases(&dd->lock)                                           \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+                                                                       \
+       spin_unlock(&dd->lock);                                         \
+}                                                                      \
+                                                                       \
+static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
+       .start  = deadline_dispatch##prio##_start,                      \
+       .next   = deadline_dispatch##prio##_next,                       \
+       .stop   = deadline_dispatch##prio##_stop,                       \
+       .show   = blk_mq_debugfs_rq_show,                               \
+}
+
+DEADLINE_DISPATCH_ATTR(0);
+DEADLINE_DISPATCH_ATTR(1);
+DEADLINE_DISPATCH_ATTR(2);
+#undef DEADLINE_DISPATCH_ATTR
+
+#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                        \
+       {#name "_fifo_list", 0400,                                      \
+                       .seq_ops = &deadline_##name##_fifo_seq_ops}
+#define DEADLINE_NEXT_RQ_ATTR(name)                                    \
+       {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
+static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
+       DEADLINE_QUEUE_DDIR_ATTRS(read0),
+       DEADLINE_QUEUE_DDIR_ATTRS(write0),
+       DEADLINE_QUEUE_DDIR_ATTRS(read1),
+       DEADLINE_QUEUE_DDIR_ATTRS(write1),
+       DEADLINE_QUEUE_DDIR_ATTRS(read2),
+       DEADLINE_QUEUE_DDIR_ATTRS(write2),
+       DEADLINE_NEXT_RQ_ATTR(read0),
+       DEADLINE_NEXT_RQ_ATTR(write0),
+       DEADLINE_NEXT_RQ_ATTR(read1),
+       DEADLINE_NEXT_RQ_ATTR(write1),
+       DEADLINE_NEXT_RQ_ATTR(read2),
+       DEADLINE_NEXT_RQ_ATTR(write2),
+       {"batching", 0400, deadline_batching_show},
+       {"starved", 0400, deadline_starved_show},
+       {"async_depth", 0400, dd_async_depth_show},
+       {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
+       {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
+       {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
+       {"owned_by_driver", 0400, dd_owned_by_driver_show},
+       {"queued", 0400, dd_queued_show},
+       {},
+};
+#undef DEADLINE_QUEUE_DDIR_ATTRS
+#endif
+
+static struct elevator_type mq_deadline = {
+       .ops = {
+               .depth_updated          = dd_depth_updated,
+               .limit_depth            = dd_limit_depth,
+               .insert_requests        = dd_insert_requests,
+               .dispatch_request       = dd_dispatch_request,
+               .prepare_request        = dd_prepare_request,
+               .finish_request         = dd_finish_request,
+               .next_request           = elv_rb_latter_request,
+               .former_request         = elv_rb_former_request,
+               .bio_merge              = dd_bio_merge,
+               .request_merge          = dd_request_merge,
+               .requests_merged        = dd_merged_requests,
+               .request_merged         = dd_request_merged,
+               .has_work               = dd_has_work,
+               .init_sched             = dd_init_sched,
+               .exit_sched             = dd_exit_sched,
+               .init_hctx              = dd_init_hctx,
+       },
+
+#ifdef CONFIG_BLK_DEBUG_FS
+       .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
+#endif
+       .elevator_attrs = deadline_attrs,
+       .elevator_name = "mq-deadline",
+       .elevator_alias = "deadline",
+       .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
+       .elevator_owner = THIS_MODULE,
+};
+MODULE_ALIAS("mq-deadline-iosched");
+
+static int __init deadline_init(void)
+{
+       return elv_register(&mq_deadline);
+}
+
+static void __exit deadline_exit(void)
+{
+       elv_unregister(&mq_deadline);
+}
+
+module_init(deadline_init);
+module_exit(deadline_exit);
+
+MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MQ deadline IO scheduler");
index 6e2a649..278593b 100644 (file)
@@ -264,7 +264,6 @@ config SYSV68_PARTITION
 
 config CMDLINE_PARTITION
        bool "Command line partition support" if PARTITION_ADVANCED
-       select BLK_CMDLINE_PARSER
        help
          Say Y here if you want to read the partition table from bootargs.
          The format for the command line is just like mtdparts.
index c64c57b..2c381c6 100644 (file)
@@ -275,7 +275,7 @@ int adfspart_check_ADFS(struct parsed_partitions *state)
        /*
         * Work out start of non-adfs partition.
         */
-       nr_sects = (state->bdev->bd_inode->i_size >> 9) - start_sect;
+       nr_sects = get_capacity(state->disk) - start_sect;
 
        if (start_sect) {
                switch (id) {
@@ -540,7 +540,7 @@ int adfspart_check_EESOX(struct parsed_partitions *state)
        if (i != 0) {
                sector_t size;
 
-               size = get_capacity(state->bdev->bd_disk);
+               size = get_capacity(state->disk);
                put_partition(state, slot++, start, size - start);
                strlcat(state->pp_buf, "\n", PAGE_SIZE);
        }
index c7b4fd1..85f4b96 100644 (file)
@@ -66,22 +66,6 @@ struct pvd {
 
 #define LVM_MAXLVS 256
 
-/**
- * last_lba(): return number of last logical block of device
- * @bdev: block device
- *
- * Description: Returns last LBA value on success, 0 on error.
- * This is stored (by sd and ide-geometry) in
- *  the part[0] entry for this disk, and is the number of
- *  physical sectors available on the disk.
- */
-static u64 last_lba(struct block_device *bdev)
-{
-       if (!bdev || !bdev->bd_inode)
-               return 0;
-       return (bdev->bd_inode->i_size >> 9) - 1ULL;
-}
-
 /**
  * read_lba(): Read bytes from disk, starting at given LBA
  * @state
@@ -89,7 +73,7 @@ static u64 last_lba(struct block_device *bdev)
  * @buffer
  * @count
  *
- * Description:  Reads @count bytes from @state->bdev into @buffer.
+ * Description:  Reads @count bytes from @state->disk into @buffer.
  * Returns number of bytes read on success, 0 on error.
  */
 static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
@@ -97,7 +81,7 @@ static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
 {
        size_t totalreadcount = 0;
 
-       if (!buffer || lba + count / 512 > last_lba(state->bdev))
+       if (!buffer || lba + count / 512 > get_capacity(state->disk) - 1ULL)
                return 0;
 
        while (count) {
index 9526491..5c8624e 100644 (file)
@@ -34,7 +34,6 @@ int amiga_partition(struct parsed_partitions *state)
        int start_sect, nr_sects, blk, part, res = 0;
        int blksize = 1;        /* Multiplier for disk block size */
        int slot = 1;
-       char b[BDEVNAME_SIZE];
 
        for (blk = 0; ; blk++, put_dev_sector(sect)) {
                if (blk == RDB_ALLOCATION_LIMIT)
@@ -42,7 +41,7 @@ int amiga_partition(struct parsed_partitions *state)
                data = read_part_sector(state, blk, &sect);
                if (!data) {
                        pr_err("Dev %s: unable to read RDB block %d\n",
-                              bdevname(state->bdev, b), blk);
+                              state->disk->disk_name, blk);
                        res = -1;
                        goto rdb_done;
                }
@@ -64,7 +63,7 @@ int amiga_partition(struct parsed_partitions *state)
                }
 
                pr_err("Dev %s: RDB in block %d has bad checksum\n",
-                      bdevname(state->bdev, b), blk);
+                      state->disk->disk_name, blk);
        }
 
        /* blksize is blocks per 512 byte standard block */
@@ -84,7 +83,7 @@ int amiga_partition(struct parsed_partitions *state)
                data = read_part_sector(state, blk, &sect);
                if (!data) {
                        pr_err("Dev %s: unable to read partition block %d\n",
-                              bdevname(state->bdev, b), blk);
+                              state->disk->disk_name, blk);
                        res = -1;
                        goto rdb_done;
                }
index 2305840..da59941 100644 (file)
@@ -47,7 +47,7 @@ int atari_partition(struct parsed_partitions *state)
         * ATARI partition scheme supports 512 lba only.  If this is not
         * the case, bail early to avoid miscalculating hd_size.
         */
-       if (bdev_logical_block_size(state->bdev) != 512)
+       if (queue_logical_block_size(state->disk->queue) != 512)
                return 0;
 
        rs = read_part_sector(state, 0, &sect);
@@ -55,7 +55,7 @@ int atari_partition(struct parsed_partitions *state)
                return -1;
 
        /* Verify this is an Atari rootsector: */
-       hd_size = state->bdev->bd_inode->i_size >> 9;
+       hd_size = get_capacity(state->disk);
        if (!VALID_PARTITION(&rs->part[0], hd_size) &&
            !VALID_PARTITION(&rs->part[1], hd_size) &&
            !VALID_PARTITION(&rs->part[2], hd_size) &&
index c577e9e..d5b28e3 100644 (file)
@@ -9,7 +9,7 @@
  * description.
  */
 struct parsed_partitions {
-       struct block_device *bdev;
+       struct gendisk *disk;
        char name[BDEVNAME_SIZE];
        struct {
                sector_t from;
index 8f545c3..1af610f 100644 (file)
  * For further information, see "Documentation/block/cmdline-partition.rst"
  *
  */
+#include <linux/blkdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include "check.h"
 
-#include <linux/cmdline-parser.h>
 
-#include "check.h"
+/* partition flags */
+#define PF_RDONLY                   0x01 /* Device is read only */
+#define PF_POWERUP_LOCK             0x02 /* Always locked after reset */
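+
+/*
+ * Partitions are described on the kernel command line (see
+ * "Documentation/block/cmdline-partition.rst"):
+ *
+ *   blkdevparts=<blkdev-def>[;<blkdev-def>]
+ *     <blkdev-def> := <blkdev-id>:<partdef>[,<partdef>]
+ *     <partdef>    := <size>[@<offset>](part-name)
+ *
+ * parse_subpart() below additionally accepts an optional "ro" and/or "lk"
+ * suffix per partition, which sets PF_RDONLY / PF_POWERUP_LOCK.
+ */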
+
+struct cmdline_subpart {
+       char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
+       sector_t from;
+       sector_t size;
+       int flags;
+       struct cmdline_subpart *next_subpart;
+};
+
+struct cmdline_parts {
+       char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
+       unsigned int nr_subparts;
+       struct cmdline_subpart *subpart;
+       struct cmdline_parts *next_parts;
+};
+
+static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
+{
+       int ret = 0;
+       struct cmdline_subpart *new_subpart;
+
+       *subpart = NULL;
+
+       new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL);
+       if (!new_subpart)
+               return -ENOMEM;
+
+       if (*partdef == '-') {
+               new_subpart->size = (sector_t)(~0ULL);
+               partdef++;
+       } else {
+               new_subpart->size = (sector_t)memparse(partdef, &partdef);
+               if (new_subpart->size < (sector_t)PAGE_SIZE) {
+                       pr_warn("cmdline partition size is invalid.");
+                       ret = -EINVAL;
+                       goto fail;
+               }
+       }
+
+       if (*partdef == '@') {
+               partdef++;
+               new_subpart->from = (sector_t)memparse(partdef, &partdef);
+       } else {
+               new_subpart->from = (sector_t)(~0ULL);
+       }
+
+       if (*partdef == '(') {
+               int length;
+               char *next = strchr(++partdef, ')');
+
+               if (!next) {
+                       pr_warn("cmdline partition format is invalid.");
+                       ret = -EINVAL;
+                       goto fail;
+               }
+
+               length = min_t(int, next - partdef,
+                              sizeof(new_subpart->name) - 1);
+               strncpy(new_subpart->name, partdef, length);
+               new_subpart->name[length] = '\0';
+
+               partdef = ++next;
+       } else
+               new_subpart->name[0] = '\0';
+
+       new_subpart->flags = 0;
+
+       if (!strncmp(partdef, "ro", 2)) {
+               new_subpart->flags |= PF_RDONLY;
+               partdef += 2;
+       }
+
+       if (!strncmp(partdef, "lk", 2)) {
+               new_subpart->flags |= PF_POWERUP_LOCK;
+               partdef += 2;
+       }
+
+       *subpart = new_subpart;
+       return 0;
+fail:
+       kfree(new_subpart);
+       return ret;
+}
+
+static void free_subpart(struct cmdline_parts *parts)
+{
+       struct cmdline_subpart *subpart;
+
+       while (parts->subpart) {
+               subpart = parts->subpart;
+               parts->subpart = subpart->next_subpart;
+               kfree(subpart);
+       }
+}
+
+static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
+{
+       int ret = -EINVAL;
+       char *next;
+       int length;
+       struct cmdline_subpart **next_subpart;
+       struct cmdline_parts *newparts;
+       char buf[BDEVNAME_SIZE + 32 + 4];
+
+       *parts = NULL;
+
+       newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL);
+       if (!newparts)
+               return -ENOMEM;
+
+       next = strchr(bdevdef, ':');
+       if (!next) {
+               pr_warn("cmdline partition has no block device.");
+               goto fail;
+       }
+
+       length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
+       strncpy(newparts->name, bdevdef, length);
+       newparts->name[length] = '\0';
+       newparts->nr_subparts = 0;
+
+       next_subpart = &newparts->subpart;
+
+       while (next && *(++next)) {
+               bdevdef = next;
+               next = strchr(bdevdef, ',');
+
+               length = (!next) ? (sizeof(buf) - 1) :
+                       min_t(int, next - bdevdef, sizeof(buf) - 1);
+
+               strncpy(buf, bdevdef, length);
+               buf[length] = '\0';
+
+               ret = parse_subpart(next_subpart, buf);
+               if (ret)
+                       goto fail;
+
+               newparts->nr_subparts++;
+               next_subpart = &(*next_subpart)->next_subpart;
+       }
+
+       if (!newparts->subpart) {
+               pr_warn("cmdline partition has no valid partition.");
+               ret = -EINVAL;
+               goto fail;
+       }
+
+       *parts = newparts;
+
+       return 0;
+fail:
+       free_subpart(newparts);
+       kfree(newparts);
+       return ret;
+}
+
+static void cmdline_parts_free(struct cmdline_parts **parts)
+{
+       struct cmdline_parts *next_parts;
+
+       while (*parts) {
+               next_parts = (*parts)->next_parts;
+               free_subpart(*parts);
+               kfree(*parts);
+               *parts = next_parts;
+       }
+}
+
+static int cmdline_parts_parse(struct cmdline_parts **parts,
+               const char *cmdline)
+{
+       int ret;
+       char *buf;
+       char *pbuf;
+       char *next;
+       struct cmdline_parts **next_parts;
+
+       *parts = NULL;
+
+       next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       next_parts = parts;
+
+       while (next && *pbuf) {
+               next = strchr(pbuf, ';');
+               if (next)
+                       *next = '\0';
+
+               ret = parse_parts(next_parts, pbuf);
+               if (ret)
+                       goto fail;
+
+               if (next)
+                       pbuf = ++next;
+
+               next_parts = &(*next_parts)->next_parts;
+       }
+
+       if (!*parts) {
+               pr_warn("cmdline partition has no valid partition.");
+               ret = -EINVAL;
+               goto fail;
+       }
+
+       ret = 0;
+done:
+       kfree(buf);
+       return ret;
+
+fail:
+       cmdline_parts_free(parts);
+       goto done;
+}
+
+static struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+                                        const char *bdev)
+{
+       while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
+               parts = parts->next_parts;
+       return parts;
+}
 
 static char *cmdline;
 static struct cmdline_parts *bdev_parts;
 
-static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
+static int add_part(int slot, struct cmdline_subpart *subpart,
+               struct parsed_partitions *state)
 {
        int label_min;
        struct partition_meta_info *info;
        char tmp[sizeof(info->volname) + 4];
-       struct parsed_partitions *state = (struct parsed_partitions *)param;
 
        if (slot >= state->limit)
                return 1;
@@ -50,6 +278,35 @@ static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
        return 0;
 }
 
+static int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+               struct parsed_partitions *state)
+{
+       sector_t from = 0;
+       struct cmdline_subpart *subpart;
+       int slot = 1;
+
+       for (subpart = parts->subpart; subpart;
+            subpart = subpart->next_subpart, slot++) {
+               if (subpart->from == (sector_t)(~0ULL))
+                       subpart->from = from;
+               else
+                       from = subpart->from;
+
+               if (from >= disk_size)
+                       break;
+
+               if (subpart->size > (disk_size - from))
+                       subpart->size = disk_size - from;
+
+               from += subpart->size;
+
+               if (add_part(slot, subpart, state))
+                       break;
+       }
+
+       return slot;
+}
+
 static int __init cmdline_parts_setup(char *s)
 {
        cmdline = s;
@@ -123,7 +380,6 @@ static void cmdline_parts_verifier(int slot, struct parsed_partitions *state)
 int cmdline_partition(struct parsed_partitions *state)
 {
        sector_t disk_size;
-       char bdev[BDEVNAME_SIZE];
        struct cmdline_parts *parts;
 
        if (cmdline) {
@@ -140,14 +396,13 @@ int cmdline_partition(struct parsed_partitions *state)
        if (!bdev_parts)
                return 0;
 
-       bdevname(state->bdev, bdev);
-       parts = cmdline_parts_find(bdev_parts, bdev);
+       parts = cmdline_parts_find(bdev_parts, state->disk->disk_name);
        if (!parts)
                return 0;
 
-       disk_size = get_capacity(state->bdev->bd_disk) << 9;
+       disk_size = get_capacity(state->disk) << 9;
 
-       cmdline_parts_set(parts, disk_size, 1, add_part, (void *)state);
+       cmdline_parts_set(parts, disk_size, state);
        cmdline_parts_verifier(1, state);
 
        strlcat(state->pp_buf, "\n", PAGE_SIZE);
index 4230d4f..58c4c36 100644 (file)
@@ -135,8 +135,8 @@ static struct parsed_partitions *check_partition(struct gendisk *hd)
        }
        state->pp_buf[0] = '\0';
 
-       state->bdev = hd->part0;
-       disk_name(hd, 0, state->name);
+       state->disk = hd;
+       snprintf(state->name, BDEVNAME_SIZE, "%s", hd->disk_name);
        snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
        if (isdigit(state->name[strlen(state->name)-1]))
                sprintf(state->name, "p");
@@ -259,9 +259,8 @@ static const struct attribute_group *part_attr_groups[] = {
 
 static void part_release(struct device *dev)
 {
-       if (MAJOR(dev->devt) == BLOCK_EXT_MAJOR)
-               blk_free_ext_minor(MINOR(dev->devt));
-       bdput(dev_to_bdev(dev));
+       put_disk(dev_to_bdev(dev)->bd_disk);
+       iput(dev_to_bdev(dev)->bd_inode);
 }
 
 static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -281,12 +280,10 @@ struct device_type part_type = {
        .uevent         = part_uevent,
 };
 
-/*
- * Must be called either with open_mutex held, before a disk can be opened or
- * after all disk users are gone.
- */
 static void delete_partition(struct block_device *part)
 {
+       lockdep_assert_held(&part->bd_disk->open_mutex);
+
        fsync_bdev(part);
        __invalidate_device(part, true);
 
@@ -351,20 +348,17 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
        if (xa_load(&disk->part_tbl, partno))
                return ERR_PTR(-EBUSY);
 
+       /* ensure we always have a reference to the whole disk */
+       get_device(disk_to_dev(disk));
+
+       err = -ENOMEM;
        bdev = bdev_alloc(disk, partno);
        if (!bdev)
-               return ERR_PTR(-ENOMEM);
+               goto out_put_disk;
 
        bdev->bd_start_sect = start;
        bdev_set_nr_sectors(bdev, len);
 
-       if (info) {
-               err = -ENOMEM;
-               bdev->bd_meta_info = kmemdup(info, sizeof(*info), GFP_KERNEL);
-               if (!bdev->bd_meta_info)
-                       goto out_bdput;
-       }
-
        pdev = &bdev->bd_device;
        dname = dev_name(ddev);
        if (isdigit(dname[strlen(dname) - 1]))
@@ -388,6 +382,13 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
        }
        pdev->devt = devt;
 
+       if (info) {
+               err = -ENOMEM;
+               bdev->bd_meta_info = kmemdup(info, sizeof(*info), GFP_KERNEL);
+               if (!bdev->bd_meta_info)
+                       goto out_put;
+       }
+
        /* delay uevent until 'holders' subdir is created */
        dev_set_uevent_suppress(pdev, 1);
        err = device_add(pdev);
@@ -417,14 +418,13 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
                kobject_uevent(&pdev->kobj, KOBJ_ADD);
        return bdev;
 
-out_bdput:
-       bdput(bdev);
-       return ERR_PTR(err);
 out_del:
        kobject_put(bdev->bd_holder_dir);
        device_del(pdev);
 out_put:
        put_device(pdev);
+out_put_disk:
+       put_disk(disk);
        return ERR_PTR(err);
 }
 
@@ -449,15 +449,14 @@ static bool partition_overlaps(struct gendisk *disk, sector_t start,
        return overlap;
 }
 
-int bdev_add_partition(struct block_device *bdev, int partno,
-               sector_t start, sector_t length)
+int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
+               sector_t length)
 {
        struct block_device *part;
-       struct gendisk *disk = bdev->bd_disk;
        int ret;
 
        mutex_lock(&disk->open_mutex);
-       if (!(disk->flags & GENHD_FL_UP)) {
+       if (!disk_live(disk)) {
                ret = -ENXIO;
                goto out;
        }
@@ -475,13 +474,13 @@ out:
        return ret;
 }
 
-int bdev_del_partition(struct block_device *bdev, int partno)
+int bdev_del_partition(struct gendisk *disk, int partno)
 {
        struct block_device *part = NULL;
        int ret = -ENXIO;
 
-       mutex_lock(&bdev->bd_disk->open_mutex);
-       part = xa_load(&bdev->bd_disk->part_tbl, partno);
+       mutex_lock(&disk->open_mutex);
+       part = xa_load(&disk->part_tbl, partno);
        if (!part)
                goto out_unlock;
 
@@ -492,18 +491,18 @@ int bdev_del_partition(struct block_device *bdev, int partno)
        delete_partition(part);
        ret = 0;
 out_unlock:
-       mutex_unlock(&bdev->bd_disk->open_mutex);
+       mutex_unlock(&disk->open_mutex);
        return ret;
 }
 
-int bdev_resize_partition(struct block_device *bdev, int partno,
-               sector_t start, sector_t length)
+int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
+               sector_t length)
 {
        struct block_device *part = NULL;
        int ret = -ENXIO;
 
-       mutex_lock(&bdev->bd_disk->open_mutex);
-       part = xa_load(&bdev->bd_disk->part_tbl, partno);
+       mutex_lock(&disk->open_mutex);
+       part = xa_load(&disk->part_tbl, partno);
        if (!part)
                goto out_unlock;
 
@@ -512,14 +511,14 @@ int bdev_resize_partition(struct block_device *bdev, int partno,
                goto out_unlock;
 
        ret = -EBUSY;
-       if (partition_overlaps(bdev->bd_disk, start, length, partno))
+       if (partition_overlaps(disk, start, length, partno))
                goto out_unlock;
 
        bdev_set_nr_sectors(part, length);
 
        ret = 0;
 out_unlock:
-       mutex_unlock(&bdev->bd_disk->open_mutex);
+       mutex_unlock(&disk->open_mutex);
        return ret;
 }
 
@@ -667,7 +666,7 @@ int bdev_disk_changed(struct gendisk *disk, bool invalidate)
 
        lockdep_assert_held(&disk->open_mutex);
 
-       if (!(disk->flags & GENHD_FL_UP))
+       if (!disk_live(disk))
                return -ENXIO;
 
 rescan:
@@ -715,10 +714,10 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed);
 
 void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
 {
-       struct address_space *mapping = state->bdev->bd_inode->i_mapping;
+       struct address_space *mapping = state->disk->part0->bd_inode->i_mapping;
        struct page *page;
 
-       if (n >= get_capacity(state->bdev->bd_disk)) {
+       if (n >= get_capacity(state->disk)) {
                state->access_beyond_eod = true;
                return NULL;
        }
index e271679..7ca5c4c 100644 (file)
@@ -124,19 +124,17 @@ efi_crc32(const void *buf, unsigned long len)
 
 /**
  * last_lba(): return number of last logical block of device
- * @bdev: block device
+ * @disk: block device
  * 
  * Description: Returns last LBA value on success, 0 on error.
  * This is stored (by sd and ide-geometry) in
  *  the part[0] entry for this disk, and is the number of
  *  physical sectors available on the disk.
  */
-static u64 last_lba(struct block_device *bdev)
+static u64 last_lba(struct gendisk *disk)
 {
-       if (!bdev || !bdev->bd_inode)
-               return 0;
-       return div_u64(bdev->bd_inode->i_size,
-                      bdev_logical_block_size(bdev)) - 1ULL;
+       return div_u64(disk->part0->bd_inode->i_size,
+                      queue_logical_block_size(disk->queue)) - 1ULL;
 }
 
 static inline int pmbr_part_valid(gpt_mbr_record *part)
@@ -231,17 +229,17 @@ done:
  * @buffer: destination buffer
  * @count: bytes to read
  *
- * Description: Reads @count bytes from @state->bdev into @buffer.
+ * Description: Reads @count bytes from @state->disk into @buffer.
  * Returns number of bytes read on success, 0 on error.
  */
 static size_t read_lba(struct parsed_partitions *state,
                       u64 lba, u8 *buffer, size_t count)
 {
        size_t totalreadcount = 0;
-       struct block_device *bdev = state->bdev;
-       sector_t n = lba * (bdev_logical_block_size(bdev) / 512);
+       sector_t n = lba *
+               (queue_logical_block_size(state->disk->queue) / 512);
 
-       if (!buffer || lba > last_lba(bdev))
+       if (!buffer || lba > last_lba(state->disk))
                 return 0;
 
        while (count) {
@@ -302,14 +300,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
  * @lba: the Logical Block Address of the partition table
  * 
  * Description: returns GPT header on success, NULL on error.   Allocates
- * and fills a GPT header starting at @ from @state->bdev.
+ * and fills a GPT header starting at @ from @state->disk.
  * Note: remember to free gpt when finished with it.
  */
 static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
                                         u64 lba)
 {
        gpt_header *gpt;
-       unsigned ssz = bdev_logical_block_size(state->bdev);
+       unsigned ssz = queue_logical_block_size(state->disk->queue);
 
        gpt = kmalloc(ssz, GFP_KERNEL);
        if (!gpt)
@@ -356,10 +354,10 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
 
        /* Check the GUID Partition Table header size is too big */
        if (le32_to_cpu((*gpt)->header_size) >
-                       bdev_logical_block_size(state->bdev)) {
+                       queue_logical_block_size(state->disk->queue)) {
                pr_debug("GUID Partition Table Header size is too large: %u > %u\n",
                        le32_to_cpu((*gpt)->header_size),
-                       bdev_logical_block_size(state->bdev));
+                       queue_logical_block_size(state->disk->queue));
                goto fail;
        }
 
@@ -395,7 +393,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
        /* Check the first_usable_lba and last_usable_lba are
         * within the disk.
         */
-       lastlba = last_lba(state->bdev);
+       lastlba = last_lba(state->disk);
        if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) {
                pr_debug("GPT: first_usable_lba incorrect: %lld > %lld\n",
                         (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba),
@@ -587,13 +585,15 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
        gpt_header *pgpt = NULL, *agpt = NULL;
        gpt_entry *pptes = NULL, *aptes = NULL;
        legacy_mbr *legacymbr;
-       sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9;
+       struct gendisk *disk = state->disk;
+       const struct block_device_operations *fops = disk->fops;
+       sector_t total_sectors = get_capacity(state->disk);
        u64 lastlba;
 
        if (!ptes)
                return 0;
 
-       lastlba = last_lba(state->bdev);
+       lastlba = last_lba(state->disk);
         if (!force_gpt) {
                /* This will be added to the EFI Spec. per Intel after v1.02. */
                legacymbr = kzalloc(sizeof(*legacymbr), GFP_KERNEL);
@@ -621,6 +621,16 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
         if (!good_agpt && force_gpt)
                 good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes);
 
+       if (!good_agpt && force_gpt && fops->alternative_gpt_sector) {
+               sector_t agpt_sector;
+               int err;
+
+               err = fops->alternative_gpt_sector(disk, &agpt_sector);
+               if (!err)
+                       good_agpt = is_gpt_valid(state, agpt_sector,
+                                                &agpt, &aptes);
+       }
+
         /* The obviously unsuccessful case */
         if (!good_pgpt && !good_agpt)
                 goto fail;
@@ -705,7 +715,7 @@ int efi_partition(struct parsed_partitions *state)
        gpt_header *gpt = NULL;
        gpt_entry *ptes = NULL;
        u32 i;
-       unsigned ssz = bdev_logical_block_size(state->bdev) / 512;
+       unsigned ssz = queue_logical_block_size(state->disk->queue) / 512;
 
        if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) {
                kfree(gpt);
@@ -722,7 +732,7 @@ int efi_partition(struct parsed_partitions *state)
                u64 size = le64_to_cpu(ptes[i].ending_lba) -
                           le64_to_cpu(ptes[i].starting_lba) + 1ULL;
 
-               if (!is_pte_valid(&ptes[i], last_lba(state->bdev)))
+               if (!is_pte_valid(&ptes[i], last_lba(state->disk)))
                        continue;
 
                put_partition(state, i+1, start * ssz, size * ssz);
index 4b044e6..9bca396 100644 (file)
@@ -290,8 +290,8 @@ static int find_cms1_partitions(struct parsed_partitions *state,
 int ibm_partition(struct parsed_partitions *state)
 {
        int (*fn)(struct gendisk *disk, dasd_information2_t *info);
-       struct block_device *bdev = state->bdev;
-       struct gendisk *disk = bdev->bd_disk;
+       struct gendisk *disk = state->disk;
+       struct block_device *bdev = disk->part0;
        int blocksize, res;
        loff_t i_size, offset, size;
        dasd_information2_t *info;
index cc86534..27f6c7d 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
  * ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
  *
  * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
@@ -304,7 +304,7 @@ static bool ldm_validate_privheads(struct parsed_partitions *state,
                }
        }
 
-       num_sects = state->bdev->bd_inode->i_size >> 9;
+       num_sects = get_capacity(state->disk);
 
        if ((ph[0]->config_start > num_sects) ||
           ((ph[0]->config_start + ph[0]->config_size) > num_sects)) {
@@ -339,11 +339,11 @@ out:
 /**
  * ldm_validate_tocblocks - Validate the table of contents and its backups
  * @state: Partition check state including device holding the LDM Database
- * @base:  Offset, into @state->bdev, of the database
+ * @base:  Offset, into @state->disk, of the database
  * @ldb:   Cache of the database structures
  *
  * Find and compare the four tables of contents of the LDM Database stored on
- * @state->bdev and return the parsed information into @toc1.
+ * @state->disk and return the parsed information into @toc1.
  *
  * The offsets and sizes of the configs are range-checked against a privhead.
  *
@@ -486,8 +486,8 @@ out:
  *       only likely to happen if the underlying device is strange.  If that IS
  *       the case we should return zero to let someone else try.
  *
- * Return:  'true'   @state->bdev is a dynamic disk
- *          'false'  @state->bdev is not a dynamic disk, or an error occurred
+ * Return:  'true'   @state->disk is a dynamic disk
+ *          'false'  @state->disk is not a dynamic disk, or an error occurred
  */
 static bool ldm_validate_partition_table(struct parsed_partitions *state)
 {
@@ -1340,7 +1340,7 @@ static bool ldm_frag_commit (struct list_head *frags, struct ldmdb *ldb)
 /**
  * ldm_get_vblks - Read the on-disk database of VBLKs into memory
  * @state: Partition check state including device holding the LDM Database
- * @base:  Offset, into @state->bdev, of the database
+ * @base:  Offset, into @state->disk, of the database
  * @ldb:   Cache of the database structures
  *
  * To use the information from the VBLKs, they need to be read from the disk,
@@ -1432,10 +1432,10 @@ static void ldm_free_vblks (struct list_head *lh)
  * example, if the device is hda, we would have: hda1: LDM database, hda2, hda3,
  * and so on: the actual data containing partitions.
  *
- * Return:  1 Success, @state->bdev is a dynamic disk and we handled it
- *          0 Success, @state->bdev is not a dynamic disk
+ * Return:  1 Success, @state->disk is a dynamic disk and we handled it
+ *          0 Success, @state->disk is not a dynamic disk
  *         -1 An error occurred before enough information had been read
- *            Or @state->bdev is a dynamic disk, but it may be corrupted
+ *            Or @state->disk is a dynamic disk, but it may be corrupted
  */
 int ldm_partition(struct parsed_partitions *state)
 {
index b609533..7b521df 100644 (file)
@@ -133,7 +133,7 @@ int mac_partition(struct parsed_partitions *state)
        }
 #ifdef CONFIG_PPC_PMAC
        if (found_root_goodness)
-               note_bootable_part(state->bdev->bd_dev, found_root,
+               note_bootable_part(state->disk->part0->bd_dev, found_root,
                                   found_root_goodness);
 #endif
 
index f510259..b5d5c22 100644 (file)
@@ -135,11 +135,12 @@ static void parse_extended(struct parsed_partitions *state,
        Sector sect;
        unsigned char *data;
        sector_t this_sector, this_size;
-       sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
+       sector_t sector_size;
        int loopct = 0;         /* number of links followed
                                   without finding a data partition */
        int i;
 
+       sector_size = queue_logical_block_size(state->disk->queue) / 512;
        this_sector = first_sector;
        this_size = first_size;
 
@@ -579,7 +580,7 @@ static struct {
 
 int msdos_partition(struct parsed_partitions *state)
 {
-       sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
+       sector_t sector_size;
        Sector sect;
        unsigned char *data;
        struct msdos_partition *p;
@@ -587,6 +588,7 @@ int msdos_partition(struct parsed_partitions *state)
        int slot;
        u32 disksig;
 
+       sector_size = queue_logical_block_size(state->disk->queue) / 512;
        data = read_part_sector(state, 0, &sect);
        if (!data)
                return -1;
index 4273f1b..9cc6b8c 100644 (file)
@@ -43,7 +43,6 @@ int sgi_partition(struct parsed_partitions *state)
        Sector sect;
        struct sgi_disklabel *label;
        struct sgi_partition *p;
-       char b[BDEVNAME_SIZE];
 
        label = read_part_sector(state, 0, &sect);
        if (!label)
@@ -52,7 +51,7 @@ int sgi_partition(struct parsed_partitions *state)
        magic = label->magic_mushroom;
        if(be32_to_cpu(magic) != SGI_LABEL_MAGIC) {
                /*printk("Dev %s SGI disklabel: bad magic %08x\n",
-                      bdevname(bdev, b), be32_to_cpu(magic));*/
+                      state->disk->disk_name, be32_to_cpu(magic));*/
                put_dev_sector(sect);
                return 0;
        }
@@ -63,7 +62,7 @@ int sgi_partition(struct parsed_partitions *state)
        }
        if(csum) {
                printk(KERN_WARNING "Dev %s SGI disklabel: csum bad, label corrupted\n",
-                      bdevname(state->bdev, b));
+                      state->disk->disk_name);
                put_dev_sector(sect);
                return 0;
        }
index 47dc53e..ddf9e6d 100644 (file)
@@ -65,7 +65,6 @@ int sun_partition(struct parsed_partitions *state)
        } * label;
        struct sun_partition *p;
        unsigned long spc;
-       char b[BDEVNAME_SIZE];
        int use_vtoc;
        int nparts;
 
@@ -76,7 +75,7 @@ int sun_partition(struct parsed_partitions *state)
        p = label->partitions;
        if (be16_to_cpu(label->magic) != SUN_LABEL_MAGIC) {
 /*             printk(KERN_INFO "Dev %s Sun disklabel: bad magic %04x\n",
-                      bdevname(bdev, b), be16_to_cpu(label->magic)); */
+                      state->disk->disk_name, be16_to_cpu(label->magic)); */
                put_dev_sector(sect);
                return 0;
        }
@@ -86,7 +85,7 @@ int sun_partition(struct parsed_partitions *state)
                csum ^= *ush--;
        if (csum) {
                printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
-                      bdevname(state->bdev, b));
+                      state->disk->disk_name);
                put_dev_sector(sect);
                return 0;
        }
index d910534..00c203b 100644 (file)
@@ -147,11 +147,10 @@ static void t10_pi_type1_prepare(struct request *rq)
                        break;
 
                bip_for_each_vec(iv, bip, iter) {
-                       void *p, *pmap;
                        unsigned int j;
+                       void *p;
 
-                       pmap = kmap_atomic(iv.bv_page);
-                       p = pmap + iv.bv_offset;
+                       p = bvec_kmap_local(&iv);
                        for (j = 0; j < iv.bv_len; j += tuple_sz) {
                                struct t10_pi_tuple *pi = p;
 
@@ -161,8 +160,7 @@ static void t10_pi_type1_prepare(struct request *rq)
                                ref_tag++;
                                p += tuple_sz;
                        }
-
-                       kunmap_atomic(pmap);
+                       kunmap_local(p);
                }
 
                bip->bip_flags |= BIP_MAPPED_INTEGRITY;
@@ -195,11 +193,10 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
                struct bvec_iter iter;
 
                bip_for_each_vec(iv, bip, iter) {
-                       void *p, *pmap;
                        unsigned int j;
+                       void *p;
 
-                       pmap = kmap_atomic(iv.bv_page);
-                       p = pmap + iv.bv_offset;
+                       p = bvec_kmap_local(&iv);
                        for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
                                struct t10_pi_tuple *pi = p;
 
@@ -210,8 +207,7 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
                                intervals--;
                                p += tuple_sz;
                        }
-
-                       kunmap_atomic(pmap);
+                       kunmap_local(p);
                }
        }
 }
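
Both t10-pi hunks above replace the open-coded kmap_atomic() plus bv_offset arithmetic with the bvec_kmap_local() helper, which returns a mapping already adjusted to the vector's offset; the matching kunmap_local() then takes that returned pointer rather than the page mapping. A rough sketch of the pattern in isolation, with the actual byte processing left as a placeholder:

        /* sketch only; assumes <linux/bvec.h> and <linux/highmem.h> */
        static void sketch_walk_bvec(struct bio_vec *iv)
        {
                /* offset-adjusted local mapping of the bio_vec */
                void *p = bvec_kmap_local(iv);

                /* ... operate on the iv->bv_len bytes starting at p ... */

                kunmap_local(p);        /* unmap using the returned pointer */
        }
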
index f4e6111..ae7f2e8 100644 (file)
@@ -15,6 +15,32 @@ config MODULE_SIG_KEY
          then the kernel will automatically generate the private key and
          certificate as described in Documentation/admin-guide/module-signing.rst
 
+choice
+       prompt "Type of module signing key to be generated"
+       default MODULE_SIG_KEY_TYPE_RSA
+       help
+        The type of module signing key to generate. This option
+        does not apply if a PKCS#11 URI is used.
+
+config MODULE_SIG_KEY_TYPE_RSA
+       bool "RSA"
+       depends on MODULE_SIG || (IMA_APPRAISE_MODSIG && MODULES)
+       help
+        Use an RSA key for module signing.
+
+config MODULE_SIG_KEY_TYPE_ECDSA
+       bool "ECDSA"
+       select CRYPTO_ECDSA
+       depends on MODULE_SIG || (IMA_APPRAISE_MODSIG && MODULES)
+       help
+        Use an elliptic curve key (NIST P384) for module signing. Consider
+        using a strong hash like sha256 or sha384 for hashing modules.
+
+        Note: Remove all ECDSA signing keys, e.g. certs/signing_key.pem,
+        when falling back to building Linux 5.14 and older kernels.
+
+endchoice
+
 config SYSTEM_TRUSTED_KEYRING
        bool "Provide system-wide ring of trusted keys"
        depends on KEYS
index 359239a..2794337 100644 (file)
@@ -57,11 +57,31 @@ endif
 redirect_openssl       = 2>&1
 quiet_redirect_openssl = 2>&1
 silent_redirect_openssl = 2>/dev/null
+openssl_available       = $(shell openssl help 2>/dev/null && echo yes)
 
 # We do it this way rather than having a boolean option for enabling an
 # external private key, because 'make randconfig' might enable such a
 # boolean option and we unfortunately can't make it depend on !RANDCONFIG.
 ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
+
+ifeq ($(openssl_available),yes)
+X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null)
+endif
+
+# Support user changing key type
+ifdef CONFIG_MODULE_SIG_KEY_TYPE_ECDSA
+keytype_openssl = -newkey ec -pkeyopt ec_paramgen_curve:secp384r1
+ifeq ($(openssl_available),yes)
+$(if $(findstring id-ecPublicKey,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
+endif
+endif # CONFIG_MODULE_SIG_KEY_TYPE_ECDSA
+
+ifdef CONFIG_MODULE_SIG_KEY_TYPE_RSA
+ifeq ($(openssl_available),yes)
+$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
+endif
+endif # CONFIG_MODULE_SIG_KEY_TYPE_RSA
+
 $(obj)/signing_key.pem: $(obj)/x509.genkey
        @$(kecho) "###"
        @$(kecho) "### Now generating an X.509 key pair to be used for signing modules."
@@ -75,6 +95,7 @@ $(obj)/signing_key.pem: $(obj)/x509.genkey
                -batch -x509 -config $(obj)/x509.genkey \
                -outform PEM -out $(obj)/signing_key.pem \
                -keyout $(obj)/signing_key.pem \
+               $(keytype_openssl) \
                $($(quiet)redirect_openssl)
        @$(kecho) "###"
        @$(kecho) "### Key pair generated."
index ca3b02d..536df4b 100644 (file)
@@ -1547,6 +1547,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
 config CRYPTO_SM4
        tristate "SM4 cipher algorithm"
        select CRYPTO_ALGAPI
+       select CRYPTO_LIB_SM4
        help
          SM4 cipher algorithms (OSCCA GB/T 32907-2016).
 
@@ -1569,6 +1570,49 @@ config CRYPTO_SM4
 
          If unsure, say N.
 
+config CRYPTO_SM4_AESNI_AVX_X86_64
+       tristate "SM4 cipher algorithm (x86_64/AES-NI/AVX)"
+       depends on X86 && 64BIT
+       select CRYPTO_SKCIPHER
+       select CRYPTO_SIMD
+       select CRYPTO_ALGAPI
+       select CRYPTO_LIB_SM4
+       help
+         SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX).
+
+         SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+         Organization of State Commercial Administration of China (OSCCA)
+         as an authorized cryptographic algorithm for use within China.
+
+         This is an SM4 implementation optimized with the AES-NI/AVX/x86_64
+         instruction set for the block cipher. Through two affine transforms,
+         the AES S-Box can be used to simulate the SM4 S-Box, which is how
+         the instruction acceleration is achieved.
+
+         If unsure, say N.
+
+config CRYPTO_SM4_AESNI_AVX2_X86_64
+       tristate "SM4 cipher algorithm (x86_64/AES-NI/AVX2)"
+       depends on X86 && 64BIT
+       select CRYPTO_SKCIPHER
+       select CRYPTO_SIMD
+       select CRYPTO_ALGAPI
+       select CRYPTO_LIB_SM4
+       select CRYPTO_SM4_AESNI_AVX_X86_64
+       help
+         SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX2).
+
+         SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+         Organization of State Commercial Administration of China (OSCCA)
+         as an authorized cryptographic algorithm for use within China.
+
+         This is an SM4 implementation optimized with the AES-NI/AVX2/x86_64
+         instruction set for the block cipher. Through two affine transforms,
+         the AES S-Box can be used to simulate the SM4 S-Box, which is how
+         the instruction acceleration is achieved.
+
+         If unsure, say N.
+
 config CRYPTO_TEA
        tristate "TEA, XTEA and XETA cipher algorithms"
        depends on CRYPTO_USER_API_ENABLE_OBSOLETE
@@ -1768,7 +1812,7 @@ config CRYPTO_DRBG_HMAC
        bool
        default y
        select CRYPTO_HMAC
-       select CRYPTO_SHA256
+       select CRYPTO_SHA512
 
 config CRYPTO_DRBG_HASH
        bool "Enable Hash DRBG"
index 10526d4..c633f15 100644 (file)
@@ -74,7 +74,6 @@ obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
 obj-$(CONFIG_CRYPTO_MD5) += md5.o
 obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
-obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
 obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
 obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
index 967329e..6592279 100644 (file)
@@ -269,6 +269,14 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
                ctx->sinfo->sig->pkey_algo = "rsa";
                ctx->sinfo->sig->encoding = "pkcs1";
                break;
+       case OID_id_ecdsa_with_sha1:
+       case OID_id_ecdsa_with_sha224:
+       case OID_id_ecdsa_with_sha256:
+       case OID_id_ecdsa_with_sha384:
+       case OID_id_ecdsa_with_sha512:
+               ctx->sinfo->sig->pkey_algo = "ecdsa";
+               ctx->sinfo->sig->encoding = "x962";
+               break;
        default:
                printk("Unsupported pkey algo: %u\n", ctx->last_oid);
                return -ENOPKG;
index a006132..1350e8e 100644 (file)
@@ -27,6 +27,7 @@
 #define _CRYPTO_ECC_H
 
 #include <crypto/ecc_curve.h>
+#include <asm/unaligned.h>
 
 /* One digit is u64 qword. */
 #define ECC_CURVE_NIST_P192_DIGITS  3
  * @out:      Output array
  * @ndigits:  Number of digits to copy
  */
-static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits)
+static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
 {
        const __be64 *src = (__force __be64 *)in;
        int i;
 
        for (i = 0; i < ndigits; i++)
-               out[i] = be64_to_cpu(src[ndigits - 1 - i]);
+               out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
 }
 
 /**
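
With the hunk above, ecc_swap_digits() takes a void pointer and reads each big-endian digit via get_unaligned_be64(), so raw key material no longer needs to be 8-byte aligned before conversion. A small usage sketch, where raw_be stands in for a caller-provided buffer:

        /* sketch: convert a raw big-endian NIST P-256 coordinate (32 bytes,
         * possibly unaligned) into the little-endian u64 digits used by the
         * ecc code; raw_be is a hypothetical input buffer */
        u64 digits[ECC_CURVE_NIST_P256_DIGITS];

        ecc_swap_digits(raw_be, digits, ECC_CURVE_NIST_P256_DIGITS);
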
index c72d72a..be70e76 100644 (file)
@@ -143,9 +143,6 @@ sha512_transform(u64 *state, const u8 *input)
 
        state[0] += a; state[1] += b; state[2] += c; state[3] += d;
        state[4] += e; state[5] += f; state[6] += g; state[7] += h;
-
-       /* erase our data */
-       a = b = c = d = e = f = g = h = t1 = t2 = 0;
 }
 
 static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
index a153762..4182111 100644 (file)
@@ -431,7 +431,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
 
 static int skcipher_walk_first(struct skcipher_walk *walk)
 {
-       if (WARN_ON_ONCE(in_irq()))
+       if (WARN_ON_ONCE(in_hardirq()))
                return -EDEADLK;
 
        walk->buffer = NULL;
index 016dbc5..4a6480a 100644 (file)
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
-static const u32 fk[4] = {
-       0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
-};
-
-static const u8 sbox[256] = {
-       0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
-       0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
-       0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
-       0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
-       0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
-       0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
-       0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
-       0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
-       0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
-       0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
-       0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
-       0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
-       0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
-       0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
-       0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
-       0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
-       0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
-       0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
-       0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
-       0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
-       0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
-       0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
-       0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
-       0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
-       0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
-       0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
-       0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
-       0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
-       0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
-       0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
-       0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
-       0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
-};
-
-static const u32 ck[] = {
-       0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
-       0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
-       0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
-       0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
-       0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
-       0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
-       0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
-       0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
-};
-
-static u32 sm4_t_non_lin_sub(u32 x)
-{
-       int i;
-       u8 *b = (u8 *)&x;
-
-       for (i = 0; i < 4; ++i)
-               b[i] = sbox[b[i]];
-
-       return x;
-}
-
-static u32 sm4_key_lin_sub(u32 x)
-{
-       return x ^ rol32(x, 13) ^ rol32(x, 23);
-
-}
-
-static u32 sm4_enc_lin_sub(u32 x)
-{
-       return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
-}
-
-static u32 sm4_key_sub(u32 x)
-{
-       return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
-}
-
-static u32 sm4_enc_sub(u32 x)
-{
-       return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
-}
-
-static u32 sm4_round(const u32 *x, const u32 rk)
-{
-       return x[0] ^ sm4_enc_sub(x[1] ^ x[2] ^ x[3] ^ rk);
-}
-
-
 /**
- * crypto_sm4_expand_key - Expands the SM4 key as described in GB/T 32907-2016
- * @ctx:       The location where the computed key will be stored.
- * @in_key:    The supplied key.
- * @key_len:   The length of the supplied key.
- *
- * Returns 0 on success. The function fails only if an invalid key size (or
- * pointer) is supplied.
- */
-int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
-                         unsigned int key_len)
-{
-       u32 rk[4], t;
-       const u32 *key = (u32 *)in_key;
-       int i;
-
-       if (key_len != SM4_KEY_SIZE)
-               return -EINVAL;
-
-       for (i = 0; i < 4; ++i)
-               rk[i] = get_unaligned_be32(&key[i]) ^ fk[i];
-
-       for (i = 0; i < 32; ++i) {
-               t = rk[0] ^ sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i]);
-               ctx->rkey_enc[i] = t;
-               rk[0] = rk[1];
-               rk[1] = rk[2];
-               rk[2] = rk[3];
-               rk[3] = t;
-       }
-
-       for (i = 0; i < 32; ++i)
-               ctx->rkey_dec[i] = ctx->rkey_enc[31 - i];
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_sm4_expand_key);
-
-/**
- * crypto_sm4_set_key - Set the SM4 key.
+ * sm4_setkey - Set the SM4 key.
  * @tfm:       The %crypto_tfm that is used in the context.
  * @in_key:    The input key.
  * @key_len:   The size of the key.
  *
- * This function uses crypto_sm4_expand_key() to expand the key.
- * &crypto_sm4_ctx _must_ be the private data embedded in @tfm which is
+ * This function uses sm4_expandkey() to expand the key.
+ * &sm4_ctx _must_ be the private data embedded in @tfm which is
  * retrieved with crypto_tfm_ctx().
  *
  * Return: 0 on success; -EINVAL on failure (only happens for bad key lengths)
  */
-int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int sm4_setkey(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
 {
-       struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       return crypto_sm4_expand_key(ctx, in_key, key_len);
-}
-EXPORT_SYMBOL_GPL(crypto_sm4_set_key);
-
-static void sm4_do_crypt(const u32 *rk, u32 *out, const u32 *in)
-{
-       u32 x[4], i, t;
-
-       for (i = 0; i < 4; ++i)
-               x[i] = get_unaligned_be32(&in[i]);
-
-       for (i = 0; i < 32; ++i) {
-               t = sm4_round(x, rk[i]);
-               x[0] = x[1];
-               x[1] = x[2];
-               x[2] = x[3];
-               x[3] = t;
-       }
+       struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       for (i = 0; i < 4; ++i)
-               put_unaligned_be32(x[3 - i], &out[i]);
+       return sm4_expandkey(ctx, in_key, key_len);
 }
 
 /* encrypt a block of text */
 
-void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+static void sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       sm4_do_crypt(ctx->rkey_enc, (u32 *)out, (u32 *)in);
+       sm4_crypt_block(ctx->rkey_enc, out, in);
 }
-EXPORT_SYMBOL_GPL(crypto_sm4_encrypt);
 
 /* decrypt a block of text */
 
-void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+static void sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       sm4_do_crypt(ctx->rkey_dec, (u32 *)out, (u32 *)in);
+       sm4_crypt_block(ctx->rkey_dec, out, in);
 }
-EXPORT_SYMBOL_GPL(crypto_sm4_decrypt);
 
 static struct crypto_alg sm4_alg = {
        .cra_name               =       "sm4",
@@ -208,15 +60,15 @@ static struct crypto_alg sm4_alg = {
        .cra_priority           =       100,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       SM4_BLOCK_SIZE,
-       .cra_ctxsize            =       sizeof(struct crypto_sm4_ctx),
+       .cra_ctxsize            =       sizeof(struct sm4_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_u                  =       {
                .cipher = {
                        .cia_min_keysize        =       SM4_KEY_SIZE,
                        .cia_max_keysize        =       SM4_KEY_SIZE,
-                       .cia_setkey             =       crypto_sm4_set_key,
-                       .cia_encrypt            =       crypto_sm4_encrypt,
-                       .cia_decrypt            =       crypto_sm4_decrypt
+                       .cia_setkey             =       sm4_setkey,
+                       .cia_encrypt            =       sm4_encrypt,
+                       .cia_decrypt            =       sm4_decrypt
                }
        }
 };
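
The sm4_generic.c changes above drop the local S-box, round constants and key-schedule code and route everything through the shared SM4 library selected by CRYPTO_LIB_SM4, so the generic cipher and the new AES-NI/AVX drivers reuse a single implementation. A sketch of that library interface as the cipher now uses it; key and in are placeholder buffers of SM4_KEY_SIZE and SM4_BLOCK_SIZE bytes:

        /* sketch of the <crypto/sm4.h> library calls wrapped above */
        struct sm4_ctx ctx;
        u8 ct[SM4_BLOCK_SIZE], pt[SM4_BLOCK_SIZE];

        sm4_expandkey(&ctx, key, SM4_KEY_SIZE); /* builds rkey_enc/rkey_dec */
        sm4_crypt_block(ctx.rkey_enc, ct, in);  /* encrypt one 16-byte block */
        sm4_crypt_block(ctx.rkey_dec, pt, ct);  /* decrypt it again */
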
index f8d06da..82b0400 100644 (file)
@@ -77,7 +77,7 @@ static const char *check[] = {
        NULL
 };
 
-static const int block_sizes[] = { 16, 64, 256, 1024, 1420, 4096, 0 };
+static const int block_sizes[] = { 16, 64, 128, 256, 1024, 1420, 4096, 0 };
 static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 };
 
 #define XBUFSIZE 8
@@ -290,6 +290,11 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
        }
 
        ret = crypto_aead_setauthsize(tfm, authsize);
+       if (ret) {
+               pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
+                      ret);
+               goto out_free_tfm;
+       }
 
        for (i = 0; i < num_mb; ++i)
                if (testmgr_alloc_buf(data[i].xbuf)) {
@@ -315,7 +320,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
        for (i = 0; i < num_mb; ++i) {
                data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
                if (!data[i].req) {
-                       pr_err("alg: skcipher: Failed to allocate request for %s\n",
+                       pr_err("alg: aead: Failed to allocate request for %s\n",
                               algo);
                        while (i--)
                                aead_request_free(data[i].req);
@@ -567,13 +572,19 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
        sgout = &sg[9];
 
        tfm = crypto_alloc_aead(algo, 0, 0);
-
        if (IS_ERR(tfm)) {
                pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
                       PTR_ERR(tfm));
                goto out_notfm;
        }
 
+       ret = crypto_aead_setauthsize(tfm, authsize);
+       if (ret) {
+               pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
+                      ret);
+               goto out_noreq;
+       }
+
        crypto_init_wait(&wait);
        printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
                        get_driver_name(crypto_aead, tfm), e);
@@ -611,8 +622,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
                                        break;
                                }
                        }
+
                        ret = crypto_aead_setkey(tfm, key, *keysize);
-                       ret = crypto_aead_setauthsize(tfm, authsize);
+                       if (ret) {
+                               pr_err("setkey() failed flags=%x: %d\n",
+                                       crypto_aead_get_flags(tfm), ret);
+                               goto out;
+                       }
 
                        iv_len = crypto_aead_ivsize(tfm);
                        if (iv_len)
@@ -622,15 +638,8 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
                        printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
                                        i, *keysize * 8, bs);
 
-
                        memset(tvmem[0], 0xff, PAGE_SIZE);
 
-                       if (ret) {
-                               pr_err("setkey() failed flags=%x\n",
-                                               crypto_aead_get_flags(tfm));
-                               goto out;
-                       }
-
                        sg_init_aead(sg, xbuf, bs + (enc ? 0 : authsize),
                                     assoc, aad_size);
 
@@ -1907,6 +1916,14 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                ret += tcrypt_test("streebog512");
                break;
 
+       case 55:
+               ret += tcrypt_test("gcm(sm4)");
+               break;
+
+       case 56:
+               ret += tcrypt_test("ccm(sm4)");
+               break;
+
        case 100:
                ret += tcrypt_test("hmac(md5)");
                break;
@@ -1998,6 +2015,15 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
        case 157:
                ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))");
                break;
+
+       case 158:
+               ret += tcrypt_test("cbcmac(sm4)");
+               break;
+
+       case 159:
+               ret += tcrypt_test("cmac(sm4)");
+               break;
+
        case 181:
                ret += tcrypt_test("authenc(hmac(sha1),cbc(des))");
                break;
@@ -2031,6 +2057,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
        case 191:
                ret += tcrypt_test("ecb(sm4)");
                ret += tcrypt_test("cbc(sm4)");
+               ret += tcrypt_test("cfb(sm4)");
                ret += tcrypt_test("ctr(sm4)");
                break;
        case 200:
@@ -2289,6 +2316,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                                speed_template_16);
                test_cipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
                                speed_template_16);
+               test_cipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_cipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
                test_cipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
                                speed_template_16);
                test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
@@ -2322,6 +2353,34 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                                NULL, 0, 16, 8, speed_template_16);
                break;
 
+       case 222:
+               test_aead_speed("gcm(sm4)", ENCRYPT, sec,
+                               NULL, 0, 16, 8, speed_template_16);
+               test_aead_speed("gcm(sm4)", DECRYPT, sec,
+                               NULL, 0, 16, 8, speed_template_16);
+               break;
+
+       case 223:
+               test_aead_speed("rfc4309(ccm(sm4))", ENCRYPT, sec,
+                               NULL, 0, 16, 16, aead_speed_template_19);
+               test_aead_speed("rfc4309(ccm(sm4))", DECRYPT, sec,
+                               NULL, 0, 16, 16, aead_speed_template_19);
+               break;
+
+       case 224:
+               test_mb_aead_speed("gcm(sm4)", ENCRYPT, sec, NULL, 0, 16, 8,
+                                  speed_template_16, num_mb);
+               test_mb_aead_speed("gcm(sm4)", DECRYPT, sec, NULL, 0, 16, 8,
+                                  speed_template_16, num_mb);
+               break;
+
+       case 225:
+               test_mb_aead_speed("rfc4309(ccm(sm4))", ENCRYPT, sec, NULL, 0,
+                                  16, 16, aead_speed_template_19, num_mb);
+               test_mb_aead_speed("rfc4309(ccm(sm4))", DECRYPT, sec, NULL, 0,
+                                  16, 16, aead_speed_template_19, num_mb);
+               break;
+
        case 300:
                if (alg) {
                        test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -2757,6 +2816,25 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                                   speed_template_8_32);
                break;
 
+       case 518:
+               test_acipher_speed("ecb(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("ecb(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cbc(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               break;
+
        case 600:
                test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
                                       speed_template_16_24_32, num_mb);
index c978e41..70f69f0 100644 (file)
@@ -4450,6 +4450,12 @@ static const struct alg_test_desc alg_test_descs[] = {
                .suite = {
                        .hash = __VECS(aes_cbcmac_tv_template)
                }
+       }, {
+               .alg = "cbcmac(sm4)",
+               .test = alg_test_hash,
+               .suite = {
+                       .hash = __VECS(sm4_cbcmac_tv_template)
+               }
        }, {
                .alg = "ccm(aes)",
                .generic_driver = "ccm_base(ctr(aes-generic),cbcmac(aes-generic))",
@@ -4461,6 +4467,16 @@ static const struct alg_test_desc alg_test_descs[] = {
                                .einval_allowed = 1,
                        }
                }
+       }, {
+               .alg = "ccm(sm4)",
+               .generic_driver = "ccm_base(ctr(sm4-generic),cbcmac(sm4-generic))",
+               .test = alg_test_aead,
+               .suite = {
+                       .aead = {
+                               ____VECS(sm4_ccm_tv_template),
+                               .einval_allowed = 1,
+                       }
+               }
        }, {
                .alg = "cfb(aes)",
                .test = alg_test_skcipher,
@@ -4494,6 +4510,12 @@ static const struct alg_test_desc alg_test_descs[] = {
                .suite = {
                        .hash = __VECS(des3_ede_cmac64_tv_template)
                }
+       }, {
+               .alg = "cmac(sm4)",
+               .test = alg_test_hash,
+               .suite = {
+                       .hash = __VECS(sm4_cmac128_tv_template)
+               }
        }, {
                .alg = "compress_null",
                .test = alg_test_null,
@@ -4967,6 +4989,13 @@ static const struct alg_test_desc alg_test_descs[] = {
                .suite = {
                        .aead = __VECS(aes_gcm_tv_template)
                }
+       }, {
+               .alg = "gcm(sm4)",
+               .generic_driver = "gcm_base(ctr(sm4-generic),ghash-generic)",
+               .test = alg_test_aead,
+               .suite = {
+                       .aead = __VECS(sm4_gcm_tv_template)
+               }
        }, {
                .alg = "ghash",
                .test = alg_test_hash,
index 3ed6ab3..e6fca34 100644 (file)
@@ -13328,6 +13328,154 @@ static const struct cipher_testvec sm4_cfb_tv_template[] = {
        }
 };
 
+static const struct aead_testvec sm4_gcm_tv_template[] = {
+       { /* From https://datatracker.ietf.org/doc/html/rfc8998#appendix-A.1 */
+               .key    = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+                         "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+               .klen   = 16,
+               .iv     = "\x00\x00\x12\x34\x56\x78\x00\x00"
+                         "\x00\x00\xAB\xCD",
+               .ptext  = "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+                         "\xBB\xBB\xBB\xBB\xBB\xBB\xBB\xBB"
+                         "\xCC\xCC\xCC\xCC\xCC\xCC\xCC\xCC"
+                         "\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA",
+               .plen   = 64,
+               .assoc  = "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xAB\xAD\xDA\xD2",
+               .alen   = 20,
+               .ctext  = "\x17\xF3\x99\xF0\x8C\x67\xD5\xEE"
+                         "\x19\xD0\xDC\x99\x69\xC4\xBB\x7D"
+                         "\x5F\xD4\x6F\xD3\x75\x64\x89\x06"
+                         "\x91\x57\xB2\x82\xBB\x20\x07\x35"
+                         "\xD8\x27\x10\xCA\x5C\x22\xF0\xCC"
+                         "\xFA\x7C\xBF\x93\xD4\x96\xAC\x15"
+                         "\xA5\x68\x34\xCB\xCF\x98\xC3\x97"
+                         "\xB4\x02\x4A\x26\x91\x23\x3B\x8D"
+                         "\x83\xDE\x35\x41\xE4\xC2\xB5\x81"
+                         "\x77\xE0\x65\xA9\xBF\x7B\x62\xEC",
+               .clen   = 80,
+       }
+};
+
+static const struct aead_testvec sm4_ccm_tv_template[] = {
+       { /* From https://datatracker.ietf.org/doc/html/rfc8998#appendix-A.2 */
+               .key    = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+                         "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+               .klen   = 16,
+               .iv     = "\x02\x00\x00\x12\x34\x56\x78\x00"
+                         "\x00\x00\x00\xAB\xCD\x00\x00\x00",
+               .ptext  = "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+                         "\xBB\xBB\xBB\xBB\xBB\xBB\xBB\xBB"
+                         "\xCC\xCC\xCC\xCC\xCC\xCC\xCC\xCC"
+                         "\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA",
+               .plen   = 64,
+               .assoc  = "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xAB\xAD\xDA\xD2",
+               .alen   = 20,
+               .ctext  = "\x48\xAF\x93\x50\x1F\xA6\x2A\xDB"
+                         "\xCD\x41\x4C\xCE\x60\x34\xD8\x95"
+                         "\xDD\xA1\xBF\x8F\x13\x2F\x04\x20"
+                         "\x98\x66\x15\x72\xE7\x48\x30\x94"
+                         "\xFD\x12\xE5\x18\xCE\x06\x2C\x98"
+                         "\xAC\xEE\x28\xD9\x5D\xF4\x41\x6B"
+                         "\xED\x31\xA2\xF0\x44\x76\xC1\x8B"
+                         "\xB4\x0C\x84\xA7\x4B\x97\xDC\x5B"
+                         "\x16\x84\x2D\x4F\xA1\x86\xF5\x6A"
+                         "\xB3\x32\x56\x97\x1F\xA1\x10\xF4",
+               .clen   = 80,
+       }
+};
+
+static const struct hash_testvec sm4_cbcmac_tv_template[] = {
+       {
+               .key            = "\xff\xee\xdd\xcc\xbb\xaa\x99\x88"
+                                 "\x77\x66\x55\x44\x33\x22\x11\x00",
+               .plaintext      = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+               .digest         = "\x97\xb4\x75\x8f\x84\x92\x3d\x3f"
+                                 "\x86\x81\x0e\x0e\xea\x14\x6d\x73",
+               .psize          = 16,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                                 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
+                                 "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
+                                 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                                 "\xee",
+               .digest         = "\xc7\xdb\x17\x71\xa1\x5c\x0d\x22"
+                                 "\xa3\x39\x3a\x31\x88\x91\x49\xa1",
+               .psize          = 33,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xfb\xd1\xbe\x92\x7e\x50\x3f\x16"
+                                 "\xf9\xdd\xbe\x91\x73\x53\x37\x1a"
+                                 "\xfe\xdd\xba\x97\x7e\x53\x3c\x1c"
+                                 "\xfe\xd7\xbf\x9c\x75\x5f\x3e\x11"
+                                 "\xf0\xd8\xbc\x96\x73\x5c\x34\x11"
+                                 "\xf5\xdb\xb1\x99\x7a\x5a\x32\x1f"
+                                 "\xf6\xdf\xb4\x95\x7f\x5f\x3b\x17"
+                                 "\xfd\xdb\xb1\x9b\x76\x5c\x37",
+               .digest         = "\x9b\x07\x88\x7f\xd5\x95\x23\x12"
+                                 "\x64\x0a\x66\x7f\x4e\x25\xca\xd0",
+               .psize          = 63,
+               .ksize          = 16,
+       }
+};
+
+static const struct hash_testvec sm4_cmac128_tv_template[] = {
+       {
+               .key            = "\xff\xee\xdd\xcc\xbb\xaa\x99\x88"
+                                 "\x77\x66\x55\x44\x33\x22\x11\x00",
+               .plaintext      = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+               .digest         = "\x00\xd4\x63\xb4\x9a\xf3\x52\xe2"
+                                 "\x74\xa9\x00\x55\x13\x54\x2a\xd1",
+               .psize          = 16,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                                 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
+                                 "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
+                                 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                                 "\xee",
+               .digest         = "\x8a\x8a\xe9\xc0\xc8\x97\x0e\x85"
+                                 "\x21\x57\x02\x10\x1a\xbf\x9c\xc6",
+               .psize          = 33,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xfb\xd1\xbe\x92\x7e\x50\x3f\x16"
+                                 "\xf9\xdd\xbe\x91\x73\x53\x37\x1a"
+                                 "\xfe\xdd\xba\x97\x7e\x53\x3c\x1c"
+                                 "\xfe\xd7\xbf\x9c\x75\x5f\x3e\x11"
+                                 "\xf0\xd8\xbc\x96\x73\x5c\x34\x11"
+                                 "\xf5\xdb\xb1\x99\x7a\x5a\x32\x1f"
+                                 "\xf6\xdf\xb4\x95\x7f\x5f\x3b\x17"
+                                 "\xfd\xdb\xb1\x9b\x76\x5c\x37",
+               .digest         = "\x5f\x14\xc9\xa9\x20\xb2\xb4\xf0"
+                                 "\x76\xe0\xd8\xd6\xdc\x4f\xe1\xbc",
+               .psize          = 63,
+               .ksize          = 16,
+       }
+};
+
 /* Cast6 test vectors from RFC 2612 */
 static const struct cipher_testvec cast6_tv_template[] = {
        {
index bf79fbb..5e820af 100644 (file)
@@ -775,7 +775,7 @@ static const u64 rc[WHIRLPOOL_ROUNDS] = {
        0xca2dbf07ad5a8333ULL,
 };
 
-/**
+/*
  * The core Whirlpool transform.
  */
 
index 8bad634..30d2db3 100644 (file)
@@ -51,8 +51,6 @@ source "drivers/net/Kconfig"
 
 source "drivers/isdn/Kconfig"
 
-source "drivers/lightnvm/Kconfig"
-
 # input before char - char/joystick depends on it. As does USB.
 
 source "drivers/input/Kconfig"
index 27c018b..be5d40a 100644 (file)
@@ -70,7 +70,6 @@ obj-$(CONFIG_FB_I810)           += video/fbdev/i810/
 obj-$(CONFIG_FB_INTEL)          += video/fbdev/intelfb/
 
 obj-$(CONFIG_PARPORT)          += parport/
-obj-$(CONFIG_NVM)              += lightnvm/
 obj-y                          += base/ block/ misc/ mfd/ nfc/
 obj-$(CONFIG_LIBNVDIMM)                += nvdimm/
 obj-$(CONFIG_DAX)              += dax/
index 8f9940f..1da360c 100644 (file)
@@ -280,9 +280,9 @@ config ACPI_CPPC_LIB
 
 config ACPI_PROCESSOR
        tristate "Processor"
-       depends on X86 || IA64 || ARM64
+       depends on X86 || IA64 || ARM64 || LOONGARCH
        select ACPI_PROCESSOR_IDLE
-       select ACPI_CPU_FREQ_PSS if X86 || IA64
+       select ACPI_CPU_FREQ_PSS if X86 || IA64 || LOONGARCH
        default y
        help
          This driver adds support for the ACPI Processor package. It is required
index 76b83b1..c970792 100644 (file)
@@ -70,7 +70,7 @@ static inline struct acpi_table_header *get_header(struct config_item *cfg)
        if (!table->header)
                pr_err("table not loaded\n");
 
-       return table->header;
+       return table->header ?: ERR_PTR(-EINVAL);
 }
 
 static ssize_t acpi_table_aml_read(struct config_item *cfg,
@@ -78,8 +78,8 @@ static ssize_t acpi_table_aml_read(struct config_item *cfg,
 {
        struct acpi_table_header *h = get_header(cfg);
 
-       if (!h)
-               return -EINVAL;
+       if (IS_ERR(h))
+               return PTR_ERR(h);
 
        if (data)
                memcpy(data, h, h->length);
@@ -100,60 +100,60 @@ static ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
 {
        struct acpi_table_header *h = get_header(cfg);
 
-       if (!h)
-               return -EINVAL;
+       if (IS_ERR(h))
+               return PTR_ERR(h);
 
-       return sprintf(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->signature);
+       return sysfs_emit(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->signature);
 }
 
 static ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
 {
        struct acpi_table_header *h = get_header(cfg);
 
-       if (!h)
-               return -EINVAL;
+       if (IS_ERR(h))
+               return PTR_ERR(h);
 
-       return sprintf(str, "%d\n", h->length);
+       return sysfs_emit(str, "%d\n", h->length);
 }
 
 static ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
 {
        struct acpi_table_header *h = get_header(cfg);
 
-       if (!h)
-               return -EINVAL;
+       if (IS_ERR(h))
+               return PTR_ERR(h);
 
-       return sprintf(str, "%d\n", h->revision);
+       return sysfs_emit(str, "%d\n", h->revision);
 }
 
 static ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
 {
        struct acpi_table_header *h = get_header(cfg);
 
-       if (!h)
-               return -EINVAL;
+       if (IS_ERR(h))
+               return PTR_ERR(h);
 
-       return sprintf(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id);
+       return sysfs_emit(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id);
 }
 
 static ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
 {
        struct acpi_table_header *h = get_header(cfg);
 
-       if (!h)
-               return -EINVAL;
+       if (IS_ERR(h))
+               return PTR_ERR(h);
 
-       return sprintf(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id);
+       return sysfs_emit(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id);
 }
 
 static ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
 {
        struct acpi_table_header *h = get_header(cfg);
 
-       if (!h)
-               return -EINVAL;
+       if (IS_ERR(h))
+               return PTR_ERR(h);
 
-       return sprintf(str, "%d\n", h->oem_revision);
+       return sysfs_emit(str, "%d\n", h->oem_revision);
 }
 
 static ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg,
@@ -161,10 +161,10 @@ static ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg,
 {
        struct acpi_table_header *h = get_header(cfg);
 
-       if (!h)
-               return -EINVAL;
+       if (IS_ERR(h))
+               return PTR_ERR(h);
 
-       return sprintf(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->asl_compiler_id);
+       return sysfs_emit(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->asl_compiler_id);
 }
 
 static ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
@@ -172,10 +172,10 @@ static ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
 {
        struct acpi_table_header *h = get_header(cfg);
 
-       if (!h)
-               return -EINVAL;
+       if (IS_ERR(h))
+               return PTR_ERR(h);
 
-       return sprintf(str, "%d\n", h->asl_compiler_revision);
+       return sysfs_emit(str, "%d\n", h->asl_compiler_revision);
 }
 
 CONFIGFS_ATTR_RO(acpi_table_, signature);
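
The configfs change above makes get_header() encode its failure in the returned pointer (ERR_PTR(-EINVAL)) instead of returning NULL, so every *_show() helper switches from a NULL check to IS_ERR()/PTR_ERR(); the helpers also move from sprintf() to sysfs_emit(), which bounds the output to one page. A generic sketch of the same pattern outside the ACPI code; the foo names are invented:

        /* sketch of the ERR_PTR convention adopted above: the returned
         * pointer is either valid or carries an errno, so callers test
         * it exactly once */
        struct foo { const char *name; };

        static struct foo *foo_lookup(bool loaded)
        {
                static struct foo f = { .name = "example" };

                return loaded ? &f : ERR_PTR(-EINVAL);
        }

        static ssize_t foo_name_show(struct config_item *cfg, char *str)
        {
                struct foo *f = foo_lookup(true);

                if (IS_ERR(f))
                        return PTR_ERR(f);      /* propagate the errno */

                return sysfs_emit(str, "%s\n", f->name);
        }
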
index 4ee2ad2..6922a44 100644 (file)
@@ -220,8 +220,8 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
                        break;
 
                default:
-                       pr_err(FW_BUG "Invalid record %d found.\n", record_header->type);
-                       return -EINVAL;
+                       /* Other types are reserved in ACPI 6.4 spec. */
+                       break;
                }
        }
        return 0;
@@ -254,8 +254,7 @@ static int __init acpi_init_fpdt(void)
                                              subtable->type);
                        break;
                default:
-                       pr_info(FW_BUG "Invalid subtable type %d found.\n",
-                              subtable->type);
+                       /* Other types are reserved in ACPI 6.4 spec. */
                        break;
                }
                offset += sizeof(*subtable);
index df4adeb..f45979a 100644 (file)
@@ -249,12 +249,12 @@ static void set_power_saving_task_num(unsigned int num)
 
 static void acpi_pad_idle_cpus(unsigned int num_cpus)
 {
-       get_online_cpus();
+       cpus_read_lock();
 
        num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
        set_power_saving_task_num(num_cpus);
 
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 static uint32_t acpi_pad_idle_cpus_num(void)
index 2d5bd2a..6737b1c 100644 (file)
@@ -182,7 +182,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
                return -ENODEV;
 
        cpu_maps_update_begin();
-       cpu_hotplug_begin();
+       cpus_write_lock();
 
        ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
        if (ret)
@@ -203,7 +203,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        pr->flags.need_hotplug_init = 1;
 
 out:
-       cpu_hotplug_done();
+       cpus_write_unlock();
        cpu_maps_update_done();
        return ret;
 }
@@ -454,13 +454,13 @@ static void acpi_processor_remove(struct acpi_device *device)
        per_cpu(processors, pr->id) = NULL;
 
        cpu_maps_update_begin();
-       cpu_hotplug_begin();
+       cpus_write_lock();
 
        /* Remove the CPU. */
        arch_unregister_cpu(pr->id);
        acpi_unmap_cpu(pr->id);
 
-       cpu_hotplug_done();
+       cpus_write_unlock();
        cpu_maps_update_done();
 
        try_offline_node(cpu_to_node(pr->id));
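
Both hunks above are part of the tree-wide switch to the newer CPU-hotplug locking names: get_online_cpus()/put_online_cpus() become cpus_read_lock()/cpus_read_unlock(), and cpu_hotplug_begin()/cpu_hotplug_done() become cpus_write_lock()/cpus_write_unlock(). The semantics are unchanged; the names simply make the reader/writer pairing explicit. A tiny sketch of the reader side under the new names, with a placeholder loop body:

        /* sketch: keep the set of online CPUs stable while walking it */
        static unsigned int count_online_example(void)
        {
                unsigned int cpu, n = 0;

                cpus_read_lock();               /* was: get_online_cpus() */
                for_each_online_cpu(cpu)
                        n++;
                cpus_read_unlock();             /* was: put_online_cpus() */

                return n;
        }
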
index 41ba777..f2d2267 100644 (file)
@@ -561,11 +561,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
                                                                op->common.
                                                                node->object,
                                                                NULL);
-                               if ACPI_FAILURE
-                                       (status) {
+                               if (ACPI_FAILURE(status)) {
                                        ACPI_EXCEPTION((AE_INFO, status,
                                                        "While writing to buffer field"));
-                                       }
+                               }
                        }
                        ACPI_FREE(namepath);
                        status = AE_OK;
index 38e10ab..14b71b4 100644 (file)
@@ -379,13 +379,6 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
 
                        (*element_ptr)->common.reference_count =
                            original_ref_count;
-
-                       /*
-                        * The original_element holds a reference from the package object
-                        * that represents _HID. Since a new element was created by _HID,
-                        * remove the reference from the _CID package.
-                        */
-                       acpi_ut_remove_reference(original_element);
                }
 
                element_ptr++;
index f854bcb..3abb437 100644 (file)
@@ -498,24 +498,24 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
        acpi_evaluate_ost(handle, type, ost_code, NULL);
 }
 
-static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
 {
        struct acpi_device *device = data;
 
        device->driver->ops.notify(device, event);
 }
 
-static void acpi_device_notify_fixed(void *data)
+static void acpi_notify_device_fixed(void *data)
 {
        struct acpi_device *device = data;
 
        /* Fixed hardware devices have no handles */
-       acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
+       acpi_notify_device(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
 }
 
 static u32 acpi_device_fixed_event(void *data)
 {
-       acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
+       acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_notify_device_fixed, data);
        return ACPI_INTERRUPT_HANDLED;
 }
 
@@ -536,7 +536,7 @@ static int acpi_device_install_notify_handler(struct acpi_device *device)
        else
                status = acpi_install_notify_handler(device->handle,
                                                     ACPI_DEVICE_NOTIFY,
-                                                    acpi_device_notify,
+                                                    acpi_notify_device,
                                                     device);
 
        if (ACPI_FAILURE(status))
@@ -554,7 +554,7 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
                                                acpi_device_fixed_event);
        else
                acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-                                          acpi_device_notify);
+                                          acpi_notify_device);
 }
 
 /* Handle events targeting \_SB device (at present only graceful shutdown) */
index f25bd33..1f9b9a4 100644 (file)
@@ -77,6 +77,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
                },
                .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
        },
+       {
+               /*
+                * Lenovo Yoga 9 14ITL5; the initial notification of the
+                * LID device never happens.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "82BG"),
+               },
+               .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+       },
        {
                /*
                 * Medion Akoya E2215T, notification of the LID device only
index 550b908..f4e9c2e 100644 (file)
@@ -90,15 +90,24 @@ static ssize_t name##_store(struct device *dev,\
 
 PCH_FIVR_SHOW(freq_mhz_low_clock, GFC0)
 PCH_FIVR_SHOW(freq_mhz_high_clock, GFC1)
+PCH_FIVR_SHOW(ssc_clock_info, GEMI)
+PCH_FIVR_SHOW(fivr_switching_freq_mhz, GFCS)
+PCH_FIVR_SHOW(fivr_switching_fault_status, GFFS)
 PCH_FIVR_STORE(freq_mhz_low_clock, RFC0)
 PCH_FIVR_STORE(freq_mhz_high_clock, RFC1)
 
 static DEVICE_ATTR_RW(freq_mhz_low_clock);
 static DEVICE_ATTR_RW(freq_mhz_high_clock);
+static DEVICE_ATTR_RO(ssc_clock_info);
+static DEVICE_ATTR_RO(fivr_switching_freq_mhz);
+static DEVICE_ATTR_RO(fivr_switching_fault_status);
 
 static struct attribute *fivr_attrs[] = {
        &dev_attr_freq_mhz_low_clock.attr,
        &dev_attr_freq_mhz_high_clock.attr,
+       &dev_attr_ssc_clock_info.attr,
+       &dev_attr_fivr_switching_freq_mhz.attr,
+       &dev_attr_fivr_switching_fault_status.attr,
        NULL
 };
 
index fce3f3b..7a33a6d 100644 (file)
@@ -285,29 +285,27 @@ int acpi_unbind_one(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(acpi_unbind_one);
 
-static int acpi_device_notify(struct device *dev)
+void acpi_device_notify(struct device *dev)
 {
        struct acpi_bus_type *type = acpi_get_bus_type(dev);
        struct acpi_device *adev;
        int ret;
 
        ret = acpi_bind_one(dev, NULL);
-       if (ret && type) {
-               struct acpi_device *adev;
+       if (ret) {
+               if (!type)
+                       goto err;
 
                adev = type->find_companion(dev);
                if (!adev) {
-                       pr_debug("Unable to get handle for %s\n", dev_name(dev));
-                       ret = -ENODEV;
-                       goto out;
+                       dev_dbg(dev, "ACPI companion not found\n");
+                       goto err;
                }
                ret = acpi_bind_one(dev, adev);
                if (ret)
-                       goto out;
+                       goto err;
        }
        adev = ACPI_COMPANION(dev);
-       if (!adev)
-               goto out;
 
        if (dev_is_platform(dev))
                acpi_configure_pmsi_domain(dev);
@@ -317,27 +315,22 @@ static int acpi_device_notify(struct device *dev)
        else if (adev->handler && adev->handler->bind)
                adev->handler->bind(dev);
 
- out:
-       if (!ret) {
-               struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       acpi_handle_debug(ACPI_HANDLE(dev), "Bound to device %s\n",
+                         dev_name(dev));
 
-               acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
-               pr_debug("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
-               kfree(buffer.pointer);
-       } else {
-               pr_debug("Device %s -> No ACPI support\n", dev_name(dev));
-       }
+       return;
 
-       return ret;
+err:
+       dev_dbg(dev, "No ACPI support\n");
 }
 
-static int acpi_device_notify_remove(struct device *dev)
+void acpi_device_notify_remove(struct device *dev)
 {
        struct acpi_device *adev = ACPI_COMPANION(dev);
        struct acpi_bus_type *type;
 
        if (!adev)
-               return 0;
+               return;
 
        type = acpi_get_bus_type(dev);
        if (type && type->cleanup)
@@ -346,20 +339,4 @@ static int acpi_device_notify_remove(struct device *dev)
                adev->handler->unbind(dev);
 
        acpi_unbind_one(dev);
-       return 0;
-}
-
-int acpi_platform_notify(struct device *dev, enum kobject_action action)
-{
-       switch (action) {
-       case KOBJ_ADD:
-               acpi_device_notify(dev);
-               break;
-       case KOBJ_REMOVE:
-               acpi_device_notify_remove(dev);
-               break;
-       default:
-               break;
-       }
-       return 0;
 }
index 23d9a09..a3ef6cc 100644 (file)
@@ -3021,6 +3021,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
                struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
                struct nd_mapping_desc *mapping;
 
+               /* range index 0 == unmapped in SPA or invalid-SPA */
+               if (memdev->range_index == 0 || spa->range_index == 0)
+                       continue;
                if (memdev->range_index != spa->range_index)
                        continue;
                if (count >= ND_MAX_MAPPINGS) {
index fcf2e55..39b1f34 100644 (file)
@@ -2,7 +2,7 @@
 config ACPI_NUMA
        bool "NUMA support"
        depends on NUMA
-       depends on (X86 || IA64 || ARM64)
+       depends on (X86 || IA64 || ARM64 || LOONGARCH)
        default y if IA64 || ARM64
 
 config ACPI_HMAT
index 6021a10..b8795fc 100644 (file)
@@ -206,7 +206,7 @@ int __init srat_disabled(void)
        return acpi_numa < 0;
 }
 
-#if defined(CONFIG_X86) || defined(CONFIG_ARM64)
+#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
 /*
  * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
  * I/O localities since SRAT does not list them.  I/O localities are
index 45c5c0e..a43f152 100644 (file)
@@ -284,7 +284,8 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
 #define should_use_kmap(pfn)   page_is_ram(pfn)
 #endif
 
-static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
+                             bool memory)
 {
        unsigned long pfn;
 
@@ -294,7 +295,8 @@ static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
                        return NULL;
                return (void __iomem __force *)kmap(pfn_to_page(pfn));
        } else
-               return acpi_os_ioremap(pg_off, pg_sz);
+               return memory ? acpi_os_memmap(pg_off, pg_sz) :
+                               acpi_os_ioremap(pg_off, pg_sz);
 }
 
 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
@@ -309,9 +311,10 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
 }
 
 /**
- * acpi_os_map_iomem - Get a virtual address for a given physical address range.
+ * __acpi_os_map_iomem - Get a virtual address for a given physical address range.
  * @phys: Start of the physical address range to map.
  * @size: Size of the physical address range to map.
+ * @memory: true if remapping memory, false if IO
  *
  * Look up the given physical address range in the list of existing ACPI memory
  * mappings.  If found, get a reference to it and return a pointer to it (its
@@ -321,8 +324,8 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
  * During early init (when acpi_permanent_mmap has not been set yet) this
  * routine simply calls __acpi_map_table() to get the job done.
  */
-void __iomem __ref
-*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
+static void __iomem __ref
+*__acpi_os_map_iomem(acpi_physical_address phys, acpi_size size, bool memory)
 {
        struct acpi_ioremap *map;
        void __iomem *virt;
@@ -353,7 +356,7 @@ void __iomem __ref
 
        pg_off = round_down(phys, PAGE_SIZE);
        pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-       virt = acpi_map(phys, size);
+       virt = acpi_map(phys, size, memory);
        if (!virt) {
                mutex_unlock(&acpi_ioremap_lock);
                kfree(map);
@@ -372,11 +375,17 @@ out:
        mutex_unlock(&acpi_ioremap_lock);
        return map->virt + (phys - map->phys);
 }
+
+void __iomem *__ref
+acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
+{
+       return __acpi_os_map_iomem(phys, size, false);
+}
 EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
 
 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
-       return (void *)acpi_os_map_iomem(phys, size);
+       return (void *)__acpi_os_map_iomem(phys, size, true);
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
index dd2fbf3..d418462 100644 (file)
@@ -106,6 +106,9 @@ static ssize_t platform_profile_store(struct device *dev,
        }
 
        err = cur_profile->profile_set(cur_profile, i);
+       if (!err)
+               sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
        mutex_unlock(&profile_lock);
        if (err)
                return err;
index a091d5a..cbe08e6 100644 (file)
@@ -178,15 +178,17 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg,
 {
        int data, ret;
 
-       /* GPIO1 LDO regulator needs special handling */
-       if (reg == XPOWER_GPI1_CTRL)
-               return regmap_update_bits(regmap, reg, GPI1_LDO_MASK,
-                                         on ? GPI1_LDO_ON : GPI1_LDO_OFF);
-
        ret = iosf_mbi_block_punit_i2c_access();
        if (ret)
                return ret;
 
+       /* GPIO1 LDO regulator needs special handling */
+       if (reg == XPOWER_GPI1_CTRL) {
+               ret = regmap_update_bits(regmap, reg, GPI1_LDO_MASK,
+                                        on ? GPI1_LDO_ON : GPI1_LDO_OFF);
+               goto out;
+       }
+
        if (regmap_read(regmap, reg, &data)) {
                ret = -EIO;
                goto out;
@@ -234,6 +236,11 @@ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
                return ret;
 
        if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
+               /*
+                * AXP288_ADC_TS_PIN_CTRL reads are cached by the regmap, so
+                * this amounts to a single I2C-transfer, and thus there is no
+                * need to explicitly call iosf_mbi_block_punit_i2c_access().
+                */
                ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
                                         AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
                                         AXP288_ADC_TS_CURRENT_ON_ONDEMAND);
@@ -244,6 +251,10 @@ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
                usleep_range(6000, 10000);
        }
 
+       ret = iosf_mbi_block_punit_i2c_access();
+       if (ret)
+               return ret;
+
        ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
        if (ret == 0)
                ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
@@ -254,6 +265,31 @@ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
                                   AXP288_ADC_TS_CURRENT_ON);
        }
 
+       iosf_mbi_unblock_punit_i2c_access();
+
+       return ret;
+}
+
+static int intel_xpower_exec_mipi_pmic_seq_element(struct regmap *regmap,
+                                                  u16 i2c_address, u32 reg_address,
+                                                  u32 value, u32 mask)
+{
+       int ret;
+
+       if (i2c_address != 0x34) {
+               pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
+                      __func__, i2c_address, reg_address, value, mask);
+               return -ENXIO;
+       }
+
+       ret = iosf_mbi_block_punit_i2c_access();
+       if (ret)
+               return ret;
+
+       ret = regmap_update_bits(regmap, reg_address, mask, value);
+
+       iosf_mbi_unblock_punit_i2c_access();
+
        return ret;
 }
 
@@ -261,6 +297,7 @@ static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
        .get_power = intel_xpower_pmic_get_power,
        .update_power = intel_xpower_pmic_update_power,
        .get_raw_temp = intel_xpower_pmic_get_raw_temp,
+       .exec_mipi_pmic_seq_element = intel_xpower_exec_mipi_pmic_seq_element,
        .power_table = power_table,
        .power_table_count = ARRAY_SIZE(power_table),
        .thermal_table = thermal_table,
index eba7785..b9863e2 100644 (file)
@@ -48,7 +48,6 @@ struct acpi_power_dependent_device {
 struct acpi_power_resource {
        struct acpi_device device;
        struct list_head list_node;
-       char *name;
        u32 system_level;
        u32 order;
        unsigned int ref_count;
@@ -70,6 +69,11 @@ static DEFINE_MUTEX(power_resource_list_lock);
                              Power Resource Management
    -------------------------------------------------------------------------- */
 
+static inline const char *resource_dev_name(struct acpi_power_resource *pr)
+{
+       return dev_name(&pr->device.dev);
+}
+
 static inline
 struct acpi_power_resource *to_power_resource(struct acpi_device *device)
 {
@@ -264,7 +268,8 @@ acpi_power_resource_add_dependent(struct acpi_power_resource *resource,
 
        dep->dev = dev;
        list_add_tail(&dep->node, &resource->dependents);
-       dev_dbg(dev, "added power dependency to [%s]\n", resource->name);
+       dev_dbg(dev, "added power dependency to [%s]\n",
+               resource_dev_name(resource));
 
 unlock:
        mutex_unlock(&resource->resource_lock);
@@ -283,7 +288,7 @@ acpi_power_resource_remove_dependent(struct acpi_power_resource *resource,
                        list_del(&dep->node);
                        kfree(dep);
                        dev_dbg(dev, "removed power dependency to [%s]\n",
-                               resource->name);
+                               resource_dev_name(resource));
                        break;
                }
        }
@@ -356,10 +361,11 @@ void acpi_device_power_remove_dependent(struct acpi_device *adev,
 
 static int __acpi_power_on(struct acpi_power_resource *resource)
 {
+       acpi_handle handle = resource->device.handle;
        struct acpi_power_dependent_device *dep;
        acpi_status status = AE_OK;
 
-       status = acpi_evaluate_object(resource->device.handle, "_ON", NULL, NULL);
+       status = acpi_evaluate_object(handle, "_ON", NULL, NULL);
        if (ACPI_FAILURE(status)) {
                resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
                return -ENODEV;
@@ -367,7 +373,7 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
 
        resource->state = ACPI_POWER_RESOURCE_STATE_ON;
 
-       pr_debug("Power resource [%s] turned on\n", resource->name);
+       acpi_handle_debug(handle, "Power resource turned on\n");
 
        /*
         * If there are other dependents on this power resource we need to
@@ -380,7 +386,7 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
 
        list_for_each_entry(dep, &resource->dependents, node) {
                dev_dbg(dep->dev, "runtime resuming because [%s] turned on\n",
-                       resource->name);
+                       resource_dev_name(resource));
                pm_request_resume(dep->dev);
        }
 
@@ -392,7 +398,8 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
        int result = 0;
 
        if (resource->ref_count++) {
-               pr_debug("Power resource [%s] already on\n", resource->name);
+               acpi_handle_debug(resource->device.handle,
+                                 "Power resource already on\n");
        } else {
                result = __acpi_power_on(resource);
                if (result)
@@ -413,10 +420,10 @@ static int acpi_power_on(struct acpi_power_resource *resource)
 
 static int __acpi_power_off(struct acpi_power_resource *resource)
 {
+       acpi_handle handle = resource->device.handle;
        acpi_status status;
 
-       status = acpi_evaluate_object(resource->device.handle, "_OFF",
-                                     NULL, NULL);
+       status = acpi_evaluate_object(handle, "_OFF", NULL, NULL);
        if (ACPI_FAILURE(status)) {
                resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
                return -ENODEV;
@@ -424,7 +431,7 @@ static int __acpi_power_off(struct acpi_power_resource *resource)
 
        resource->state = ACPI_POWER_RESOURCE_STATE_OFF;
 
-       pr_debug("Power resource [%s] turned off\n", resource->name);
+       acpi_handle_debug(handle, "Power resource turned off\n");
 
        return 0;
 }
@@ -434,12 +441,14 @@ static int acpi_power_off_unlocked(struct acpi_power_resource *resource)
        int result = 0;
 
        if (!resource->ref_count) {
-               pr_debug("Power resource [%s] already off\n", resource->name);
+               acpi_handle_debug(resource->device.handle,
+                                 "Power resource already off\n");
                return 0;
        }
 
        if (--resource->ref_count) {
-               pr_debug("Power resource [%s] still in use\n", resource->name);
+               acpi_handle_debug(resource->device.handle,
+                                 "Power resource still in use\n");
        } else {
                result = __acpi_power_off(resource);
                if (result)
@@ -949,7 +958,6 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
        mutex_init(&resource->resource_lock);
        INIT_LIST_HEAD(&resource->list_node);
        INIT_LIST_HEAD(&resource->dependents);
-       resource->name = device->pnp.bus_id;
        strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
        device->power.state = ACPI_STATE_UNKNOWN;
@@ -1004,7 +1012,7 @@ void acpi_resume_power_resources(void)
 
                if (state == ACPI_POWER_RESOURCE_STATE_OFF
                    && resource->ref_count) {
-                       dev_dbg(&resource->device.dev, "Turning ON\n");
+                       acpi_handle_debug(resource->device.handle, "Turning ON\n");
                        __acpi_power_on(resource);
                }
 
@@ -1034,7 +1042,7 @@ void acpi_turn_off_unused_power_resources(void)
                 */
                if (!resource->ref_count &&
                    resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
-                       dev_dbg(&resource->device.dev, "Turning OFF\n");
+                       acpi_handle_debug(resource->device.handle, "Turning OFF\n");
                        __acpi_power_off(resource);
                }
 
index 31cf9ae..1f6007a 100644 (file)
@@ -292,6 +292,12 @@ void __init init_prmt(void)
        int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
                                          sizeof (struct acpi_table_prmt_header),
                                          0, acpi_parse_prmt, 0);
+       /*
+        * Return immediately if the PRMT table is not present or no PRM module was found.
+        */
+       if (mc <= 0)
+               return;
+
        pr_info("PRM: found %u modules\n", mc);
 
        status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
index 095c8ac..f37fba9 100644 (file)
@@ -1301,7 +1301,7 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
        if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
 
                /* Protect against cpu-hotplug */
-               get_online_cpus();
+               cpus_read_lock();
                cpuidle_pause_and_lock();
 
                /* Disable all cpuidle devices */
@@ -1330,7 +1330,7 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
                        }
                }
                cpuidle_resume_and_unlock();
-               put_online_cpus();
+               cpus_read_unlock();
        }
 
        return 0;
index 88460ba..25c2d0b 100644 (file)
@@ -136,6 +136,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
                break;
        case ACPI_DBG2_16550_COMPATIBLE:
        case ACPI_DBG2_16550_SUBSET:
+       case ACPI_DBG2_16550_WITH_GAS:
                uart = "uart";
                break;
        default:
index fbdbef0..3a30846 100644 (file)
@@ -452,7 +452,7 @@ int acpi_s2idle_prepare_late(void)
        if (lps0_dsm_func_mask_microsoft > 0) {
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
@@ -479,7 +479,7 @@ void acpi_s2idle_restore_early(void)
        if (lps0_dsm_func_mask_microsoft > 0) {
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
index fec2e97..5b3fa2c 100644 (file)
@@ -125,6 +125,7 @@ EXPORT_SYMBOL_GPL(ahci_shost_attrs);
 struct device_attribute *ahci_sdev_attrs[] = {
        &dev_attr_sw_activity,
        &dev_attr_unload_heads,
+       &dev_attr_ncq_prio_supported,
        &dev_attr_ncq_prio_enable,
        NULL
 };
index 61c7629..b8459c5 100644 (file)
@@ -159,6 +159,12 @@ MODULE_DESCRIPTION("Library module for ATA devices");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+static inline bool ata_dev_print_info(struct ata_device *dev)
+{
+       struct ata_eh_context *ehc = &dev->link->eh_context;
+
+       return ehc->i.flags & ATA_EHI_PRINTINFO;
+}
 
 static bool ata_sstatus_online(u32 sstatus)
 {
@@ -706,11 +712,9 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                if (tf->flags & ATA_TFLAG_FUA)
                        tf->device |= 1 << 7;
 
-               if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
-                       if (class == IOPRIO_CLASS_RT)
-                               tf->hob_nsect |= ATA_PRIO_HIGH <<
-                                                ATA_SHIFT_PRIO;
-               }
+               if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE &&
+                   class == IOPRIO_CLASS_RT)
+                       tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
        } else if (dev->flags & ATA_DFLAG_LBA) {
                tf->flags |= ATA_TFLAG_LBA;
 
@@ -1266,8 +1270,7 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
  */
 static int ata_hpa_resize(struct ata_device *dev)
 {
-       struct ata_eh_context *ehc = &dev->link->eh_context;
-       int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
+       bool print_info = ata_dev_print_info(dev);
        bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
        u64 sectors = ata_id_n_sectors(dev->id);
        u64 native_sectors;
@@ -2023,13 +2026,15 @@ retry:
        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     buf, sectors * ATA_SECT_SIZE, 0);
 
-       if (err_mask && dma) {
-               dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
-               ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
-               goto retry;
+       if (err_mask) {
+               if (dma) {
+                       dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
+                       goto retry;
+               }
+               ata_dev_err(dev, "Read log page 0x%02x failed, Emask 0x%x\n",
+                           (unsigned int)page, err_mask);
        }
 
-       DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
 }
 
@@ -2058,12 +2063,8 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
         */
        err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
                                1);
-       if (err) {
-               ata_dev_info(dev,
-                            "failed to get Device Identify Log Emask 0x%x\n",
-                            err);
+       if (err)
                return false;
-       }
 
        for (i = 0; i < ap->sector_buf[8]; i++) {
                if (ap->sector_buf[9 + i] == page)
@@ -2127,11 +2128,7 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
        }
        err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
                                     0, ap->sector_buf, 1);
-       if (err_mask) {
-               ata_dev_dbg(dev,
-                           "failed to get NCQ Send/Recv Log Emask 0x%x\n",
-                           err_mask);
-       } else {
+       if (!err_mask) {
                u8 *cmds = dev->ncq_send_recv_cmds;
 
                dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
@@ -2157,11 +2154,7 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
        }
        err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
                                     0, ap->sector_buf, 1);
-       if (err_mask) {
-               ata_dev_dbg(dev,
-                           "failed to get NCQ Non-Data Log Emask 0x%x\n",
-                           err_mask);
-       } else {
+       if (!err_mask) {
                u8 *cmds = dev->ncq_non_data_cmds;
 
                memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
@@ -2173,30 +2166,24 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev)
        struct ata_port *ap = dev->link->ap;
        unsigned int err_mask;
 
-       if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
-               dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
-               return;
-       }
-
        err_mask = ata_read_log_page(dev,
                                     ATA_LOG_IDENTIFY_DEVICE,
                                     ATA_LOG_SATA_SETTINGS,
                                     ap->sector_buf,
                                     1);
-       if (err_mask) {
-               ata_dev_dbg(dev,
-                           "failed to get Identify Device data, Emask 0x%x\n",
-                           err_mask);
-               return;
-       }
+       if (err_mask)
+               goto not_supported;
 
-       if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
-               dev->flags |= ATA_DFLAG_NCQ_PRIO;
-       } else {
-               dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
-               ata_dev_dbg(dev, "SATA page does not support priority\n");
-       }
+       if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
+               goto not_supported;
+
+       dev->flags |= ATA_DFLAG_NCQ_PRIO;
+
+       return;
 
+not_supported:
+       dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+       dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
 }
 
 static int ata_dev_config_ncq(struct ata_device *dev,
@@ -2346,11 +2333,8 @@ static void ata_dev_config_trusted(struct ata_device *dev)
 
        err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
                        ap->sector_buf, 1);
-       if (err) {
-               ata_dev_dbg(dev,
-                           "failed to read Security Log, Emask 0x%x\n", err);
+       if (err)
                return;
-       }
 
        trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
        if (!(trusted_cap & (1ULL << 63))) {
@@ -2363,6 +2347,106 @@ static void ata_dev_config_trusted(struct ata_device *dev)
                dev->flags |= ATA_DFLAG_TRUSTED;
 }
 
+static int ata_dev_config_lba(struct ata_device *dev)
+{
+       struct ata_port *ap = dev->link->ap;
+       const u16 *id = dev->id;
+       const char *lba_desc;
+       char ncq_desc[24];
+       int ret;
+
+       dev->flags |= ATA_DFLAG_LBA;
+
+       if (ata_id_has_lba48(id)) {
+               lba_desc = "LBA48";
+               dev->flags |= ATA_DFLAG_LBA48;
+               if (dev->n_sectors >= (1UL << 28) &&
+                   ata_id_has_flush_ext(id))
+                       dev->flags |= ATA_DFLAG_FLUSH_EXT;
+       } else {
+               lba_desc = "LBA";
+       }
+
+       /* config NCQ */
+       ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+
+       /* print device info to dmesg */
+       if (ata_msg_drv(ap) && ata_dev_print_info(dev))
+               ata_dev_info(dev,
+                            "%llu sectors, multi %u: %s %s\n",
+                            (unsigned long long)dev->n_sectors,
+                            dev->multi_count, lba_desc, ncq_desc);
+
+       return ret;
+}
+
+static void ata_dev_config_chs(struct ata_device *dev)
+{
+       struct ata_port *ap = dev->link->ap;
+       const u16 *id = dev->id;
+
+       if (ata_id_current_chs_valid(id)) {
+               /* Current CHS translation is valid. */
+               dev->cylinders = id[54];
+               dev->heads     = id[55];
+               dev->sectors   = id[56];
+       } else {
+               /* Default translation */
+               dev->cylinders  = id[1];
+               dev->heads      = id[3];
+               dev->sectors    = id[6];
+       }
+
+       /* print device info to dmesg */
+       if (ata_msg_drv(ap) && ata_dev_print_info(dev))
+               ata_dev_info(dev,
+                            "%llu sectors, multi %u, CHS %u/%u/%u\n",
+                            (unsigned long long)dev->n_sectors,
+                            dev->multi_count, dev->cylinders,
+                            dev->heads, dev->sectors);
+}
+
+static void ata_dev_config_devslp(struct ata_device *dev)
+{
+       u8 *sata_setting = dev->link->ap->sector_buf;
+       unsigned int err_mask;
+       int i, j;
+
+       /*
+        * Check the device sleep capability. Get the DevSlp timing variables
+        * from the SATA Settings page of the Identify Device Data Log.
+        */
+       if (!ata_id_has_devslp(dev->id))
+               return;
+
+       err_mask = ata_read_log_page(dev,
+                                    ATA_LOG_IDENTIFY_DEVICE,
+                                    ATA_LOG_SATA_SETTINGS,
+                                    sata_setting, 1);
+       if (err_mask)
+               return;
+
+       dev->flags |= ATA_DFLAG_DEVSLP;
+       for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
+               j = ATA_LOG_DEVSLP_OFFSET + i;
+               dev->devslp_timing[i] = sata_setting[j];
+       }
+}
+
+static void ata_dev_print_features(struct ata_device *dev)
+{
+       if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
+               return;
+
+       ata_dev_info(dev,
+                    "Features:%s%s%s%s%s\n",
+                    dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
+                    dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
+                    dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
+                    dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
+                    dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "");
+}
+
 /**
  *     ata_dev_configure - Configure the specified ATA/ATAPI device
  *     @dev: Target device to configure
@@ -2379,8 +2463,7 @@ static void ata_dev_config_trusted(struct ata_device *dev)
 int ata_dev_configure(struct ata_device *dev)
 {
        struct ata_port *ap = dev->link->ap;
-       struct ata_eh_context *ehc = &dev->link->eh_context;
-       int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
+       bool print_info = ata_dev_print_info(dev);
        const u16 *id = dev->id;
        unsigned long xfer_mask;
        unsigned int err_mask;
@@ -2507,91 +2590,28 @@ int ata_dev_configure(struct ata_device *dev)
                                        dev->multi_count = cnt;
                }
 
-               if (ata_id_has_lba(id)) {
-                       const char *lba_desc;
-                       char ncq_desc[24];
-
-                       lba_desc = "LBA";
-                       dev->flags |= ATA_DFLAG_LBA;
-                       if (ata_id_has_lba48(id)) {
-                               dev->flags |= ATA_DFLAG_LBA48;
-                               lba_desc = "LBA48";
-
-                               if (dev->n_sectors >= (1UL << 28) &&
-                                   ata_id_has_flush_ext(id))
-                                       dev->flags |= ATA_DFLAG_FLUSH_EXT;
-                       }
+               /* print device info to dmesg */
+               if (ata_msg_drv(ap) && print_info)
+                       ata_dev_info(dev, "%s: %s, %s, max %s\n",
+                                    revbuf, modelbuf, fwrevbuf,
+                                    ata_mode_string(xfer_mask));
 
-                       /* config NCQ */
-                       rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+               if (ata_id_has_lba(id)) {
+                       rc = ata_dev_config_lba(dev);
                        if (rc)
                                return rc;
-
-                       /* print device info to dmesg */
-                       if (ata_msg_drv(ap) && print_info) {
-                               ata_dev_info(dev, "%s: %s, %s, max %s\n",
-                                            revbuf, modelbuf, fwrevbuf,
-                                            ata_mode_string(xfer_mask));
-                               ata_dev_info(dev,
-                                            "%llu sectors, multi %u: %s %s\n",
-                                       (unsigned long long)dev->n_sectors,
-                                       dev->multi_count, lba_desc, ncq_desc);
-                       }
                } else {
-                       /* CHS */
-
-                       /* Default translation */
-                       dev->cylinders  = id[1];
-                       dev->heads      = id[3];
-                       dev->sectors    = id[6];
-
-                       if (ata_id_current_chs_valid(id)) {
-                               /* Current CHS translation is valid. */
-                               dev->cylinders = id[54];
-                               dev->heads     = id[55];
-                               dev->sectors   = id[56];
-                       }
-
-                       /* print device info to dmesg */
-                       if (ata_msg_drv(ap) && print_info) {
-                               ata_dev_info(dev, "%s: %s, %s, max %s\n",
-                                            revbuf,    modelbuf, fwrevbuf,
-                                            ata_mode_string(xfer_mask));
-                               ata_dev_info(dev,
-                                            "%llu sectors, multi %u, CHS %u/%u/%u\n",
-                                            (unsigned long long)dev->n_sectors,
-                                            dev->multi_count, dev->cylinders,
-                                            dev->heads, dev->sectors);
-                       }
+                       ata_dev_config_chs(dev);
                }
 
-               /* Check and mark DevSlp capability. Get DevSlp timing variables
-                * from SATA Settings page of Identify Device Data Log.
-                */
-               if (ata_id_has_devslp(dev->id)) {
-                       u8 *sata_setting = ap->sector_buf;
-                       int i, j;
-
-                       dev->flags |= ATA_DFLAG_DEVSLP;
-                       err_mask = ata_read_log_page(dev,
-                                                    ATA_LOG_IDENTIFY_DEVICE,
-                                                    ATA_LOG_SATA_SETTINGS,
-                                                    sata_setting,
-                                                    1);
-                       if (err_mask)
-                               ata_dev_dbg(dev,
-                                           "failed to get Identify Device Data, Emask 0x%x\n",
-                                           err_mask);
-                       else
-                               for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
-                                       j = ATA_LOG_DEVSLP_OFFSET + i;
-                                       dev->devslp_timing[i] = sata_setting[j];
-                               }
-               }
+               ata_dev_config_devslp(dev);
                ata_dev_config_sense_reporting(dev);
                ata_dev_config_zac(dev);
                ata_dev_config_trusted(dev);
                dev->cdb_len = 32;
+
+               if (ata_msg_drv(ap) && print_info)
+                       ata_dev_print_features(dev);
        }
 
        /* ATAPI-specific feature tests */
@@ -5573,7 +5593,7 @@ int ata_host_start(struct ata_host *host)
                        have_stop = 1;
        }
 
-       if (host->ops->host_stop)
+       if (host->ops && host->ops->host_stop)
                have_stop = 1;
 
        if (have_stop) {
index 8adeab7..8f3ff83 100644 (file)
@@ -834,28 +834,46 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
            ata_scsi_lpm_show, ata_scsi_lpm_store);
 EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
 
+static ssize_t ata_ncq_prio_supported_show(struct device *device,
+                                          struct device_attribute *attr,
+                                          char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(device);
+       struct ata_port *ap = ata_shost_to_port(sdev->host);
+       struct ata_device *dev;
+       bool ncq_prio_supported;
+       int rc = 0;
+
+       spin_lock_irq(ap->lock);
+       dev = ata_scsi_find_dev(ap, sdev);
+       if (!dev)
+               rc = -ENODEV;
+       else
+               ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
+       spin_unlock_irq(ap->lock);
+
+       return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported);
+}
+
+DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL);
+EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported);
+
 static ssize_t ata_ncq_prio_enable_show(struct device *device,
                                        struct device_attribute *attr,
                                        char *buf)
 {
        struct scsi_device *sdev = to_scsi_device(device);
-       struct ata_port *ap;
+       struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct ata_device *dev;
        bool ncq_prio_enable;
        int rc = 0;
 
-       ap = ata_shost_to_port(sdev->host);
-
        spin_lock_irq(ap->lock);
        dev = ata_scsi_find_dev(ap, sdev);
-       if (!dev) {
+       if (!dev)
                rc = -ENODEV;
-               goto unlock;
-       }
-
-       ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
-
-unlock:
+       else
+               ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
        spin_unlock_irq(ap->lock);
 
        return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
@@ -869,7 +887,7 @@ static ssize_t ata_ncq_prio_enable_store(struct device *device,
        struct ata_port *ap;
        struct ata_device *dev;
        long int input;
-       int rc;
+       int rc = 0;
 
        rc = kstrtol(buf, 10, &input);
        if (rc)
@@ -883,27 +901,20 @@ static ssize_t ata_ncq_prio_enable_store(struct device *device,
                return  -ENODEV;
 
        spin_lock_irq(ap->lock);
+
+       if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
+               rc = -EINVAL;
+               goto unlock;
+       }
+
        if (input)
                dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
        else
                dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
 
-       dev->link->eh_info.action |= ATA_EH_REVALIDATE;
-       dev->link->eh_info.flags |= ATA_EHI_QUIET;
-       ata_port_schedule_eh(ap);
+unlock:
        spin_unlock_irq(ap->lock);
 
-       ata_port_wait_eh(ap);
-
-       if (input) {
-               spin_lock_irq(ap->lock);
-               if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
-                       dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
-                       rc = -EIO;
-               }
-               spin_unlock_irq(ap->lock);
-       }
-
        return rc ? rc : len;
 }
 
@@ -914,6 +925,7 @@ EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
 struct device_attribute *ata_ncq_sdev_attrs[] = {
        &dev_attr_unload_heads,
        &dev_attr_ncq_prio_enable,
+       &dev_attr_ncq_prio_supported,
        NULL
 };
 EXPORT_SYMBOL_GPL(ata_ncq_sdev_attrs);
index b9588c5..0b7b462 100644 (file)
@@ -1765,53 +1765,6 @@ struct ata_scsi_args {
        struct scsi_cmnd        *cmd;
 };
 
-/**
- *     ata_scsi_rbuf_get - Map response buffer.
- *     @cmd: SCSI command containing buffer to be mapped.
- *     @flags: unsigned long variable to store irq enable status
- *     @copy_in: copy in from user buffer
- *
- *     Prepare buffer for simulated SCSI commands.
- *
- *     LOCKING:
- *     spin_lock_irqsave(ata_scsi_rbuf_lock) on success
- *
- *     RETURNS:
- *     Pointer to response buffer.
- */
-static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
-                              unsigned long *flags)
-{
-       spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags);
-
-       memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
-       if (copy_in)
-               sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
-                                 ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
-       return ata_scsi_rbuf;
-}
-
-/**
- *     ata_scsi_rbuf_put - Unmap response buffer.
- *     @cmd: SCSI command containing buffer to be unmapped.
- *     @copy_out: copy out result
- *     @flags: @flags passed to ata_scsi_rbuf_get()
- *
- *     Returns rbuf buffer.  The result is copied to @cmd's buffer if
- *     @copy_back is true.
- *
- *     LOCKING:
- *     Unlocks ata_scsi_rbuf_lock.
- */
-static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
-                                    unsigned long *flags)
-{
-       if (copy_out)
-               sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
-                                   ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
-       spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags);
-}
-
 /**
  *     ata_scsi_rbuf_fill - wrapper for SCSI command simulators
  *     @args: device IDENTIFY data / SCSI command of interest.
@@ -1830,14 +1783,19 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
 static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
                unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
 {
-       u8 *rbuf;
        unsigned int rc;
        struct scsi_cmnd *cmd = args->cmd;
        unsigned long flags;
 
-       rbuf = ata_scsi_rbuf_get(cmd, false, &flags);
-       rc = actor(args, rbuf);
-       ata_scsi_rbuf_put(cmd, rc == 0, &flags);
+       spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+
+       memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
+       rc = actor(args, ata_scsi_rbuf);
+       if (rc == 0)
+               sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
+                                   ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
+
+       spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
 
        if (rc == 0)
                cmd->result = SAM_STAT_GOOD;
index f0ef844..338c2e5 100644 (file)
@@ -1259,24 +1259,20 @@ static int sata_dwc_probe(struct platform_device *ofdev)
        irq = irq_of_parse_and_map(np, 0);
        if (irq == NO_IRQ) {
                dev_err(&ofdev->dev, "no SATA DMA irq\n");
-               err = -ENODEV;
-               goto error_out;
+               return -ENODEV;
        }
 
 #ifdef CONFIG_SATA_DWC_OLD_DMA
        if (!of_find_property(np, "dmas", NULL)) {
                err = sata_dwc_dma_init_old(ofdev, hsdev);
                if (err)
-                       goto error_out;
+                       return err;
        }
 #endif
 
        hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
-       if (IS_ERR(hsdev->phy)) {
-               err = PTR_ERR(hsdev->phy);
-               hsdev->phy = NULL;
-               goto error_out;
-       }
+       if (IS_ERR(hsdev->phy))
+               return PTR_ERR(hsdev->phy);
 
        err = phy_init(hsdev->phy);
        if (err)
index 404db83..2882af2 100644 (file)
@@ -202,3 +202,6 @@ int devtmpfs_delete_node(struct device *dev);
 static inline int devtmpfs_create_node(struct device *dev) { return 0; }
 static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
 #endif
+
+void software_node_notify(struct device *dev);
+void software_node_notify_remove(struct device *dev);
index f636049..3a72241 100644 (file)
@@ -2002,24 +2002,24 @@ static inline int device_is_not_partition(struct device *dev)
 }
 #endif
 
-static int
-device_platform_notify(struct device *dev, enum kobject_action action)
+static void device_platform_notify(struct device *dev)
 {
-       int ret;
+       acpi_device_notify(dev);
 
-       ret = acpi_platform_notify(dev, action);
-       if (ret)
-               return ret;
+       software_node_notify(dev);
 
-       ret = software_node_notify(dev, action);
-       if (ret)
-               return ret;
-
-       if (platform_notify && action == KOBJ_ADD)
+       if (platform_notify)
                platform_notify(dev);
-       else if (platform_notify_remove && action == KOBJ_REMOVE)
+}
+
+static void device_platform_notify_remove(struct device *dev)
+{
+       acpi_device_notify_remove(dev);
+
+       software_node_notify_remove(dev);
+
+       if (platform_notify_remove)
                platform_notify_remove(dev);
-       return 0;
 }
 
 /**
@@ -2837,6 +2837,7 @@ void device_initialize(struct device *dev)
        device_pm_init(dev);
        set_dev_node(dev, -1);
 #ifdef CONFIG_GENERIC_MSI_IRQ
+       raw_spin_lock_init(&dev->msi_lock);
        INIT_LIST_HEAD(&dev->msi_list);
 #endif
        INIT_LIST_HEAD(&dev->links.consumers);
@@ -3291,9 +3292,7 @@ int device_add(struct device *dev)
        }
 
        /* notify platform of device entry */
-       error = device_platform_notify(dev, KOBJ_ADD);
-       if (error)
-               goto platform_error;
+       device_platform_notify(dev);
 
        error = device_create_file(dev, &dev_attr_uevent);
        if (error)
@@ -3396,8 +3395,7 @@ done:
  SymlinkError:
        device_remove_file(dev, &dev_attr_uevent);
  attrError:
-       device_platform_notify(dev, KOBJ_REMOVE);
-platform_error:
+       device_platform_notify_remove(dev);
        kobject_uevent(&dev->kobj, KOBJ_REMOVE);
        glue_dir = get_glue_dir(dev);
        kobject_del(&dev->kobj);
@@ -3542,7 +3540,7 @@ void device_del(struct device *dev)
        bus_remove_device(dev);
        device_pm_remove(dev);
        driver_deferred_probe_del(dev);
-       device_platform_notify(dev, KOBJ_REMOVE);
+       device_platform_notify_remove(dev);
        device_remove_properties(dev);
        device_links_purge(dev);
 
index daeb9b5..437cd61 100644 (file)
@@ -653,8 +653,6 @@ dev_groups_failed:
        else if (drv->remove)
                drv->remove(dev);
 probe_failed:
-       kfree(dev->dma_range_map);
-       dev->dma_range_map = NULL;
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
@@ -662,6 +660,8 @@ pinctrl_bind_failed:
        device_links_no_driver(dev);
        devres_release_all(dev);
        arch_teardown_dma_ops(dev);
+       kfree(dev->dma_range_map);
+       dev->dma_range_map = NULL;
        driver_sysfs_remove(dev);
        dev->driver = NULL;
        dev_set_drvdata(dev, NULL);
index 91899d1..d7d63c1 100644 (file)
@@ -89,12 +89,11 @@ static void __fw_load_abort(struct fw_priv *fw_priv)
 {
        /*
         * There is a small window in which user can write to 'loading'
-        * between loading done and disappearance of 'loading'
+        * between loading done/aborted and disappearance of 'loading'
         */
-       if (fw_sysfs_done(fw_priv))
+       if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
                return;
 
-       list_del_init(&fw_priv->pending_list);
        fw_state_aborted(fw_priv);
 }
 
@@ -280,7 +279,6 @@ static ssize_t firmware_loading_store(struct device *dev,
                         * Same logic as fw_load_abort, only the DONE bit
                         * is ignored and we set ABORT only on failure.
                         */
-                       list_del_init(&fw_priv->pending_list);
                        if (rc) {
                                fw_state_aborted(fw_priv);
                                written = rc;
@@ -513,6 +511,11 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
        }
 
        mutex_lock(&fw_lock);
+       if (fw_state_is_aborted(fw_priv)) {
+               mutex_unlock(&fw_lock);
+               retval = -EINTR;
+               goto out;
+       }
        list_add(&fw_priv->pending_list, &pending_fw_head);
        mutex_unlock(&fw_lock);
 
@@ -535,11 +538,10 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
        if (fw_state_is_aborted(fw_priv)) {
                if (retval == -ERESTARTSYS)
                        retval = -EINTR;
-               else
-                       retval = -EAGAIN;
        } else if (fw_priv->is_paged_buf && !fw_priv->data)
                retval = -ENOMEM;
 
+out:
        device_del(f_dev);
 err_put_dev:
        put_device(f_dev);
index 63bd29f..a3014e9 100644 (file)
@@ -117,8 +117,16 @@ static inline void __fw_state_set(struct fw_priv *fw_priv,
 
        WRITE_ONCE(fw_st->status, status);
 
-       if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+       if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) {
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+               /*
+                * Doing this here ensures that the fw_priv is deleted from
+                * the pending list in all abort/done paths.
+                */
+               list_del_init(&fw_priv->pending_list);
+#endif
                complete_all(&fw_st->completion);
+       }
 }
 
 static inline void fw_state_aborted(struct fw_priv *fw_priv)
index 4fdb821..68c549d 100644 (file)
@@ -783,8 +783,10 @@ static void fw_abort_batch_reqs(struct firmware *fw)
                return;
 
        fw_priv = fw->priv;
+       mutex_lock(&fw_lock);
        if (!fw_state_is_aborted(fw_priv))
                fw_state_aborted(fw_priv);
+       mutex_unlock(&fw_lock);
 }
 
 /* called from request_firmware() and request_firmware_work_func() */
index 0b72b13..3d6c8f9 100644 (file)
  * and the callback to write the MSI message.
  */
 struct platform_msi_priv_data {
-       struct device           *dev;
-       void                    *host_data;
-       msi_alloc_info_t        arg;
-       irq_write_msi_msg_t     write_msg;
-       int                     devid;
+       struct device                   *dev;
+       void                            *host_data;
+       const struct attribute_group    **msi_irq_groups;
+       msi_alloc_info_t                arg;
+       irq_write_msi_msg_t             write_msg;
+       int                             devid;
 };
 
 /* The devid allocator */
@@ -272,8 +273,16 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
        if (err)
                goto out_free_desc;
 
+       priv_data->msi_irq_groups = msi_populate_sysfs(dev);
+       if (IS_ERR(priv_data->msi_irq_groups)) {
+               err = PTR_ERR(priv_data->msi_irq_groups);
+               goto out_free_irqs;
+       }
+
        return 0;
 
+out_free_irqs:
+       msi_domain_free_irqs(dev->msi_domain, dev);
 out_free_desc:
        platform_msi_free_descs(dev, 0, nvec);
 out_free_priv_data:
@@ -293,6 +302,7 @@ void platform_msi_domain_free_irqs(struct device *dev)
                struct msi_desc *desc;
 
                desc = first_msi_entry(dev);
+               msi_destroy_sysfs(dev, desc->platform.msi_priv_data->msi_irq_groups);
                platform_msi_free_priv_data(desc->platform.msi_priv_data);
        }
 
index a934c67..5db704f 100644 (file)
@@ -435,7 +435,7 @@ static void genpd_restore_performance_state(struct device *dev,
 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
 {
        struct generic_pm_domain *genpd;
-       int ret;
+       int ret = 0;
 
        genpd = dev_to_genpd_safe(dev);
        if (!genpd)
@@ -446,7 +446,13 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
                return -EINVAL;
 
        genpd_lock(genpd);
-       ret = genpd_set_performance_state(dev, state);
+       if (pm_runtime_suspended(dev)) {
+               dev_gpd_data(dev)->rpm_pstate = state;
+       } else {
+               ret = genpd_set_performance_state(dev, state);
+               if (!ret)
+                       dev_gpd_data(dev)->rpm_pstate = 0;
+       }
        genpd_unlock(genpd);
 
        return ret;
@@ -2598,6 +2604,12 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
 
        dev_dbg(dev, "removing from PM domain %s\n", pd->name);
 
+       /* Drop the default performance state */
+       if (dev_gpd_data(dev)->default_pstate) {
+               dev_pm_genpd_set_performance_state(dev, 0);
+               dev_gpd_data(dev)->default_pstate = 0;
+       }
+
        for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
                ret = genpd_remove_device(pd, dev);
                if (ret != -EAGAIN)
@@ -2637,6 +2649,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
 {
        struct of_phandle_args pd_args;
        struct generic_pm_domain *pd;
+       int pstate;
        int ret;
 
        ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
@@ -2675,10 +2688,29 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
                genpd_unlock(pd);
        }
 
-       if (ret)
+       if (ret) {
                genpd_remove_device(pd, dev);
+               return -EPROBE_DEFER;
+       }
 
-       return ret ? -EPROBE_DEFER : 1;
+       /* Set the default performance state */
+       pstate = of_get_required_opp_performance_state(dev->of_node, index);
+       if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
+               ret = pstate;
+               goto err;
+       } else if (pstate > 0) {
+               ret = dev_pm_genpd_set_performance_state(dev, pstate);
+               if (ret)
+                       goto err;
+               dev_gpd_data(dev)->default_pstate = pstate;
+       }
+       return 1;
+
+err:
+       dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
+               pd->name, ret);
+       genpd_remove_device(pd, dev);
+       return ret;
 }
 
 /**
index d0874f6..453918e 100644 (file)
@@ -1033,7 +1033,26 @@ struct fwnode_handle *
 fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
                               struct fwnode_handle *prev)
 {
-       return fwnode_call_ptr_op(fwnode, graph_get_next_endpoint, prev);
+       const struct fwnode_handle *parent;
+       struct fwnode_handle *ep;
+
+       /*
+        * If this function is in a loop and the previous iteration returned
+        * an endpoint from fwnode->secondary, then we need to use the secondary
+        * as parent rather than @fwnode.
+        */
+       if (prev)
+               parent = fwnode_graph_get_port_parent(prev);
+       else
+               parent = fwnode;
+
+       ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
+
+       if (IS_ERR_OR_NULL(ep) &&
+           !IS_ERR_OR_NULL(parent) && !IS_ERR_OR_NULL(parent->secondary))
+               ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
+
+       return ep;
 }
 EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
 
@@ -1212,14 +1231,7 @@ fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
                best_ep_id = fwnode_ep.id;
        }
 
-       if (best_ep)
-               return best_ep;
-
-       if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
-               return fwnode_graph_get_endpoint_by_id(fwnode->secondary, port,
-                                                      endpoint, flags);
-
-       return NULL;
+       return best_ep;
 }
 EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
 
index 0097696..b190591 100644 (file)
@@ -53,6 +53,10 @@ struct regmap {
                        spinlock_t spinlock;
                        unsigned long spinlock_flags;
                };
+               struct {
+                       raw_spinlock_t raw_spinlock;
+                       unsigned long raw_spinlock_flags;
+               };
        };
        regmap_lock lock;
        regmap_unlock unlock;
index 211a335..ad684d3 100644 (file)
@@ -368,7 +368,7 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
        char *buf;
        char *entry;
        int ret;
-       unsigned entry_len;
+       unsigned int entry_len;
 
        if (*ppos < 0 || !count)
                return -EINVAL;
index f9cd51a..71f16be 100644 (file)
@@ -15,7 +15,7 @@
 
 struct regmap_mmio_context {
        void __iomem *regs;
-       unsigned val_bytes;
+       unsigned int val_bytes;
        bool relaxed_mmio;
 
        bool attached_clk;
index fe3e38d..21a0c25 100644 (file)
@@ -533,6 +533,23 @@ __releases(&map->spinlock)
        spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
 }
 
+static void regmap_lock_raw_spinlock(void *__map)
+__acquires(&map->raw_spinlock)
+{
+       struct regmap *map = __map;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&map->raw_spinlock, flags);
+       map->raw_spinlock_flags = flags;
+}
+
+static void regmap_unlock_raw_spinlock(void *__map)
+__releases(&map->raw_spinlock)
+{
+       struct regmap *map = __map;
+       raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
+}
+
 static void dev_get_regmap_release(struct device *dev, void *res)
 {
        /*
@@ -770,11 +787,19 @@ struct regmap *__regmap_init(struct device *dev,
        } else {
                if ((bus && bus->fast_io) ||
                    config->fast_io) {
-                       spin_lock_init(&map->spinlock);
-                       map->lock = regmap_lock_spinlock;
-                       map->unlock = regmap_unlock_spinlock;
-                       lockdep_set_class_and_name(&map->spinlock,
-                                                  lock_key, lock_name);
+                       if (config->use_raw_spinlock) {
+                               raw_spin_lock_init(&map->raw_spinlock);
+                               map->lock = regmap_lock_raw_spinlock;
+                               map->unlock = regmap_unlock_raw_spinlock;
+                               lockdep_set_class_and_name(&map->raw_spinlock,
+                                                          lock_key, lock_name);
+                       } else {
+                               spin_lock_init(&map->spinlock);
+                               map->lock = regmap_lock_spinlock;
+                               map->unlock = regmap_unlock_spinlock;
+                               lockdep_set_class_and_name(&map->spinlock,
+                                                          lock_key, lock_name);
+                       }
                } else {
                        mutex_init(&map->mutex);
                        map->lock = regmap_lock_mutex;
@@ -1126,10 +1151,10 @@ skip_format_initialization:
                /* Make sure, that this register range has no selector
                   or data window within its boundary */
                for (j = 0; j < config->num_ranges; j++) {
-                       unsigned sel_reg = config->ranges[j].selector_reg;
-                       unsigned win_min = config->ranges[j].window_start;
-                       unsigned win_max = win_min +
-                                          config->ranges[j].window_len - 1;
+                       unsigned int sel_reg = config->ranges[j].selector_reg;
+                       unsigned int win_min = config->ranges[j].window_start;
+                       unsigned int win_max = win_min +
+                                              config->ranges[j].window_len - 1;
 
                        /* Allow data window inside its own virtual range */
                        if (j == i)
@@ -1298,7 +1323,7 @@ EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
  */
 int regmap_field_bulk_alloc(struct regmap *regmap,
                            struct regmap_field **rm_field,
-                           struct reg_field *reg_field,
+                           const struct reg_field *reg_field,
                            int num_fields)
 {
        struct regmap_field *rf;
@@ -1334,7 +1359,7 @@ EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
 int devm_regmap_field_bulk_alloc(struct device *dev,
                                 struct regmap *regmap,
                                 struct regmap_field **rm_field,
-                                struct reg_field *reg_field,
+                                const struct reg_field *reg_field,
                                 int num_fields)
 {
        struct regmap_field *rf;
@@ -1667,7 +1692,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
                        if (ret) {
                                dev_err(map->dev,
                                        "Error in caching of register: %x ret: %d\n",
-                                       reg + i, ret);
+                                       reg + regmap_get_offset(map, i), ret);
                                return ret;
                        }
                }
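
The raw-spinlock path added above is selected entirely through struct regmap_config. Below is a minimal sketch of how an MMIO user might opt in; the config name and register layout are hypothetical, not taken from the patch:

#include <linux/regmap.h>

/*
 * Hypothetical fast-path MMIO regmap that asks for the raw spinlock:
 * fast_io selects spinlock-based locking, and use_raw_spinlock makes
 * __regmap_init() install regmap_lock_raw_spinlock()/regmap_unlock_raw_spinlock()
 * instead of the ordinary spinlock callbacks, so the map remains usable
 * from contexts where a sleeping lock (which spinlocks become on
 * PREEMPT_RT) is not acceptable.
 */
static const struct regmap_config example_mmio_cfg = {
	.reg_bits         = 32,
	.val_bits         = 32,
	.reg_stride       = 4,
	.fast_io          = true,
	.use_raw_spinlock = true,
	.max_register     = 0xfc,
};

A regmap created from this config (for example via devm_regmap_init_mmio()) then uses the new raw-spinlock lock/unlock helpers shown in the hunk above.
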
index d1f1a82..7bd0f3c 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/property.h>
 #include <linux/slab.h>
 
+#include "base.h"
+
 struct swnode {
        struct kobject kobj;
        struct fwnode_handle fwnode;
@@ -1053,7 +1055,7 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
         * balance.
         */
        if (device_is_registered(dev))
-               software_node_notify(dev, KOBJ_ADD);
+               software_node_notify(dev);
 
        return 0;
 }
@@ -1074,7 +1076,8 @@ void device_remove_software_node(struct device *dev)
                return;
 
        if (device_is_registered(dev))
-               software_node_notify(dev, KOBJ_REMOVE);
+               software_node_notify_remove(dev);
+
        set_secondary_fwnode(dev, NULL);
        kobject_put(&swnode->kobj);
 }
@@ -1117,44 +1120,44 @@ int device_create_managed_software_node(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(device_create_managed_software_node);
 
-int software_node_notify(struct device *dev, unsigned long action)
+void software_node_notify(struct device *dev)
 {
        struct swnode *swnode;
        int ret;
 
        swnode = dev_to_swnode(dev);
        if (!swnode)
-               return 0;
+               return;
 
-       switch (action) {
-       case KOBJ_ADD:
-               ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
-               if (ret)
-                       break;
+       ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
+       if (ret)
+               return;
 
-               ret = sysfs_create_link(&swnode->kobj, &dev->kobj,
-                                       dev_name(dev));
-               if (ret) {
-                       sysfs_remove_link(&dev->kobj, "software_node");
-                       break;
-               }
-               kobject_get(&swnode->kobj);
-               break;
-       case KOBJ_REMOVE:
-               sysfs_remove_link(&swnode->kobj, dev_name(dev));
+       ret = sysfs_create_link(&swnode->kobj, &dev->kobj, dev_name(dev));
+       if (ret) {
                sysfs_remove_link(&dev->kobj, "software_node");
-               kobject_put(&swnode->kobj);
-
-               if (swnode->managed) {
-                       set_secondary_fwnode(dev, NULL);
-                       kobject_put(&swnode->kobj);
-               }
-               break;
-       default:
-               break;
+               return;
        }
 
-       return 0;
+       kobject_get(&swnode->kobj);
+}
+
+void software_node_notify_remove(struct device *dev)
+{
+       struct swnode *swnode;
+
+       swnode = dev_to_swnode(dev);
+       if (!swnode)
+               return;
+
+       sysfs_remove_link(&swnode->kobj, dev_name(dev));
+       sysfs_remove_link(&dev->kobj, "software_node");
+       kobject_put(&swnode->kobj);
+
+       if (swnode->managed) {
+               set_secondary_fwnode(dev, NULL);
+               kobject_put(&swnode->kobj);
+       }
 }
 
 static int __init software_node_init(void)
index 63056cf..fbb3a55 100644 (file)
@@ -213,7 +213,7 @@ config BLK_DEV_LOOP_MIN_COUNT
          dynamically allocated with the /dev/loop-control interface.
 
 config BLK_DEV_CRYPTOLOOP
-       tristate "Cryptoloop Support"
+       tristate "Cryptoloop Support (DEPRECATED)"
        select CRYPTO
        select CRYPTO_CBC
        depends on BLK_DEV_LOOP
@@ -225,7 +225,7 @@ config BLK_DEV_CRYPTOLOOP
          WARNING: This device is not safe for journaled file systems like
          ext3 or Reiserfs. Please use the Device Mapper crypto module
          instead, which can be configured to be on-disk compatible with the
-         cryptoloop device.
+         cryptoloop device.  cryptoloop support will be removed in Linux 5.16.
 
 source "drivers/block/drbd/Kconfig"
 
index 9569411..58ec167 100644 (file)
@@ -27,9 +27,6 @@
 
 #include <linux/uaccess.h>
 
-#define PAGE_SECTORS_SHIFT     (PAGE_SHIFT - SECTOR_SHIFT)
-#define PAGE_SECTORS           (1 << PAGE_SECTORS_SHIFT)
-
 /*
  * Each block ramdisk device has a radix_tree brd_pages of pages that stores
  * the pages containing the block device's contents. A brd page's ->index is
index 3cabc33..f0a91fa 100644 (file)
@@ -189,6 +189,8 @@ init_cryptoloop(void)
 
        if (rc)
                printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
+       else
+               pr_warn("the cryptoloop driver has been deprecated and will be removed in Linux 5.16\n");
        return rc;
 }
 
index e7d0e63..44ccf8b 100644 (file)
@@ -1364,7 +1364,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
 
        if (b) {
                blk_stack_limits(&q->limits, &b->limits, 0);
-               blk_queue_update_readahead(q);
+               disk_update_readahead(device->vdisk);
        }
        fixup_discard_if_not_supported(q);
        fixup_write_zeroes(device, q);
index 13beb98..5ca2336 100644 (file)
@@ -905,13 +905,12 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
 static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
                enum drbd_read_balancing rbm)
 {
-       struct backing_dev_info *bdi;
        int stripe_shift;
 
        switch (rbm) {
        case RB_CONGESTED_REMOTE:
-               bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
-               return bdi_read_congested(bdi);
+               return bdi_read_congested(
+                       device->ldev->backing_bdev->bd_disk->bdi);
        case RB_LEAST_PENDING:
                return atomic_read(&device->local_cnt) >
                        atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
index 87460e0..fef79ea 100644 (file)
@@ -4029,23 +4029,23 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
        if (fdc_state[FDC(drive)].rawcmd == 1)
                fdc_state[FDC(drive)].rawcmd = 2;
 
-       if (mode & (FMODE_READ|FMODE_WRITE)) {
-               drive_state[drive].last_checked = 0;
-               clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
-               if (bdev_check_media_change(bdev))
-                       floppy_revalidate(bdev->bd_disk);
-               if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
-                       goto out;
-               if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+       if (!(mode & FMODE_NDELAY)) {
+               if (mode & (FMODE_READ|FMODE_WRITE)) {
+                       drive_state[drive].last_checked = 0;
+                       clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
+                                 &drive_state[drive].flags);
+                       if (bdev_check_media_change(bdev))
+                               floppy_revalidate(bdev->bd_disk);
+                       if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
+                               goto out;
+                       if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+                               goto out;
+               }
+               res = -EROFS;
+               if ((mode & FMODE_WRITE) &&
+                   !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
                        goto out;
        }
-
-       res = -EROFS;
-
-       if ((mode & FMODE_WRITE) &&
-                       !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
-               goto out;
-
        mutex_unlock(&open_lock);
        mutex_unlock(&floppy_mutex);
        return 0;
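
The restored FMODE_NDELAY branch mostly matters to userspace tools that want to talk to the drive without requiring readable, writable media. A small userspace sketch of that usage follows; the device path is only an example:

/* Userspace sketch: opening with O_NONBLOCK maps to FMODE_NDELAY in the
 * kernel, so with the change above the open skips the media-change and
 * write-protect checks and ioctl()s can be issued even with no disk
 * inserted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fd.h>

int main(void)
{
	struct floppy_drive_struct state;
	int fd = open("/dev/fd0", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FDGETDRVSTAT, &state) == 0)
		printf("drive flags: %lx\n", state.flags);
	close(fd);
	return 0;
}
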
index f0cdff0..fa1c298 100644 (file)
@@ -774,6 +774,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
                goto out_err;
 
        /* and ... switch */
+       disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
        blk_mq_freeze_queue(lo->lo_queue);
        mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
        lo->lo_backing_file = file;
@@ -1257,6 +1258,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
                goto out_unlock;
        }
 
+       disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
        set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
        INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
@@ -1304,10 +1306,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
        if (partscan)
                lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
 
-       /* Grab the block_device to prevent its destruction after we
-        * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
-        */
-       bdgrab(bdev);
        loop_global_unlock(lo, is_loop);
        if (partscan)
                loop_reread_partitions(lo);
@@ -1398,7 +1396,6 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
        blk_queue_physical_block_size(lo->lo_queue, 512);
        blk_queue_io_min(lo->lo_queue, 512);
        if (bdev) {
-               bdput(bdev);
                invalidate_bdev(bdev);
                bdev->bd_inode->i_mapping->wb_err = 0;
        }
@@ -1415,6 +1412,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
 
        partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
        lo_number = lo->lo_number;
+       disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
 out_unlock:
        mutex_unlock(&lo->lo_mutex);
        if (partscan) {
@@ -2335,7 +2333,8 @@ static int loop_add(int i)
        lo->tag_set.queue_depth = 128;
        lo->tag_set.numa_node = NUMA_NO_NODE;
        lo->tag_set.cmd_size = sizeof(struct loop_cmd);
-       lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
+       lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
+               BLK_MQ_F_NO_SCHED_BY_DEFAULT;
        lo->tag_set.driver_data = lo;
 
        err = blk_mq_alloc_tag_set(&lo->tag_set);
@@ -2391,6 +2390,8 @@ static int loop_add(int i)
        disk->fops              = &lo_fops;
        disk->private_data      = lo;
        disk->queue             = lo->lo_queue;
+       disk->events            = DISK_EVENT_MEDIA_CHANGE;
+       disk->event_flags       = DISK_EVENT_FLAG_UEVENT;
        sprintf(disk->disk_name, "loop%d", i);
        add_disk(disk);
        mutex_unlock(&loop_ctl_mutex);
index 7b4dd10..c84be00 100644 (file)
@@ -74,7 +74,7 @@ static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)
 
        n64cart_wait_dma();
 
-       n64cart_write_reg(PI_DRAM_REG, dma_addr + bv->bv_offset);
+       n64cart_write_reg(PI_DRAM_REG, dma_addr);
        n64cart_write_reg(PI_CART_REG, (bstart | CART_DOMAIN) & CART_MAX);
        n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1);
 
index c383179..5170a63 100644 (file)
@@ -49,6 +49,7 @@
 
 static DEFINE_IDR(nbd_index_idr);
 static DEFINE_MUTEX(nbd_index_mutex);
+static struct workqueue_struct *nbd_del_wq;
 static int nbd_total_devices = 0;
 
 struct nbd_sock {
@@ -113,12 +114,12 @@ struct nbd_device {
        struct mutex config_lock;
        struct gendisk *disk;
        struct workqueue_struct *recv_workq;
+       struct work_struct remove_work;
 
        struct list_head list;
        struct task_struct *task_recv;
        struct task_struct *task_setup;
 
-       struct completion *destroy_complete;
        unsigned long flags;
 
        char *backend;
@@ -237,32 +238,36 @@ static void nbd_dev_remove(struct nbd_device *nbd)
 {
        struct gendisk *disk = nbd->disk;
 
-       if (disk) {
-               del_gendisk(disk);
-               blk_cleanup_disk(disk);
-               blk_mq_free_tag_set(&nbd->tag_set);
-       }
+       del_gendisk(disk);
+       blk_cleanup_disk(disk);
+       blk_mq_free_tag_set(&nbd->tag_set);
 
        /*
-        * Place this in the last just before the nbd is freed to
-        * make sure that the disk and the related kobject are also
-        * totally removed to avoid duplicate creation of the same
-        * one.
+        * Remove from idr after del_gendisk() completes, so if the same ID is
+        * reused, the following add_disk() will succeed.
         */
-       if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
-               complete(nbd->destroy_complete);
+       mutex_lock(&nbd_index_mutex);
+       idr_remove(&nbd_index_idr, nbd->index);
+       mutex_unlock(&nbd_index_mutex);
 
        kfree(nbd);
 }
 
+static void nbd_dev_remove_work(struct work_struct *work)
+{
+       nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
+}
+
 static void nbd_put(struct nbd_device *nbd)
 {
-       if (refcount_dec_and_mutex_lock(&nbd->refs,
-                                       &nbd_index_mutex)) {
-               idr_remove(&nbd_index_idr, nbd->index);
+       if (!refcount_dec_and_test(&nbd->refs))
+               return;
+
+       /* Call del_gendisk() asynchronously to prevent deadlock */
+       if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
+               queue_work(nbd_del_wq, &nbd->remove_work);
+       else
                nbd_dev_remove(nbd);
-               mutex_unlock(&nbd_index_mutex);
-       }
 }
 
 static int nbd_disconnected(struct nbd_config *config)
@@ -818,6 +823,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 
+       /* don't abort an already-completed request */
+       if (blk_mq_request_completed(req))
+               return true;
+
        mutex_lock(&cmd->lock);
        cmd->status = BLK_STS_IOERR;
        mutex_unlock(&cmd->lock);
@@ -1384,6 +1393,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
 {
        struct nbd_config *config = nbd->config;
+       loff_t bytesize;
 
        switch (cmd) {
        case NBD_DISCONNECT:
@@ -1398,8 +1408,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
        case NBD_SET_SIZE:
                return nbd_set_size(nbd, arg, config->blksize);
        case NBD_SET_SIZE_BLOCKS:
-               return nbd_set_size(nbd, arg * config->blksize,
-                                   config->blksize);
+               if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
+                       return -EINVAL;
+               return nbd_set_size(nbd, bytesize, config->blksize);
        case NBD_SET_TIMEOUT:
                nbd_set_cmd_timeout(nbd, arg);
                return 0;
@@ -1661,7 +1672,7 @@ static const struct blk_mq_ops nbd_mq_ops = {
        .timeout        = nbd_xmit_timeout,
 };
 
-static int nbd_dev_add(int index)
+static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
 {
        struct nbd_device *nbd;
        struct gendisk *disk;
@@ -1679,13 +1690,14 @@ static int nbd_dev_add(int index)
        nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
                BLK_MQ_F_BLOCKING;
        nbd->tag_set.driver_data = nbd;
-       nbd->destroy_complete = NULL;
+       INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
        nbd->backend = NULL;
 
        err = blk_mq_alloc_tag_set(&nbd->tag_set);
        if (err)
                goto out_free_nbd;
 
+       mutex_lock(&nbd_index_mutex);
        if (index >= 0) {
                err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
                                GFP_KERNEL);
@@ -1696,9 +1708,10 @@ static int nbd_dev_add(int index)
                if (err >= 0)
                        index = err;
        }
+       nbd->index = index;
+       mutex_unlock(&nbd_index_mutex);
        if (err < 0)
                goto out_free_tags;
-       nbd->index = index;
 
        disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
        if (IS_ERR(disk)) {
@@ -1722,38 +1735,65 @@ static int nbd_dev_add(int index)
 
        mutex_init(&nbd->config_lock);
        refcount_set(&nbd->config_refs, 0);
-       refcount_set(&nbd->refs, 1);
+       /*
+        * Start out with zero references to keep other threads from using
+        * this device until it is fully initialized.
+        */
+       refcount_set(&nbd->refs, 0);
        INIT_LIST_HEAD(&nbd->list);
        disk->major = NBD_MAJOR;
+
+       /* An overly large first_minor can cause duplicate creation of
+        * sysfs files/links, since first_minor is truncated to a
+        * byte in __device_add_disk().
+        */
        disk->first_minor = index << part_shift;
+       if (disk->first_minor > 0xff) {
+               err = -EINVAL;
+               goto out_free_idr;
+       }
+
        disk->minors = 1 << part_shift;
        disk->fops = &nbd_fops;
        disk->private_data = nbd;
        sprintf(disk->disk_name, "nbd%d", index);
        add_disk(disk);
+
+       /*
+        * Now publish the device.
+        */
+       refcount_set(&nbd->refs, refs);
        nbd_total_devices++;
-       return index;
+       return nbd;
 
 out_free_idr:
+       mutex_lock(&nbd_index_mutex);
        idr_remove(&nbd_index_idr, index);
+       mutex_unlock(&nbd_index_mutex);
 out_free_tags:
        blk_mq_free_tag_set(&nbd->tag_set);
 out_free_nbd:
        kfree(nbd);
 out:
-       return err;
+       return ERR_PTR(err);
 }
 
-static int find_free_cb(int id, void *ptr, void *data)
+static struct nbd_device *nbd_find_get_unused(void)
 {
-       struct nbd_device *nbd = ptr;
-       struct nbd_device **found = data;
+       struct nbd_device *nbd;
+       int id;
 
-       if (!refcount_read(&nbd->config_refs)) {
-               *found = nbd;
-               return 1;
+       lockdep_assert_held(&nbd_index_mutex);
+
+       idr_for_each_entry(&nbd_index_idr, nbd, id) {
+               if (refcount_read(&nbd->config_refs) ||
+                   test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
+                       continue;
+               if (refcount_inc_not_zero(&nbd->refs))
+                       return nbd;
        }
-       return 0;
+
+       return NULL;
 }
 
 /* Netlink interface. */
@@ -1802,8 +1842,7 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
 
 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
 {
-       DECLARE_COMPLETION_ONSTACK(destroy_complete);
-       struct nbd_device *nbd = NULL;
+       struct nbd_device *nbd;
        struct nbd_config *config;
        int index = -1;
        int ret;
@@ -1825,55 +1864,29 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
 again:
        mutex_lock(&nbd_index_mutex);
        if (index == -1) {
-               ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
-               if (ret == 0) {
-                       int new_index;
-                       new_index = nbd_dev_add(-1);
-                       if (new_index < 0) {
-                               mutex_unlock(&nbd_index_mutex);
-                               printk(KERN_ERR "nbd: failed to add new device\n");
-                               return new_index;
-                       }
-                       nbd = idr_find(&nbd_index_idr, new_index);
-               }
+               nbd = nbd_find_get_unused();
        } else {
                nbd = idr_find(&nbd_index_idr, index);
-               if (!nbd) {
-                       ret = nbd_dev_add(index);
-                       if (ret < 0) {
+               if (nbd) {
+                       if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
+                            test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
+                           !refcount_inc_not_zero(&nbd->refs)) {
                                mutex_unlock(&nbd_index_mutex);
-                               printk(KERN_ERR "nbd: failed to add new device\n");
-                               return ret;
+                               pr_err("nbd: device at index %d is going down\n",
+                                       index);
+                               return -EINVAL;
                        }
-                       nbd = idr_find(&nbd_index_idr, index);
                }
        }
-       if (!nbd) {
-               printk(KERN_ERR "nbd: couldn't find device at index %d\n",
-                      index);
-               mutex_unlock(&nbd_index_mutex);
-               return -EINVAL;
-       }
-
-       if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
-           test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
-               nbd->destroy_complete = &destroy_complete;
-               mutex_unlock(&nbd_index_mutex);
-
-               /* Wait untill the the nbd stuff is totally destroyed */
-               wait_for_completion(&destroy_complete);
-               goto again;
-       }
+       mutex_unlock(&nbd_index_mutex);
 
-       if (!refcount_inc_not_zero(&nbd->refs)) {
-               mutex_unlock(&nbd_index_mutex);
-               if (index == -1)
-                       goto again;
-               printk(KERN_ERR "nbd: device at index %d is going down\n",
-                      index);
-               return -EINVAL;
+       if (!nbd) {
+               nbd = nbd_dev_add(index, 2);
+               if (IS_ERR(nbd)) {
+                       pr_err("nbd: failed to add new device\n");
+                       return PTR_ERR(nbd);
+               }
        }
-       mutex_unlock(&nbd_index_mutex);
 
        mutex_lock(&nbd->config_lock);
        if (refcount_read(&nbd->config_refs)) {
@@ -2004,15 +2017,19 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
 {
        mutex_lock(&nbd->config_lock);
        nbd_disconnect(nbd);
-       nbd_clear_sock(nbd);
-       mutex_unlock(&nbd->config_lock);
+       sock_shutdown(nbd);
        /*
         * Make sure recv thread has finished, so it does not drop the last
         * config ref and try to destroy the workqueue from inside the work
-        * queue.
+        * queue. And this also ensures that we can safely call nbd_clear_que()
+        * to cancel the inflight I/Os.
         */
        if (nbd->recv_workq)
                flush_workqueue(nbd->recv_workq);
+       nbd_clear_que(nbd);
+       nbd->task_setup = NULL;
+       mutex_unlock(&nbd->config_lock);
+
        if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
                               &nbd->config->runtime_flags))
                nbd_config_put(nbd);
@@ -2416,16 +2433,21 @@ static int __init nbd_init(void)
        if (register_blkdev(NBD_MAJOR, "nbd"))
                return -EIO;
 
+       nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
+       if (!nbd_del_wq) {
+               unregister_blkdev(NBD_MAJOR, "nbd");
+               return -ENOMEM;
+       }
+
        if (genl_register_family(&nbd_genl_family)) {
+               destroy_workqueue(nbd_del_wq);
                unregister_blkdev(NBD_MAJOR, "nbd");
                return -EINVAL;
        }
        nbd_dbg_init();
 
-       mutex_lock(&nbd_index_mutex);
        for (i = 0; i < nbds_max; i++)
-               nbd_dev_add(i);
-       mutex_unlock(&nbd_index_mutex);
+               nbd_dev_add(i, 1);
        return 0;
 }
 
@@ -2434,7 +2456,10 @@ static int nbd_exit_cb(int id, void *ptr, void *data)
        struct list_head *list = (struct list_head *)data;
        struct nbd_device *nbd = ptr;
 
-       list_add_tail(&nbd->list, list);
+       /* Skip nbd that is being removed asynchronously */
+       if (refcount_read(&nbd->refs))
+               list_add_tail(&nbd->list, list);
+
        return 0;
 }
 
@@ -2457,6 +2482,9 @@ static void __exit nbd_cleanup(void)
                nbd_put(nbd);
        }
 
+       /* Also wait for nbd_dev_remove_work() to complete */
+       destroy_workqueue(nbd_del_wq);
+
        idr_destroy(&nbd_index_idr);
        genl_unregister_family(&nbd_genl_family);
        unregister_blkdev(NBD_MAJOR, "nbd");
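
The connect path above now relies on the usual idr-plus-refcount lookup idiom instead of the old completion-based handshake. Stripped of the nbd specifics (the structure and function names below are hypothetical), the pattern that nbd_find_get_unused() and nbd_genl_connect() follow looks like this:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/refcount.h>

struct obj {
	refcount_t refs;
	/* ... payload ... */
};

/*
 * Walk the idr under its mutex and only hand out an object whose
 * refcount can be raised from a non-zero value; objects already on
 * their way down (refs == 0, removal queued to a workqueue) are
 * skipped, so the caller never races with asynchronous teardown.
 */
static struct obj *obj_find_get(struct idr *registry, struct mutex *lock)
{
	struct obj *o;
	int id;

	mutex_lock(lock);
	idr_for_each_entry(registry, o, id) {
		if (refcount_inc_not_zero(&o->refs)) {
			mutex_unlock(lock);
			return o;
		}
	}
	mutex_unlock(lock);
	return NULL;
}
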
index d734e9e..187d779 100644 (file)
 #include <linux/init.h>
 #include "null_blk.h"
 
-#define PAGE_SECTORS_SHIFT     (PAGE_SHIFT - SECTOR_SHIFT)
-#define PAGE_SECTORS           (1 << PAGE_SECTORS_SHIFT)
-#define SECTOR_MASK            (PAGE_SECTORS - 1)
-
 #define FREE_BATCH             16
 
 #define TICKS_PER_SEC          50ULL
@@ -1721,8 +1717,7 @@ static int null_gendisk_register(struct nullb *nullb)
                        return ret;
        }
 
-       add_disk(disk);
-       return 0;
+       return add_disk(disk);
 }
 
 static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
index 9b32989..675327d 100644 (file)
@@ -892,7 +892,7 @@ static void pd_probe_drive(struct pd_unit *disk)
                return;
 
        p = blk_mq_alloc_disk(&disk->tag_set, disk);
-       if (!p) {
+       if (IS_ERR(p)) {
                blk_mq_free_tag_set(&disk->tag_set);
                return;
        }
index 538446b..0f26b25 100644 (file)
@@ -1183,10 +1183,8 @@ try_next_bio:
        wakeup = (pd->write_congestion_on > 0
                        && pd->bio_queue_size <= pd->write_congestion_off);
        spin_unlock(&pd->lock);
-       if (wakeup) {
-               clear_bdi_congested(pd->disk->queue->backing_dev_info,
-                                       BLK_RW_ASYNC);
-       }
+       if (wakeup)
+               clear_bdi_congested(pd->disk->bdi, BLK_RW_ASYNC);
 
        pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
        pkt_set_state(pkt, PACKET_WAITING_STATE);
@@ -2366,7 +2364,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
        spin_lock(&pd->lock);
        if (pd->write_congestion_on > 0
            && pd->bio_queue_size >= pd->write_congestion_on) {
-               set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
+               set_bdi_congested(bio->bi_bdev->bd_disk->bdi, BLK_RW_ASYNC);
                do {
                        spin_unlock(&pd->lock);
                        congestion_wait(BLK_RW_ASYNC, HZ);
index f374ea2..8d51efb 100644 (file)
@@ -83,26 +83,12 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
        unsigned int offset = 0;
        struct req_iterator iter;
        struct bio_vec bvec;
-       unsigned int i = 0;
-       size_t size;
-       void *buf;
 
        rq_for_each_segment(bvec, req, iter) {
-               unsigned long flags;
-               dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %llu\n",
-                       __func__, __LINE__, i, bio_sectors(iter.bio),
-                       iter.bio->bi_iter.bi_sector);
-
-               size = bvec.bv_len;
-               buf = bvec_kmap_irq(&bvec, &flags);
                if (gather)
-                       memcpy(dev->bounce_buf+offset, buf, size);
+                       memcpy_from_bvec(dev->bounce_buf + offset, &bvec);
                else
-                       memcpy(buf, dev->bounce_buf+offset, size);
-               offset += size;
-               flush_kernel_dcache_page(bvec.bv_page);
-               bvec_kunmap_irq(buf, &flags);
-               i++;
+                       memcpy_to_bvec(&bvec, dev->bounce_buf + offset);
        }
 }
 
index 7fbf469..c7b19e1 100644 (file)
@@ -541,7 +541,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 
        bio_for_each_segment(bvec, bio, iter) {
                /* PS3 is ppc64, so we don't handle highmem */
-               char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+               char *ptr = bvec_virt(&bvec);
                size_t len = bvec.bv_len, retlen;
 
                dev_dbg(&dev->core, "    %s %zu bytes at offset %llu\n", op,
index 90b947c..e65c9d7 100644 (file)
@@ -1219,24 +1219,13 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
        rbd_dev->mapping.size = 0;
 }
 
-static void zero_bvec(struct bio_vec *bv)
-{
-       void *buf;
-       unsigned long flags;
-
-       buf = bvec_kmap_irq(bv, &flags);
-       memset(buf, 0, bv->bv_len);
-       flush_dcache_page(bv->bv_page);
-       bvec_kunmap_irq(buf, &flags);
-}
-
 static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
 {
        struct ceph_bio_iter it = *bio_pos;
 
        ceph_bio_iter_advance(&it, off);
        ceph_bio_iter_advance_step(&it, bytes, ({
-               zero_bvec(&bv);
+               memzero_bvec(&bv);
        }));
 }
 
@@ -1246,7 +1235,7 @@ static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
 
        ceph_bvec_iter_advance(&it, off);
        ceph_bvec_iter_advance_step(&it, bytes, ({
-               zero_bvec(&bv);
+               memzero_bvec(&bv);
        }));
 }
 
@@ -2997,8 +2986,7 @@ static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
        };
 
        ceph_bvec_iter_advance_step(&it, bytes, ({
-               if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
-                              bv.bv_len))
+               if (memchr_inv(bvec_virt(&bv), 0, bv.bv_len))
                        return false;
        }));
        return true;
index 324afdd..4b93fd8 100644 (file)
@@ -227,17 +227,17 @@ static ssize_t state_show(struct kobject *kobj,
 
        switch (dev->dev_state) {
        case DEV_STATE_INIT:
-               return snprintf(page, PAGE_SIZE, "init\n");
+               return sysfs_emit(page, "init\n");
        case DEV_STATE_MAPPED:
                /* TODO fix cli tool before changing to proper state */
-               return snprintf(page, PAGE_SIZE, "open\n");
+               return sysfs_emit(page, "open\n");
        case DEV_STATE_MAPPED_DISCONNECTED:
                /* TODO fix cli tool before changing to proper state */
-               return snprintf(page, PAGE_SIZE, "closed\n");
+               return sysfs_emit(page, "closed\n");
        case DEV_STATE_UNMAPPED:
-               return snprintf(page, PAGE_SIZE, "unmapped\n");
+               return sysfs_emit(page, "unmapped\n");
        default:
-               return snprintf(page, PAGE_SIZE, "unknown\n");
+               return sysfs_emit(page, "unknown\n");
        }
 }
 
@@ -263,7 +263,7 @@ static ssize_t mapping_path_show(struct kobject *kobj,
 
        dev = container_of(kobj, struct rnbd_clt_dev, kobj);
 
-       return scnprintf(page, PAGE_SIZE, "%s\n", dev->pathname);
+       return sysfs_emit(page, "%s\n", dev->pathname);
 }
 
 static struct kobj_attribute rnbd_clt_mapping_path_attr =
@@ -276,8 +276,7 @@ static ssize_t access_mode_show(struct kobject *kobj,
 
        dev = container_of(kobj, struct rnbd_clt_dev, kobj);
 
-       return snprintf(page, PAGE_SIZE, "%s\n",
-                       rnbd_access_mode_str(dev->access_mode));
+       return sysfs_emit(page, "%s\n", rnbd_access_mode_str(dev->access_mode));
 }
 
 static struct kobj_attribute rnbd_clt_access_mode =
@@ -286,8 +285,8 @@ static struct kobj_attribute rnbd_clt_access_mode =
 static ssize_t rnbd_clt_unmap_dev_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *page)
 {
-       return scnprintf(page, PAGE_SIZE, "Usage: echo <normal|force> > %s\n",
-                        attr->attr.name);
+       return sysfs_emit(page, "Usage: echo <normal|force> > %s\n",
+                         attr->attr.name);
 }
 
 static ssize_t rnbd_clt_unmap_dev_store(struct kobject *kobj,
@@ -357,9 +356,8 @@ static ssize_t rnbd_clt_resize_dev_show(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         char *page)
 {
-       return scnprintf(page, PAGE_SIZE,
-                        "Usage: echo <new size in sectors> > %s\n",
-                        attr->attr.name);
+       return sysfs_emit(page, "Usage: echo <new size in sectors> > %s\n",
+                         attr->attr.name);
 }
 
 static ssize_t rnbd_clt_resize_dev_store(struct kobject *kobj,
@@ -390,8 +388,7 @@ static struct kobj_attribute rnbd_clt_resize_dev_attr =
 static ssize_t rnbd_clt_remap_dev_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *page)
 {
-       return scnprintf(page, PAGE_SIZE, "Usage: echo <1> > %s\n",
-                        attr->attr.name);
+       return sysfs_emit(page, "Usage: echo <1> > %s\n", attr->attr.name);
 }
 
 static ssize_t rnbd_clt_remap_dev_store(struct kobject *kobj,
@@ -436,7 +433,7 @@ static ssize_t session_show(struct kobject *kobj, struct kobj_attribute *attr,
 
        dev = container_of(kobj, struct rnbd_clt_dev, kobj);
 
-       return scnprintf(page, PAGE_SIZE, "%s\n", dev->sess->sessname);
+       return sysfs_emit(page, "%s\n", dev->sess->sessname);
 }
 
 static struct kobj_attribute rnbd_clt_session_attr =
@@ -499,8 +496,8 @@ static ssize_t rnbd_clt_map_device_show(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         char *page)
 {
-       return scnprintf(page, PAGE_SIZE,
-                        "Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>] [nr_poll_queues=<number of queues>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n",
+       return sysfs_emit(page,
+                         "Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>] [nr_poll_queues=<number of queues>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n",
                         attr->attr.name);
 }
 
index e9cc413..bd4a41a 100644 (file)
@@ -271,7 +271,7 @@ unlock:
         */
        if (cpu_q)
                *cpup = cpu_q->cpu;
-       put_cpu_var(sess->cpu_rr);
+       put_cpu_ptr(sess->cpu_rr);
 
        if (q)
                rnbd_clt_dev_requeue(q);
index acf5fce..4db98e0 100644 (file)
@@ -90,8 +90,8 @@ static ssize_t read_only_show(struct kobject *kobj, struct kobj_attribute *attr,
 
        sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
 
-       return scnprintf(page, PAGE_SIZE, "%d\n",
-                        !(sess_dev->open_flags & FMODE_WRITE));
+       return sysfs_emit(page, "%d\n",
+                         !(sess_dev->open_flags & FMODE_WRITE));
 }
 
 static struct kobj_attribute rnbd_srv_dev_session_ro_attr =
@@ -105,8 +105,8 @@ static ssize_t access_mode_show(struct kobject *kobj,
 
        sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
 
-       return scnprintf(page, PAGE_SIZE, "%s\n",
-                        rnbd_access_mode_str(sess_dev->access_mode));
+       return sysfs_emit(page, "%s\n",
+                         rnbd_access_mode_str(sess_dev->access_mode));
 }
 
 static struct kobj_attribute rnbd_srv_dev_session_access_mode_attr =
@@ -119,7 +119,7 @@ static ssize_t mapping_path_show(struct kobject *kobj,
 
        sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
 
-       return scnprintf(page, PAGE_SIZE, "%s\n", sess_dev->pathname);
+       return sysfs_emit(page, "%s\n", sess_dev->pathname);
 }
 
 static struct kobj_attribute rnbd_srv_dev_session_mapping_path_attr =
@@ -128,8 +128,8 @@ static struct kobj_attribute rnbd_srv_dev_session_mapping_path_attr =
 static ssize_t rnbd_srv_dev_session_force_close_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *page)
 {
-       return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
-                        attr->attr.name);
+       return sysfs_emit(page, "Usage: echo 1 > %s\n",
+                         attr->attr.name);
 }
 
 static ssize_t rnbd_srv_dev_session_force_close_store(struct kobject *kobj,
index 7b54353..420cd95 100644 (file)
@@ -1373,7 +1373,7 @@ static void carm_free_disk(struct carm_host *host, unsigned int port_no)
        if (!disk)
                return;
 
-       if (disk->flags & GENHD_FL_UP)
+       if (host->state > HST_DEV_ACTIVATE)
                del_gendisk(disk);
        blk_cleanup_disk(disk);
 }
index 4b49df2..57c6ae7 100644 (file)
@@ -166,11 +166,8 @@ static inline void virtblk_request_done(struct request *req)
 {
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
 
-       if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-               kfree(page_address(req->special_vec.bv_page) +
-                     req->special_vec.bv_offset);
-       }
-
+       if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
+               kfree(bvec_virt(&req->special_vec));
        blk_mq_end_request(req, virtblk_result(vbr));
 }
 
@@ -692,6 +689,28 @@ static const struct blk_mq_ops virtio_mq_ops = {
 static unsigned int virtblk_queue_depth;
 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
 
+static int virtblk_validate(struct virtio_device *vdev)
+{
+       u32 blk_size;
+
+       if (!vdev->config->get) {
+               dev_err(&vdev->dev, "%s failure: config access disabled\n",
+                       __func__);
+               return -EINVAL;
+       }
+
+       if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
+               return 0;
+
+       blk_size = virtio_cread32(vdev,
+                       offsetof(struct virtio_blk_config, blk_size));
+
+       if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)
+               __virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);
+
+       return 0;
+}
+
 static int virtblk_probe(struct virtio_device *vdev)
 {
        struct virtio_blk *vblk;
@@ -703,12 +722,6 @@ static int virtblk_probe(struct virtio_device *vdev)
        u8 physical_block_exp, alignment_offset;
        unsigned int queue_depth;
 
-       if (!vdev->config->get) {
-               dev_err(&vdev->dev, "%s failure: config access disabled\n",
-                       __func__);
-               return -EINVAL;
-       }
-
        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
@@ -823,6 +836,14 @@ static int virtblk_probe(struct virtio_device *vdev)
        else
                blk_size = queue_logical_block_size(q);
 
+       if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) {
+               dev_err(&vdev->dev,
+                       "block size is changed unexpectedly, now is %u\n",
+                       blk_size);
+               err = -EINVAL;
+               goto out_cleanup_disk;
+       }
+
        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
@@ -878,9 +899,14 @@ static int virtblk_probe(struct virtio_device *vdev)
        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);
 
-       device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
+       err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
+       if (err)
+               goto out_cleanup_disk;
+
        return 0;
 
+out_cleanup_disk:
+       blk_cleanup_disk(vblk->disk);
 out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
 out_free_vq:
@@ -983,6 +1009,7 @@ static struct virtio_driver virtio_blk = {
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
+       .validate                       = virtblk_validate,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
index d83fee2..715bfa8 100644 (file)
@@ -1092,7 +1092,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
        err = xlbd_reserve_minors(minor, nr_minors);
        if (err)
                return err;
-       err = -ENODEV;
 
        memset(&info->tag_set, 0, sizeof(info->tag_set));
        info->tag_set.ops = &blkfront_mq_ops;
index 38cb116..148a4dd 100644 (file)
@@ -100,6 +100,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
  * @cookie: data used by legacy platform callbacks
  * @name: name if available
  * @revision: interconnect target module revision
+ * @reserved: target module is reserved and already in use
  * @enabled: sysc runtime enabled status
  * @needs_resume: runtime resume needed on resume from suspend
  * @child_needs_resume: runtime resume needed for child on resume from suspend
@@ -130,6 +131,7 @@ struct sysc {
        struct ti_sysc_cookie cookie;
        const char *name;
        u32 revision;
+       unsigned int reserved:1;
        unsigned int enabled:1;
        unsigned int needs_resume:1;
        unsigned int child_needs_resume:1;
@@ -2951,6 +2953,8 @@ static int sysc_init_soc(struct sysc *ddata)
                case SOC_3430 ... SOC_3630:
                        sysc_add_disabled(0x48304000);  /* timer12 */
                        break;
+               case SOC_AM3:
+                       sysc_add_disabled(0x48310000);  /* rng */
                default:
                        break;
                }
@@ -3093,7 +3097,9 @@ static int sysc_probe(struct platform_device *pdev)
                return error;
 
        error = sysc_check_active_timer(ddata);
-       if (error)
+       if (error == -ENXIO)
+               ddata->reserved = true;
+       else if (error)
                return error;
 
        error = sysc_get_clocks(ddata);
@@ -3130,11 +3136,15 @@ static int sysc_probe(struct platform_device *pdev)
        sysc_show_registers(ddata);
 
        ddata->dev->type = &sysc_device_type;
-       error = of_platform_populate(ddata->dev->of_node, sysc_match_table,
-                                    pdata ? pdata->auxdata : NULL,
-                                    ddata->dev);
-       if (error)
-               goto err;
+
+       if (!ddata->reserved) {
+               error = of_platform_populate(ddata->dev->of_node,
+                                            sysc_match_table,
+                                            pdata ? pdata->auxdata : NULL,
+                                            ddata->dev);
+               if (error)
+                       goto err;
+       }
 
        INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
 
index 3f166c8..239eca4 100644 (file)
@@ -524,6 +524,20 @@ config HW_RANDOM_XIPHERA
          To compile this driver as a module, choose M here: the
          module will be called xiphera-trng.
 
+config HW_RANDOM_ARM_SMCCC_TRNG
+       tristate "Arm SMCCC TRNG firmware interface support"
+       depends on HAVE_ARM_SMCCC_DISCOVERY
+       default HW_RANDOM
+       help
+         Say 'Y' to enable the True Random Number Generator driver using
+         the Arm SMCCC TRNG firmware interface. This reads entropy from
+         higher exception levels (firmware, hypervisor). Uses SMCCC for
+         communicating with the firmware:
+         https://developer.arm.com/documentation/den0098/latest/
+
+         To compile this driver as a module, choose M here: the
+         module will be called arm_smccc_trng.
+
 endif # HW_RANDOM
 
 config UML_RANDOM
index 8933fad..a5a1c76 100644 (file)
@@ -45,3 +45,4 @@ obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
 obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
 obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
 obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
+obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
index d8d4ef5..c22d418 100644 (file)
@@ -124,7 +124,7 @@ static struct hwrng amd_rng = {
        .read           = amd_rng_read,
 };
 
-static int __init mod_init(void)
+static int __init amd_rng_mod_init(void)
 {
        int err;
        struct pci_dev *pdev = NULL;
@@ -188,7 +188,7 @@ out:
        return err;
 }
 
-static void __exit mod_exit(void)
+static void __exit amd_rng_mod_exit(void)
 {
        struct amd768_priv *priv;
 
@@ -203,8 +203,8 @@ static void __exit mod_exit(void)
        kfree(priv);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(amd_rng_mod_init);
+module_exit(amd_rng_mod_exit);
 
 MODULE_AUTHOR("The Linux Kernel team");
 MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets");
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
new file mode 100644 (file)
index 0000000..b24ac39
--- /dev/null
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Randomness driver for the ARM SMCCC TRNG Firmware Interface
+ * https://developer.arm.com/documentation/den0098/latest/
+ *
+ *  Copyright (C) 2020 Arm Ltd.
+ *
+ * The ARM TRNG firmware interface specifies a protocol to read entropy
+ * from a higher exception level, to abstract from any machine specific
+ * implementations and allow easier use in hypervisors.
+ *
+ * The firmware interface is realised using the SMCCC specification.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hw_random.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/arm-smccc.h>
+
+#ifdef CONFIG_ARM64
+#define ARM_SMCCC_TRNG_RND     ARM_SMCCC_TRNG_RND64
+#define MAX_BITS_PER_CALL      (3 * 64UL)
+#else
+#define ARM_SMCCC_TRNG_RND     ARM_SMCCC_TRNG_RND32
+#define MAX_BITS_PER_CALL      (3 * 32UL)
+#endif
+
+/* We don't want to allow the firmware to stall us forever. */
+#define SMCCC_TRNG_MAX_TRIES   20
+
+#define SMCCC_RET_TRNG_INVALID_PARAMETER       -2
+#define SMCCC_RET_TRNG_NO_ENTROPY              -3
+
+static int copy_from_registers(char *buf, struct arm_smccc_res *res,
+                              size_t bytes)
+{
+       unsigned int chunk, copied;
+
+       if (bytes == 0)
+               return 0;
+
+       chunk = min(bytes, sizeof(long));
+       memcpy(buf, &res->a3, chunk);
+       copied = chunk;
+       if (copied >= bytes)
+               return copied;
+
+       chunk = min((bytes - copied), sizeof(long));
+       memcpy(&buf[copied], &res->a2, chunk);
+       copied += chunk;
+       if (copied >= bytes)
+               return copied;
+
+       chunk = min((bytes - copied), sizeof(long));
+       memcpy(&buf[copied], &res->a1, chunk);
+
+       return copied + chunk;
+}
+
+static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+       struct arm_smccc_res res;
+       u8 *buf = data;
+       unsigned int copied = 0;
+       int tries = 0;
+
+       while (copied < max) {
+               size_t bits = min_t(size_t, (max - copied) * BITS_PER_BYTE,
+                                 MAX_BITS_PER_CALL);
+
+               arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
+               if ((int)res.a0 < 0)
+                       return (int)res.a0;
+
+               switch ((int)res.a0) {
+               case SMCCC_RET_SUCCESS:
+                       copied += copy_from_registers(buf + copied, &res,
+                                                     bits / BITS_PER_BYTE);
+                       tries = 0;
+                       break;
+               case SMCCC_RET_TRNG_NO_ENTROPY:
+                       if (!wait)
+                               return copied;
+                       tries++;
+                       if (tries >= SMCCC_TRNG_MAX_TRIES)
+                               return copied;
+                       cond_resched();
+                       break;
+               }
+       }
+
+       return copied;
+}
+
+static int smccc_trng_probe(struct platform_device *pdev)
+{
+       struct hwrng *trng;
+
+       trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+       if (!trng)
+               return -ENOMEM;
+
+       trng->name = "smccc_trng";
+       trng->read = smccc_trng_read;
+
+       platform_set_drvdata(pdev, trng);
+
+       return devm_hwrng_register(&pdev->dev, trng);
+}
+
+static struct platform_driver smccc_trng_driver = {
+       .driver = {
+               .name           = "smccc_trng",
+       },
+       .probe          = smccc_trng_probe,
+};
+module_platform_driver(smccc_trng_driver);
+
+MODULE_ALIAS("platform:smccc_trng");
+MODULE_AUTHOR("Andre Przywara");
+MODULE_LICENSE("GPL");
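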
index e1d421a..138ce43 100644 (file)
@@ -83,7 +83,7 @@ static struct hwrng geode_rng = {
 };
 
 
-static int __init mod_init(void)
+static int __init geode_rng_init(void)
 {
        int err = -ENODEV;
        struct pci_dev *pdev = NULL;
@@ -124,7 +124,7 @@ err_unmap:
        goto out;
 }
 
-static void __exit mod_exit(void)
+static void __exit geode_rng_exit(void)
 {
        void __iomem *mem = (void __iomem *)geode_rng.priv;
 
@@ -132,8 +132,8 @@ static void __exit mod_exit(void)
        iounmap(mem);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(geode_rng_init);
+module_exit(geode_rng_exit);
 
 MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs");
 MODULE_LICENSE("GPL");
index d740b88..7b171cb 100644 (file)
@@ -325,7 +325,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
 }
 
 
-static int __init mod_init(void)
+static int __init intel_rng_mod_init(void)
 {
        int err = -ENODEV;
        int i;
@@ -403,7 +403,7 @@ out:
 
 }
 
-static void __exit mod_exit(void)
+static void __exit intel_rng_mod_exit(void)
 {
        void __iomem *mem = (void __iomem *)intel_rng.priv;
 
@@ -411,8 +411,8 @@ static void __exit mod_exit(void)
        iounmap(mem);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(intel_rng_mod_init);
+module_exit(intel_rng_mod_exit);
 
 MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets");
 MODULE_LICENSE("GPL");
index 39943bc..7444cc1 100644 (file)
@@ -192,7 +192,7 @@ static struct hwrng via_rng = {
 };
 
 
-static int __init mod_init(void)
+static int __init via_rng_mod_init(void)
 {
        int err;
 
@@ -209,13 +209,13 @@ static int __init mod_init(void)
 out:
        return err;
 }
-module_init(mod_init);
+module_init(via_rng_mod_init);
 
-static void __exit mod_exit(void)
+static void __exit via_rng_mod_exit(void)
 {
        hwrng_unregister(&via_rng);
 }
-module_exit(mod_exit);
+module_exit(via_rng_mod_exit);
 
 static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
        X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL),
index 4308f9c..d6ba644 100644 (file)
@@ -89,7 +89,6 @@ config TCG_TIS_SYNQUACER
 config TCG_TIS_I2C_CR50
        tristate "TPM Interface Specification 2.0 Interface (I2C - CR50)"
        depends on I2C
-       select TCG_CR50
        help
          This is a driver for the Google cr50 I2C TPM interface which is a
          custom microcontroller and requires a custom i2c protocol interface
index 2ccdf8a..6e32355 100644 (file)
@@ -254,11 +254,11 @@ static int ftpm_tee_probe(struct device *dev)
        pvt_data->session = sess_arg.session;
 
        /* Allocate dynamic shared memory with fTPM TA */
-       pvt_data->shm = tee_shm_alloc(pvt_data->ctx,
-                                     MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE,
-                                     TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+       pvt_data->shm = tee_shm_alloc_kernel_buf(pvt_data->ctx,
+                                                MAX_COMMAND_SIZE +
+                                                MAX_RESPONSE_SIZE);
        if (IS_ERR(pvt_data->shm)) {
-               dev_err(dev, "%s: tee_shm_alloc failed\n", __func__);
+               dev_err(dev, "%s: tee_shm_alloc_kernel_buf failed\n", __func__);
                rc = -ENOMEM;
                goto out_shm_alloc;
        }
index 9036047..3af4c07 100644 (file)
@@ -106,17 +106,12 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        u16 len;
-       int sig;
 
        if (!ibmvtpm->rtce_buf) {
                dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
                return 0;
        }
 
-       sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
-       if (sig)
-               return -EINTR;
-
        len = ibmvtpm->res_len;
 
        if (count < len) {
@@ -237,7 +232,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
         * set the processing flag before the Hcall, since we may get the
         * result (interrupt) before even being able to check rc.
         */
-       ibmvtpm->tpm_processing_cmd = true;
+       ibmvtpm->tpm_processing_cmd = 1;
 
 again:
        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
@@ -255,7 +250,7 @@ again:
                        goto again;
                }
                dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
-               ibmvtpm->tpm_processing_cmd = false;
+               ibmvtpm->tpm_processing_cmd = 0;
        }
 
        spin_unlock(&ibmvtpm->rtce_lock);
@@ -269,7 +264,9 @@ static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
 
 static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
 {
-       return 0;
+       struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+
+       return ibmvtpm->tpm_processing_cmd;
 }
 
 /**
@@ -457,7 +454,7 @@ static const struct tpm_class_ops tpm_ibmvtpm = {
        .send = tpm_ibmvtpm_send,
        .cancel = tpm_ibmvtpm_cancel,
        .status = tpm_ibmvtpm_status,
-       .req_complete_mask = 0,
+       .req_complete_mask = 1,
        .req_complete_val = 0,
        .req_canceled = tpm_ibmvtpm_req_canceled,
 };
@@ -550,7 +547,7 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
                case VTPM_TPM_COMMAND_RES:
                        /* len of the data in rtce buffer */
                        ibmvtpm->res_len = be16_to_cpu(crq->len);
-                       ibmvtpm->tpm_processing_cmd = false;
+                       ibmvtpm->tpm_processing_cmd = 0;
                        wake_up_interruptible(&ibmvtpm->wq);
                        return;
                default:
@@ -688,8 +685,15 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                goto init_irq_cleanup;
        }
 
-       if (!strcmp(id->compat, "IBM,vtpm20")) {
+
+       if (!strcmp(id->compat, "IBM,vtpm20"))
                chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+       rc = tpm_get_timeouts(chip);
+       if (rc)
+               goto init_irq_cleanup;
+
+       if (chip->flags & TPM_CHIP_FLAG_TPM2) {
                rc = tpm2_get_cc_attrs_tbl(chip);
                if (rc)
                        goto init_irq_cleanup;
index b92aa7d..51198b1 100644 (file)
@@ -41,7 +41,7 @@ struct ibmvtpm_dev {
        wait_queue_head_t wq;
        u16 res_len;
        u32 vtpm_version;
-       bool tpm_processing_cmd;
+       u8 tpm_processing_cmd;
 };
 
 #define CRQ_RES_BUF_SIZE       PAGE_SIZE
index 44dde2f..c892781 100644 (file)
@@ -639,12 +639,6 @@ static const struct tpm_class_ops cr50_i2c = {
        .req_canceled = &tpm_cr50_i2c_req_canceled,
 };
 
-static const struct i2c_device_id cr50_i2c_table[] = {
-       {"cr50_i2c", 0},
-       {}
-};
-MODULE_DEVICE_TABLE(i2c, cr50_i2c_table);
-
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id cr50_i2c_acpi_id[] = {
        { "GOOG0005", 0 },
@@ -670,8 +664,7 @@ MODULE_DEVICE_TABLE(of, of_cr50_i2c_match);
  * - 0:                Success.
  * - -errno:   A POSIX error code.
  */
-static int tpm_cr50_i2c_probe(struct i2c_client *client,
-                             const struct i2c_device_id *id)
+static int tpm_cr50_i2c_probe(struct i2c_client *client)
 {
        struct tpm_i2c_cr50_priv_data *priv;
        struct device *dev = &client->dev;
@@ -774,8 +767,7 @@ static int tpm_cr50_i2c_remove(struct i2c_client *client)
 static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
 
 static struct i2c_driver cr50_i2c_driver = {
-       .id_table = cr50_i2c_table,
-       .probe = tpm_cr50_i2c_probe,
+       .probe_new = tpm_cr50_i2c_probe,
        .remove = tpm_cr50_i2c_remove,
        .driver = {
                .name = "cr50_i2c",
index be16076..f9d5b73 100644 (file)
@@ -92,13 +92,20 @@ int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
 }
 EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional);
 
+static void devm_clk_bulk_release_all(struct device *dev, void *res)
+{
+       struct clk_bulk_devres *devres = res;
+
+       clk_bulk_put_all(devres->num_clks, devres->clks);
+}
+
 int __must_check devm_clk_bulk_get_all(struct device *dev,
                                       struct clk_bulk_data **clks)
 {
        struct clk_bulk_devres *devres;
        int ret;
 
-       devres = devres_alloc(devm_clk_bulk_release,
+       devres = devres_alloc(devm_clk_bulk_release_all,
                              sizeof(*devres), GFP_KERNEL);
        if (!devres)
                return -ENOMEM;
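
The hunk above fixes devm_clk_bulk_get_all(): clocks obtained with clk_bulk_get_all() must be dropped with clk_bulk_put_all(), which also frees the clk_bulk_data array the getter allocated, so the devres release callback now calls the _all variant. A hedged sketch of typical consumer usage (hypothetical foo_probe(), error handling trimmed):

    static int foo_probe(struct platform_device *pdev)
    {
            struct clk_bulk_data *clks;
            int num_clks;

            /* Allocates the clks array and grabs every clock of the device. */
            num_clks = devm_clk_bulk_get_all(&pdev->dev, &clks);
            if (num_clks < 0)
                    return num_clks;

            /* Clocks and the array are released through devres on unbind. */
            return clk_bulk_prepare_enable(num_clks, clks);
    }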
index 18117ce..5c75e3d 100644 (file)
@@ -526,7 +526,7 @@ struct stm32f4_pll {
 
 struct stm32f4_pll_post_div_data {
        int idx;
-       u8 pll_num;
+       int pll_idx;
        const char *name;
        const char *parent;
        u8 flag;
@@ -557,13 +557,13 @@ static const struct clk_div_table post_divr_table[] = {
 
 #define MAX_POST_DIV 3
 static const struct stm32f4_pll_post_div_data  post_div_data[MAX_POST_DIV] = {
-       { CLK_I2SQ_PDIV, PLL_I2S, "plli2s-q-div", "plli2s-q",
+       { CLK_I2SQ_PDIV, PLL_VCO_I2S, "plli2s-q-div", "plli2s-q",
                CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 0, 5, 0, NULL},
 
-       { CLK_SAIQ_PDIV, PLL_SAI, "pllsai-q-div", "pllsai-q",
+       { CLK_SAIQ_PDIV, PLL_VCO_SAI, "pllsai-q-div", "pllsai-q",
                CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 8, 5, 0, NULL },
 
-       { NO_IDX, PLL_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT,
+       { NO_IDX, PLL_VCO_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT,
                STM32F4_RCC_DCKCFGR, 16, 2, 0, post_divr_table },
 };
 
@@ -1774,7 +1774,7 @@ static void __init stm32f4_rcc_init(struct device_node *np)
                                post_div->width,
                                post_div->flag_div,
                                post_div->div_table,
-                               clks[post_div->pll_num],
+                               clks[post_div->pll_idx],
                                &stm32f4_clk_lock);
 
                if (post_div->idx != NO_IDX)
index 5ecc37a..c1ec75a 100644 (file)
@@ -18,6 +18,7 @@ config COMMON_CLK_HI3519
 config COMMON_CLK_HI3559A
        bool "Hi3559A Clock Driver"
        depends on ARCH_HISI || COMPILE_TEST
+       select RESET_HISI
        default ARCH_HISI
        help
          Build the clock driver for hi3559a.
index 496900d..de36f58 100644 (file)
@@ -974,6 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
                               hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
        }
 
-       imx_register_uart_clocks(1);
+       imx_register_uart_clocks(2);
 }
 CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
index 800b2fe..b2c142f 100644 (file)
@@ -467,7 +467,7 @@ DEFINE_CLK_SMD_RPM(msm8936, sysmmnoc_clk, sysmmnoc_a_clk, QCOM_SMD_RPM_BUS_CLK,
 
 static struct clk_smd_rpm *msm8936_clks[] = {
        [RPM_SMD_PCNOC_CLK]             = &msm8916_pcnoc_clk,
-       [RPM_SMD_PCNOC_A_CLK]           = &msm8916_pcnoc_clk,
+       [RPM_SMD_PCNOC_A_CLK]           = &msm8916_pcnoc_a_clk,
        [RPM_SMD_SNOC_CLK]              = &msm8916_snoc_clk,
        [RPM_SMD_SNOC_A_CLK]            = &msm8916_snoc_a_clk,
        [RPM_SMD_BIMC_CLK]              = &msm8916_bimc_clk,
index 51ed640..4ece326 100644 (file)
@@ -357,27 +357,43 @@ static int gdsc_init(struct gdsc *sc)
        if (on < 0)
                return on;
 
-       /*
-        * Votable GDSCs can be ON due to Vote from other masters.
-        * If a Votable GDSC is ON, make sure we have a Vote.
-        */
-       if ((sc->flags & VOTABLE) && on)
-               gdsc_enable(&sc->pd);
+       if (on) {
+               /* The regulator must be on, sync the kernel state */
+               if (sc->rsupply) {
+                       ret = regulator_enable(sc->rsupply);
+                       if (ret < 0)
+                               return ret;
+               }
 
-       /*
-        * Make sure the retain bit is set if the GDSC is already on, otherwise
-        * we end up turning off the GDSC and destroying all the register
-        * contents that we thought we were saving.
-        */
-       if ((sc->flags & RETAIN_FF_ENABLE) && on)
-               gdsc_retain_ff_on(sc);
+               /*
+                * Votable GDSCs can be ON due to Vote from other masters.
+                * If a Votable GDSC is ON, make sure we have a Vote.
+                */
+               if (sc->flags & VOTABLE) {
+                       ret = regmap_update_bits(sc->regmap, sc->gdscr,
+                                                SW_COLLAPSE_MASK, val);
+                       if (ret)
+                               return ret;
+               }
+
+               /* Turn on HW trigger mode if supported */
+               if (sc->flags & HW_CTRL) {
+                       ret = gdsc_hwctrl(sc, true);
+                       if (ret < 0)
+                               return ret;
+               }
 
-       /* If ALWAYS_ON GDSCs are not ON, turn them ON */
-       if (sc->flags & ALWAYS_ON) {
-               if (!on)
-                       gdsc_enable(&sc->pd);
+               /*
+                * Make sure the retain bit is set if the GDSC is already on,
+                * otherwise we end up turning off the GDSC and destroying all
+                * the register contents that we thought we were saving.
+                */
+               if (sc->flags & RETAIN_FF_ENABLE)
+                       gdsc_retain_ff_on(sc);
+       } else if (sc->flags & ALWAYS_ON) {
+               /* If ALWAYS_ON GDSCs are not ON, turn them ON */
+               gdsc_enable(&sc->pd);
                on = true;
-               sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
        }
 
        if (on || (sc->pwrsts & PWRSTS_RET))
@@ -385,6 +401,8 @@ static int gdsc_init(struct gdsc *sc)
        else
                gdsc_clear_mem_on(sc);
 
+       if (sc->flags & ALWAYS_ON)
+               sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
        if (!sc->pd.power_off)
                sc->pd.power_off = gdsc_disable;
        if (!sc->pd.power_on)
index 9fb79bd..684d893 100644 (file)
@@ -187,7 +187,7 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
        init.ops = &usb2_clock_sel_clock_ops;
        priv->hw.init = &init;
 
-       ret = devm_clk_hw_register(NULL, &priv->hw);
+       ret = devm_clk_hw_register(dev, &priv->hw);
        if (ret)
                goto pm_put;
 
index 316912d..4f2c330 100644 (file)
@@ -194,6 +194,15 @@ static void clk_sdmmc_mux_disable(struct clk_hw *hw)
        gate_ops->disable(gate_hw);
 }
 
+static void clk_sdmmc_mux_disable_unused(struct clk_hw *hw)
+{
+       struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+       const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+       struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+       gate_ops->disable_unused(gate_hw);
+}
+
 static void clk_sdmmc_mux_restore_context(struct clk_hw *hw)
 {
        struct clk_hw *parent = clk_hw_get_parent(hw);
@@ -218,6 +227,7 @@ static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
        .is_enabled = clk_sdmmc_mux_is_enabled,
        .enable = clk_sdmmc_mux_enable,
        .disable = clk_sdmmc_mux_disable,
+       .disable_unused = clk_sdmmc_mux_disable_unused,
        .restore_context = clk_sdmmc_mux_restore_context,
 };
 
index fabad79..5e3e96d 100644 (file)
 
 #define TICK_BASE_CNT  1
 
+#ifdef CONFIG_ARM
+/* Use values higher than ARM arch timer. See 6282edb72bed. */
+#define MCT_CLKSOURCE_RATING           450
+#define MCT_CLKEVENTS_RATING           500
+#else
+#define MCT_CLKSOURCE_RATING           350
+#define MCT_CLKEVENTS_RATING           350
+#endif
+
 enum {
        MCT_INT_SPI,
        MCT_INT_PPI
@@ -206,7 +215,7 @@ static void exynos4_frc_resume(struct clocksource *cs)
 
 static struct clocksource mct_frc = {
        .name           = "mct-frc",
-       .rating         = 450,  /* use value higher than ARM arch timer */
+       .rating         = MCT_CLKSOURCE_RATING,
        .read           = exynos4_frc_read,
        .mask           = CLOCKSOURCE_MASK(32),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -456,8 +465,9 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
        evt->set_state_oneshot = set_state_shutdown;
        evt->set_state_oneshot_stopped = set_state_shutdown;
        evt->tick_resume = set_state_shutdown;
-       evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-       evt->rating = 500;      /* use value higher than ARM arch timer */
+       evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+                       CLOCK_EVT_FEAT_PERCPU;
+       evt->rating = MCT_CLKEVENTS_RATING;
 
        exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
 
index a129840..cb6fc2f 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
  */
 
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
@@ -34,8 +35,6 @@
 /* bits within the OSTCCR register */
 #define OSTCCR_PRESCALE1_MASK  0x3
 #define OSTCCR_PRESCALE2_MASK  0xc
-#define OSTCCR_PRESCALE1_LSB   0
-#define OSTCCR_PRESCALE2_LSB   2
 
 /* bits within the OSTCR register */
 #define OSTCR_OST1CLR                  BIT(0)
@@ -98,7 +97,7 @@ static unsigned long ingenic_ost_percpu_timer_recalc_rate(struct clk_hw *hw,
 
        prescale = readl(ost_clk->ost->base + info->ostccr_reg);
 
-       prescale = (prescale & OSTCCR_PRESCALE1_MASK) >> OSTCCR_PRESCALE1_LSB;
+       prescale = FIELD_GET(OSTCCR_PRESCALE1_MASK, prescale);
 
        return parent_rate >> (prescale * 2);
 }
@@ -112,7 +111,7 @@ static unsigned long ingenic_ost_global_timer_recalc_rate(struct clk_hw *hw,
 
        prescale = readl(ost_clk->ost->base + info->ostccr_reg);
 
-       prescale = (prescale & OSTCCR_PRESCALE2_MASK) >> OSTCCR_PRESCALE2_LSB;
+       prescale = FIELD_GET(OSTCCR_PRESCALE2_MASK, prescale);
 
        return parent_rate >> (prescale * 2);
 }
@@ -151,7 +150,8 @@ static int ingenic_ost_percpu_timer_set_rate(struct clk_hw *hw, unsigned long re
        int val;
 
        val = readl(ost_clk->ost->base + info->ostccr_reg);
-       val = (val & ~OSTCCR_PRESCALE1_MASK) | (prescale << OSTCCR_PRESCALE1_LSB);
+       val &= ~OSTCCR_PRESCALE1_MASK;
+       val |= FIELD_PREP(OSTCCR_PRESCALE1_MASK, prescale);
        writel(val, ost_clk->ost->base + info->ostccr_reg);
 
        return 0;
@@ -166,7 +166,8 @@ static int ingenic_ost_global_timer_set_rate(struct clk_hw *hw, unsigned long re
        int val;
 
        val = readl(ost_clk->ost->base + info->ostccr_reg);
-       val = (val & ~OSTCCR_PRESCALE2_MASK) | (prescale << OSTCCR_PRESCALE2_LSB);
+       val &= ~OSTCCR_PRESCALE2_MASK;
+       val |= FIELD_PREP(OSTCCR_PRESCALE2_MASK, prescale);
        writel(val, ost_clk->ost->base + info->ostccr_reg);
 
        return 0;
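
The conversion above swaps open-coded mask/shift arithmetic for FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, which derive the shift from the mask itself, so the separate OSTCCR_PRESCALE*_LSB constants can be dropped. A rough userspace illustration of the same idea (not the kernel macros; the helper names here are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define PRESCALE1_MASK 0x3u   /* bits [1:0] */
    #define PRESCALE2_MASK 0xcu   /* bits [3:2] */

    /* The shift is recovered from the mask's lowest set bit. */
    static uint32_t field_get(uint32_t mask, uint32_t reg)
    {
            return (reg & mask) >> __builtin_ctz(mask);
    }

    static uint32_t field_prep(uint32_t mask, uint32_t val)
    {
            return (val << __builtin_ctz(mask)) & mask;
    }

    int main(void)
    {
            uint32_t reg = 0;

            reg = (reg & ~PRESCALE2_MASK) | field_prep(PRESCALE2_MASK, 2);
            printf("reg=0x%x prescale2=%u\n", reg, field_get(PRESCALE2_MASK, reg));
            return 0;
    }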
index d7ed99f..dd0956a 100644 (file)
@@ -579,7 +579,8 @@ static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
        ch->flags |= flag;
 
        /* setup timeout if no clockevent */
-       if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
+       if (ch->cmt->num_channels == 1 &&
+           flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
                __sh_cmt_set_next(ch, ch->max_match_value);
  out:
        raw_spin_unlock_irqrestore(&ch->lock, flags);
@@ -621,20 +622,25 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
 static u64 sh_cmt_clocksource_read(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
-       unsigned long flags;
        u32 has_wrapped;
-       u64 value;
-       u32 raw;
 
-       raw_spin_lock_irqsave(&ch->lock, flags);
-       value = ch->total_cycles;
-       raw = sh_cmt_get_counter(ch, &has_wrapped);
+       if (ch->cmt->num_channels == 1) {
+               unsigned long flags;
+               u64 value;
+               u32 raw;
 
-       if (unlikely(has_wrapped))
-               raw += ch->match_value + 1;
-       raw_spin_unlock_irqrestore(&ch->lock, flags);
+               raw_spin_lock_irqsave(&ch->lock, flags);
+               value = ch->total_cycles;
+               raw = sh_cmt_get_counter(ch, &has_wrapped);
+
+               if (unlikely(has_wrapped))
+                       raw += ch->match_value + 1;
+               raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+               return value + raw;
+       }
 
-       return value + raw;
+       return sh_cmt_get_counter(ch, &has_wrapped);
 }
 
 static int sh_cmt_clocksource_enable(struct clocksource *cs)
@@ -697,7 +703,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
        cs->disable = sh_cmt_clocksource_disable;
        cs->suspend = sh_cmt_clocksource_suspend;
        cs->resume = sh_cmt_clocksource_resume;
-       cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
+       cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
        dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
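
The clocksource mask now matches the width of the CMT channel instead of being hard-coded to 64 bits; CLOCKSOURCE_MASK(bits) is simply an all-ones mask of the given width (GENMASK_ULL(bits - 1, 0)). A tiny standalone illustration, assuming those semantics:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace equivalent of CLOCKSOURCE_MASK(bits). */
    static uint64_t clocksource_mask(unsigned int bits)
    {
            return bits >= 64 ? ~0ULL : (1ULL << bits) - 1;
    }

    int main(void)
    {
            /* A 32-bit counter wraps at 0xffffffff, not at 2^64 - 1. */
            printf("width 16 -> 0x%llx\n", (unsigned long long)clocksource_mask(16));
            printf("width 32 -> 0x%llx\n", (unsigned long long)clocksource_mask(32));
            return 0;
    }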
index edb1d5f..126fb1f 100644 (file)
@@ -271,9 +271,7 @@ static irqreturn_t ast2600_timer_interrupt(int irq, void *dev_id)
 }
 
 static int __init fttmr010_common_init(struct device_node *np,
-               bool is_aspeed,
-               int (*timer_shutdown)(struct clock_event_device *),
-               irq_handler_t irq_handler)
+                                      bool is_aspeed, bool is_ast2600)
 {
        struct fttmr010 *fttmr010;
        int irq;
@@ -374,8 +372,6 @@ static int __init fttmr010_common_init(struct device_node *np,
                                     fttmr010->tick_rate);
        }
 
-       fttmr010->timer_shutdown = timer_shutdown;
-
        /*
         * Setup clockevent timer (interrupt-driven) on timer 1.
         */
@@ -383,8 +379,18 @@ static int __init fttmr010_common_init(struct device_node *np,
        writel(0, fttmr010->base + TIMER1_LOAD);
        writel(0, fttmr010->base + TIMER1_MATCH1);
        writel(0, fttmr010->base + TIMER1_MATCH2);
-       ret = request_irq(irq, irq_handler, IRQF_TIMER,
-                         "FTTMR010-TIMER1", &fttmr010->clkevt);
+
+       if (is_ast2600) {
+               fttmr010->timer_shutdown = ast2600_timer_shutdown;
+               ret = request_irq(irq, ast2600_timer_interrupt,
+                                 IRQF_TIMER, "FTTMR010-TIMER1",
+                                 &fttmr010->clkevt);
+       } else {
+               fttmr010->timer_shutdown = fttmr010_timer_shutdown;
+               ret = request_irq(irq, fttmr010_timer_interrupt,
+                                 IRQF_TIMER, "FTTMR010-TIMER1",
+                                 &fttmr010->clkevt);
+       }
        if (ret) {
                pr_err("FTTMR010-TIMER1 no IRQ\n");
                goto out_unmap;
@@ -432,23 +438,17 @@ out_disable_clock:
 
 static __init int ast2600_timer_init(struct device_node *np)
 {
-       return fttmr010_common_init(np, true,
-                       ast2600_timer_shutdown,
-                       ast2600_timer_interrupt);
+       return fttmr010_common_init(np, true, true);
 }
 
 static __init int aspeed_timer_init(struct device_node *np)
 {
-       return fttmr010_common_init(np, true,
-                       fttmr010_timer_shutdown,
-                       fttmr010_timer_interrupt);
+       return fttmr010_common_init(np, true, false);
 }
 
 static __init int fttmr010_timer_init(struct device_node *np)
 {
-       return fttmr010_common_init(np, false,
-                       fttmr010_timer_shutdown,
-                       fttmr010_timer_interrupt);
+       return fttmr010_common_init(np, false, false);
 }
 
 TIMER_OF_DECLARE(fttmr010, "faraday,fttmr010", fttmr010_timer_init);
index ab63b95..7bcb4a3 100644 (file)
@@ -60,9 +60,9 @@
  * SYST_CON_EN: Clock enable. Shall be set to
  *   - Start timer countdown.
  *   - Allow timeout ticks being updated.
- *   - Allow changing interrupt functions.
+ *   - Allow changing interrupt status, like clearing a pending irq.
  *
- * SYST_CON_IRQ_EN: Set to allow interrupt.
+ * SYST_CON_IRQ_EN: Set to enable interrupt.
  *
  * SYST_CON_IRQ_CLR: Set to clear interrupt.
  */
@@ -75,6 +75,7 @@ static void __iomem *gpt_sched_reg __read_mostly;
 static void mtk_syst_ack_irq(struct timer_of *to)
 {
        /* Clear and disable interrupt */
+       writel(SYST_CON_EN, SYST_CON_REG(to));
        writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to));
 }
 
@@ -111,6 +112,9 @@ static int mtk_syst_clkevt_next_event(unsigned long ticks,
 
 static int mtk_syst_clkevt_shutdown(struct clock_event_device *clkevt)
 {
+       /* Clear any irq */
+       mtk_syst_ack_irq(to_timer_of(clkevt));
+
        /* Disable timer */
        writel(0, SYST_CON_REG(to_timer_of(clkevt)));
 
index 7e74504..b496128 100644 (file)
@@ -163,9 +163,9 @@ static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
        if (ret || val > 1)
                return -EINVAL;
 
-       get_online_cpus();
+       cpus_read_lock();
        set_boost(policy, val);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return count;
 }
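
This and the later cpufreq hunks are a mechanical rename: get_online_cpus()/put_online_cpus() were thin wrappers around the CPU hotplug read lock and are being retired in favour of the underlying primitives. Roughly what the legacy wrappers looked like before removal (a sketch, not a verbatim quote of include/linux/cpu.h):

    static inline void get_online_cpus(void) { cpus_read_lock(); }
    static inline void put_online_cpus(void) { cpus_read_unlock(); }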
index 3fc98a3..c10fc33 100644 (file)
@@ -104,7 +104,11 @@ struct armada_37xx_dvfs {
 };
 
 static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
-       {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+       /*
+        * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
+        * unstable because we do not know how to configure it properly.
+        */
+       /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
        {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
        {.cpu_freq_max = 800*1000*1000,  .divider = {1, 2, 3, 4} },
        {.cpu_freq_max = 600*1000*1000,  .divider = {2, 4, 5, 6} },
index bef7528..231e585 100644 (file)
@@ -139,7 +139,9 @@ static const struct of_device_id blocklist[] __initconst = {
        { .compatible = "qcom,qcs404", },
        { .compatible = "qcom,sc7180", },
        { .compatible = "qcom,sc7280", },
+       { .compatible = "qcom,sc8180x", },
        { .compatible = "qcom,sdm845", },
+       { .compatible = "qcom,sm8150", },
 
        { .compatible = "st,stih407", },
        { .compatible = "st,stih410", },
index 45f3416..06c526d 100644 (file)
@@ -2654,18 +2654,18 @@ int cpufreq_boost_trigger_state(int state)
        cpufreq_driver->boost_enabled = state;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_active_policy(policy) {
                ret = cpufreq_driver->set_boost(policy, state);
                if (ret)
                        goto err_reset_state;
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        return 0;
 
 err_reset_state:
-       put_online_cpus();
+       cpus_read_unlock();
 
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        cpufreq_driver->boost_enabled = !state;
index ac361a8..eb4320b 100644 (file)
@@ -418,7 +418,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
        default_powersave_bias = powersave_bias;
        cpumask_clear(&done);
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(cpu) {
                struct cpufreq_policy *policy;
                struct policy_dbs_info *policy_dbs;
@@ -442,7 +442,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
                od_tuners = dbs_data->tuners;
                od_tuners->powersave_bias = default_powersave_bias;
        }
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 void od_register_powersave_bias_handler(unsigned int (*f)
index bb45499..b4ffe6c 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 #include <asm/intel-family.h>
+#include "../drivers/thermal/intel/thermal_interrupt.h"
 
 #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
 
@@ -219,6 +220,7 @@ struct global_params {
  * @sched_flags:       Store scheduler flags for possible cross CPU update
  * @hwp_boost_min:     Last HWP boosted min performance
  * @suspended:         Whether or not the driver has been suspended.
+ * @hwp_notify_work:   workqueue for HWP notifications.
  *
  * This structure stores per CPU instance data for all CPUs.
  */
@@ -257,6 +259,7 @@ struct cpudata {
        unsigned int sched_flags;
        u32 hwp_boost_min;
        bool suspended;
+       struct delayed_work hwp_notify_work;
 };
 
 static struct cpudata **all_cpu_data;
@@ -1625,6 +1628,40 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
 
 /************************** sysfs end ************************/
 
+static void intel_pstate_notify_work(struct work_struct *work)
+{
+       mutex_lock(&intel_pstate_driver_lock);
+       cpufreq_update_policy(smp_processor_id());
+       wrmsrl(MSR_HWP_STATUS, 0);
+       mutex_unlock(&intel_pstate_driver_lock);
+}
+
+void notify_hwp_interrupt(void)
+{
+       unsigned int this_cpu = smp_processor_id();
+       struct cpudata *cpudata;
+       u64 value;
+
+       if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+               return;
+
+       rdmsrl(MSR_HWP_STATUS, value);
+       if (!(value & 0x01))
+               return;
+
+       cpudata = all_cpu_data[this_cpu];
+       schedule_delayed_work_on(this_cpu, &cpudata->hwp_notify_work, msecs_to_jiffies(10));
+}
+
+static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
+{
+       /* Enable HWP notification interrupt for guaranteed performance change */
+       if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
+               INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
+               wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+       }
+}
+
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
        /* First disable HWP notification interrupt as we don't process them */
@@ -1634,6 +1671,8 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
        if (cpudata->epp_default == -EINVAL)
                cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+
+       intel_pstate_enable_hwp_interrupt(cpudata);
 }
 
 static int atom_get_min_pstate(void)
@@ -2969,7 +3008,7 @@ static void intel_pstate_driver_cleanup(void)
 {
        unsigned int cpu;
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu]) {
                        if (intel_pstate_driver == &intel_pstate)
@@ -2979,7 +3018,7 @@ static void intel_pstate_driver_cleanup(void)
                        all_cpu_data[cpu] = NULL;
                }
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        intel_pstate_driver = NULL;
 }
index b9ccb6a..12ab401 100644 (file)
@@ -1180,7 +1180,7 @@ static int powernowk8_init(void)
        if (!x86_match_cpu(powernow_k8_ids))
                return -ENODEV;
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(i) {
                smp_call_function_single(i, check_supported_cpu, &ret, 1);
                if (!ret)
@@ -1188,10 +1188,10 @@ static int powernowk8_init(void)
        }
 
        if (supported_cpus != num_online_cpus()) {
-               put_online_cpus();
+               cpus_read_unlock();
                return -ENODEV;
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        ret = cpufreq_register_driver(&cpufreq_amd64_driver);
        if (ret)
index 005600c..23a06cb 100644 (file)
@@ -918,7 +918,7 @@ static void powernv_cpufreq_work_fn(struct work_struct *work)
        unsigned int cpu;
        cpumask_t mask;
 
-       get_online_cpus();
+       cpus_read_lock();
        cpumask_and(&mask, &chip->mask, cpu_online_mask);
        smp_call_function_any(&mask,
                              powernv_cpufreq_throttle_check, NULL, 0);
@@ -939,7 +939,7 @@ static void powernv_cpufreq_work_fn(struct work_struct *work)
                cpufreq_cpu_put(policy);
        }
 out:
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
index ec9a87c..75f818d 100644 (file)
@@ -134,7 +134,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
        }
 
        if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
-               ret = -ENOMEM;
+               return -ENOMEM;
 
        /* Obtain CPUs that share SCMI performance controls */
        ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
index 7b91060..d9262db 100644 (file)
@@ -382,8 +382,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
        alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum;
        alt_recent = idx_recent_sum > NR_RECENT / 2;
        if (alt_recent || alt_intercepts) {
-               s64 last_enabled_span_ns = duration_ns;
-               int last_enabled_idx = idx;
+               s64 first_suitable_span_ns = duration_ns;
+               int first_suitable_idx = idx;
 
                /*
                 * Look for the deepest idle state whose target residency had
@@ -397,37 +397,51 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                intercept_sum = 0;
                recent_sum = 0;
 
-               for (i = idx - 1; i >= idx0; i--) {
+               for (i = idx - 1; i >= 0; i--) {
                        struct teo_bin *bin = &cpu_data->state_bins[i];
                        s64 span_ns;
 
                        intercept_sum += bin->intercepts;
                        recent_sum += bin->recent;
 
+                       span_ns = teo_middle_of_bin(i, drv);
+
+                       if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
+                           (!alt_intercepts ||
+                            2 * intercept_sum > idx_intercept_sum)) {
+                               if (teo_time_ok(span_ns) &&
+                                   !dev->states_usage[i].disable) {
+                                       idx = i;
+                                       duration_ns = span_ns;
+                               } else {
+                                       /*
+                                        * The current state is too shallow or
+                                        * disabled, so take the first enabled
+                                        * deeper state with suitable time span.
+                                        */
+                                       idx = first_suitable_idx;
+                                       duration_ns = first_suitable_span_ns;
+                               }
+                               break;
+                       }
+
                        if (dev->states_usage[i].disable)
                                continue;
 
-                       span_ns = teo_middle_of_bin(i, drv);
                        if (!teo_time_ok(span_ns)) {
                                /*
-                                * The current state is too shallow, so select
-                                * the first enabled deeper state.
+                                * The current state is too shallow, but if an
+                                * alternative candidate state has been found,
+                                * it may still turn out to be a better choice.
                                 */
-                               duration_ns = last_enabled_span_ns;
-                               idx = last_enabled_idx;
-                               break;
-                       }
+                               if (first_suitable_idx != idx)
+                                       continue;
 
-                       if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
-                           (!alt_intercepts ||
-                            2 * intercept_sum > idx_intercept_sum)) {
-                               idx = i;
-                               duration_ns = span_ns;
                                break;
                        }
 
-                       last_enabled_span_ns = span_ns;
-                       last_enabled_idx = i;
+                       first_suitable_span_ns = span_ns;
+                       first_suitable_idx = i;
                }
        }
 
index cd1baee..b3a9bbf 100644 (file)
@@ -26,8 +26,7 @@ void sun8i_ce_prng_exit(struct crypto_tfm *tfm)
 {
        struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       memzero_explicit(ctx->seed, ctx->slen);
-       kfree(ctx->seed);
+       kfree_sensitive(ctx->seed);
        ctx->seed = NULL;
        ctx->slen = 0;
 }
@@ -38,8 +37,7 @@ int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed,
        struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
 
        if (ctx->seed && ctx->slen != slen) {
-               memzero_explicit(ctx->seed, ctx->slen);
-               kfree(ctx->seed);
+               kfree_sensitive(ctx->seed);
                ctx->slen = 0;
                ctx->seed = NULL;
        }
@@ -157,9 +155,8 @@ err_dst:
                memcpy(dst, d, dlen);
                memcpy(ctx->seed, d + dlen, ctx->slen);
        }
-       memzero_explicit(d, todo);
 err_iv:
-       kfree(d);
+       kfree_sensitive(d);
 err_mem:
        return err;
 }
index 5b7af44..19cd2e5 100644 (file)
@@ -95,9 +95,8 @@ err_pm:
                memcpy(data, d, max);
                err = max;
        }
-       memzero_explicit(d, todo);
 err_dst:
-       kfree(d);
+       kfree_sensitive(d);
        return err;
 }
 
index 3191527..246a678 100644 (file)
@@ -20,8 +20,7 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
        struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
 
        if (ctx->seed && ctx->slen != slen) {
-               memzero_explicit(ctx->seed, ctx->slen);
-               kfree(ctx->seed);
+               kfree_sensitive(ctx->seed);
                ctx->slen = 0;
                ctx->seed = NULL;
        }
@@ -48,8 +47,7 @@ void sun8i_ss_prng_exit(struct crypto_tfm *tfm)
 {
        struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       memzero_explicit(ctx->seed, ctx->slen);
-       kfree(ctx->seed);
+       kfree_sensitive(ctx->seed);
        ctx->seed = NULL;
        ctx->slen = 0;
 }
@@ -167,9 +165,8 @@ err_iv:
                /* Update seed */
                memcpy(ctx->seed, d + dlen, ctx->slen);
        }
-       memzero_explicit(d, todo);
 err_free:
-       kfree(d);
+       kfree_sensitive(d);
 
        return err;
 }
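
The sun8i-ce/sun8i-ss hunks collapse open-coded memzero_explicit() + kfree() pairs into kfree_sensitive(), which zeroes the allocation before freeing it (the kernel helper determines the buffer size itself, so no length argument is needed). A hedged userspace analogue that takes an explicit length:

    #include <stdlib.h>
    #include <string.h>

    /*
     * Wipe a buffer before freeing it so key material does not linger in
     * freed heap memory; the volatile pointer keeps the stores from being
     * optimised away.
     */
    static void free_sensitive(void *p, size_t len)
    {
            volatile unsigned char *v = p;
            size_t i;

            if (!p)
                    return;
            for (i = 0; i < len; i++)
                    v[i] = 0;
            free(p);
    }

    int main(void)
    {
            char *seed = malloc(32);

            if (!seed)
                    return 1;
            memcpy(seed, "not-so-secret-seed-material", 28);
            free_sensitive(seed, 32);
            return 0;
    }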
index b1d2860..9391ccc 100644 (file)
@@ -143,6 +143,7 @@ struct atmel_aes_xts_ctx {
        struct atmel_aes_base_ctx       base;
 
        u32                     key2[AES_KEYSIZE_256 / sizeof(u32)];
+       struct crypto_skcipher *fallback_tfm;
 };
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -155,6 +156,7 @@ struct atmel_aes_authenc_ctx {
 struct atmel_aes_reqctx {
        unsigned long           mode;
        u8                      lastc[AES_BLOCK_SIZE];
+       struct skcipher_request fallback_req;
 };
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -418,24 +420,15 @@ static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
        return len ? block_size - len : 0;
 }
 
-static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
+static struct atmel_aes_dev *atmel_aes_dev_alloc(struct atmel_aes_base_ctx *ctx)
 {
-       struct atmel_aes_dev *aes_dd = NULL;
-       struct atmel_aes_dev *tmp;
+       struct atmel_aes_dev *aes_dd;
 
        spin_lock_bh(&atmel_aes.lock);
-       if (!ctx->dd) {
-               list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
-                       aes_dd = tmp;
-                       break;
-               }
-               ctx->dd = aes_dd;
-       } else {
-               aes_dd = ctx->dd;
-       }
-
+       /* One AES IP per SoC. */
+       aes_dd = list_first_entry_or_null(&atmel_aes.dev_list,
+                                         struct atmel_aes_dev, list);
        spin_unlock_bh(&atmel_aes.lock);
-
        return aes_dd;
 }
 
@@ -967,7 +960,6 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
        ctx = crypto_tfm_ctx(areq->tfm);
 
        dd->areq = areq;
-       dd->ctx = ctx;
        start_async = (areq != new_areq);
        dd->is_async = start_async;
 
@@ -1083,12 +1075,48 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
        return atmel_aes_ctr_transfer(dd);
 }
 
+static int atmel_aes_xts_fallback(struct skcipher_request *req, bool enc)
+{
+       struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+       struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(
+                       crypto_skcipher_reqtfm(req));
+
+       skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+       skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
+                                     req->base.complete, req->base.data);
+       skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
+                                  req->cryptlen, req->iv);
+
+       return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+                    crypto_skcipher_decrypt(&rctx->fallback_req);
+}
+
 static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
 {
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct atmel_aes_reqctx *rctx;
-       struct atmel_aes_dev *dd;
+       u32 opmode = mode & AES_FLAGS_OPMODE_MASK;
+
+       if (opmode == AES_FLAGS_XTS) {
+               if (req->cryptlen < XTS_BLOCK_SIZE)
+                       return -EINVAL;
+
+               if (!IS_ALIGNED(req->cryptlen, XTS_BLOCK_SIZE))
+                       return atmel_aes_xts_fallback(req,
+                                                     mode & AES_FLAGS_ENCRYPT);
+       }
+
+       /*
+        * ECB, CBC, CFB, OFB or CTR mode require the plaintext and ciphertext
+        * to have a positve integer length.
+        */
+       if (!req->cryptlen && opmode != AES_FLAGS_XTS)
+               return 0;
+
+       if ((opmode == AES_FLAGS_ECB || opmode == AES_FLAGS_CBC) &&
+           !IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
+               return -EINVAL;
 
        switch (mode & AES_FLAGS_OPMODE_MASK) {
        case AES_FLAGS_CFB8:
@@ -1113,14 +1141,10 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
        }
        ctx->is_aead = false;
 
-       dd = atmel_aes_find_dev(ctx);
-       if (!dd)
-               return -ENODEV;
-
        rctx = skcipher_request_ctx(req);
        rctx->mode = mode;
 
-       if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB &&
+       if (opmode != AES_FLAGS_ECB &&
            !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
                unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 
@@ -1130,7 +1154,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
                                                 ivsize, 0);
        }
 
-       return atmel_aes_handle_queue(dd, &req->base);
+       return atmel_aes_handle_queue(ctx->dd, &req->base);
 }
 
 static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -1242,8 +1266,15 @@ static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
 static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_start;
 
        return 0;
@@ -1252,8 +1283,15 @@ static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
 static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_ctr_start;
 
        return 0;
@@ -1290,7 +1328,7 @@ static struct skcipher_alg aes_algs[] = {
 {
        .base.cra_name          = "ofb(aes)",
        .base.cra_driver_name   = "atmel-ofb-aes",
-       .base.cra_blocksize     = AES_BLOCK_SIZE,
+       .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct atmel_aes_ctx),
 
        .init                   = atmel_aes_init_tfm,
@@ -1691,20 +1729,15 @@ static int atmel_aes_gcm_crypt(struct aead_request *req,
 {
        struct atmel_aes_base_ctx *ctx;
        struct atmel_aes_reqctx *rctx;
-       struct atmel_aes_dev *dd;
 
        ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        ctx->block_size = AES_BLOCK_SIZE;
        ctx->is_aead = true;
 
-       dd = atmel_aes_find_dev(ctx);
-       if (!dd)
-               return -ENODEV;
-
        rctx = aead_request_ctx(req);
        rctx->mode = AES_FLAGS_GCM | mode;
 
-       return atmel_aes_handle_queue(dd, &req->base);
+       return atmel_aes_handle_queue(ctx->dd, &req->base);
 }
 
 static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -1742,8 +1775,15 @@ static int atmel_aes_gcm_decrypt(struct aead_request *req)
 static int atmel_aes_gcm_init(struct crypto_aead *tfm)
 {
        struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_gcm_start;
 
        return 0;
@@ -1819,12 +1859,8 @@ static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
         * the order of the ciphered tweak bytes need to be reversed before
         * writing them into the ODATARx registers.
         */
-       for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
-               u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
-
-               tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
-               tweak_bytes[i] = tmp;
-       }
+       for (i = 0; i < AES_BLOCK_SIZE/2; ++i)
+               swap(tweak_bytes[i], tweak_bytes[AES_BLOCK_SIZE - 1 - i]);
 
        /* Process the data. */
        atmel_aes_write_ctrl(dd, use_dma, NULL);
@@ -1849,6 +1885,13 @@ static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
        if (err)
                return err;
 
+       crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+       crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags &
+                                 CRYPTO_TFM_REQ_MASK);
+       err = crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+       if (err)
+               return err;
+
        memcpy(ctx->base.key, key, keylen/2);
        memcpy(ctx->key2, key + keylen/2, keylen/2);
        ctx->base.keylen = keylen/2;
@@ -1869,18 +1912,40 @@ static int atmel_aes_xts_decrypt(struct skcipher_request *req)
 static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct atmel_aes_dev *dd;
+       const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
 
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
+
+       ctx->fallback_tfm = crypto_alloc_skcipher(tfm_name, 0,
+                                                 CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(ctx->fallback_tfm))
+               return PTR_ERR(ctx->fallback_tfm);
+
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
+                                   crypto_skcipher_reqsize(ctx->fallback_tfm));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_xts_start;
 
        return 0;
 }
 
+static void atmel_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
+{
+       struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       crypto_free_skcipher(ctx->fallback_tfm);
+}
+
 static struct skcipher_alg aes_xts_alg = {
        .base.cra_name          = "xts(aes)",
        .base.cra_driver_name   = "atmel-xts-aes",
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct atmel_aes_xts_ctx),
+       .base.cra_flags         = CRYPTO_ALG_NEED_FALLBACK,
 
        .min_keysize            = 2 * AES_MIN_KEY_SIZE,
        .max_keysize            = 2 * AES_MAX_KEY_SIZE,
@@ -1889,6 +1954,7 @@ static struct skcipher_alg aes_xts_alg = {
        .encrypt                = atmel_aes_xts_encrypt,
        .decrypt                = atmel_aes_xts_decrypt,
        .init                   = atmel_aes_xts_init_tfm,
+       .exit                   = atmel_aes_xts_exit_tfm,
 };
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -2075,6 +2141,11 @@ static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
 {
        struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        ctx->auth = atmel_sha_authenc_spawn(auth_mode);
        if (IS_ERR(ctx->auth))
@@ -2082,6 +2153,8 @@ static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
 
        crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
                                      auth_reqsize));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_authenc_start;
 
        return 0;
@@ -2127,7 +2200,6 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
        struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
        u32 authsize = crypto_aead_authsize(tfm);
        bool enc = (mode & AES_FLAGS_ENCRYPT);
-       struct atmel_aes_dev *dd;
 
        /* Compute text length. */
        if (!enc && req->cryptlen < authsize)
@@ -2146,11 +2218,7 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
        ctx->block_size = AES_BLOCK_SIZE;
        ctx->is_aead = true;
 
-       dd = atmel_aes_find_dev(ctx);
-       if (!dd)
-               return -ENODEV;
-
-       return atmel_aes_handle_queue(dd, &req->base);
+       return atmel_aes_handle_queue(ctx->dd, &req->base);
 }
 
 static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
@@ -2358,7 +2426,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
 
 static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
 {
-       alg->cra_flags = CRYPTO_ALG_ASYNC;
+       alg->cra_flags |= CRYPTO_ALG_ASYNC;
        alg->cra_alignmask = 0xf;
        alg->cra_priority = ATMEL_AES_PRIORITY;
        alg->cra_module = THIS_MODULE;
index 6f01c51..e30786e 100644 (file)
@@ -196,23 +196,15 @@ static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
                atmel_tdes_write(dd, offset, *value);
 }
 
-static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
+static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
 {
-       struct atmel_tdes_dev *tdes_dd = NULL;
-       struct atmel_tdes_dev *tmp;
+       struct atmel_tdes_dev *tdes_dd;
 
        spin_lock_bh(&atmel_tdes.lock);
-       if (!ctx->dd) {
-               list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
-                       tdes_dd = tmp;
-                       break;
-               }
-               ctx->dd = tdes_dd;
-       } else {
-               tdes_dd = ctx->dd;
-       }
+       /* One TDES IP per SoC. */
+       tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
+                                          struct atmel_tdes_dev, list);
        spin_unlock_bh(&atmel_tdes.lock);
-
        return tdes_dd;
 }
 
@@ -320,7 +312,7 @@ static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
                                dd->buf_out, dd->buflen, dd->dma_size, 1);
                if (count != dd->dma_size) {
                        err = -EINVAL;
-                       pr_err("not all data converted: %zu\n", count);
+                       dev_dbg(dd->dev, "not all data converted: %zu\n", count);
                }
        }
 
@@ -337,24 +329,24 @@ static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
        dd->buflen &= ~(DES_BLOCK_SIZE - 1);
 
        if (!dd->buf_in || !dd->buf_out) {
-               dev_err(dd->dev, "unable to alloc pages.\n");
+               dev_dbg(dd->dev, "unable to alloc pages.\n");
                goto err_alloc;
        }
 
        /* MAP here */
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
                                        dd->buflen, DMA_TO_DEVICE);
-       if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
-               dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
-               err = -EINVAL;
+       err = dma_mapping_error(dd->dev, dd->dma_addr_in);
+       if (err) {
+               dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
                goto err_map_in;
        }
 
        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
                                        dd->buflen, DMA_FROM_DEVICE);
-       if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
-               dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
-               err = -EINVAL;
+       err = dma_mapping_error(dd->dev, dd->dma_addr_out);
+       if (err) {
+               dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
                goto err_map_out;
        }
 
@@ -367,8 +359,6 @@ err_map_in:
 err_alloc:
        free_page((unsigned long)dd->buf_out);
        free_page((unsigned long)dd->buf_in);
-       if (err)
-               pr_err("error: %d\n", err);
        return err;
 }
 
@@ -520,14 +510,14 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
 
                err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                if (!err) {
-                       dev_err(dd->dev, "dma_map_sg() error\n");
+                       dev_dbg(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }
 
                err = dma_map_sg(dd->dev, dd->out_sg, 1,
                                DMA_FROM_DEVICE);
                if (!err) {
-                       dev_err(dd->dev, "dma_map_sg() error\n");
+                       dev_dbg(dd->dev, "dma_map_sg() error\n");
                        dma_unmap_sg(dd->dev, dd->in_sg, 1,
                                DMA_TO_DEVICE);
                        return -EINVAL;
@@ -646,7 +636,6 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
        rctx->mode &= TDES_FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
        dd->ctx = ctx;
-       ctx->dd = dd;
 
        err = atmel_tdes_write_ctrl(dd);
        if (!err)
@@ -679,7 +668,7 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
                                dd->buf_out, dd->buflen, dd->dma_size, 1);
                        if (count != dd->dma_size) {
                                err = -EINVAL;
-                               pr_err("not all data converted: %zu\n", count);
+                               dev_dbg(dd->dev, "not all data converted: %zu\n", count);
                        }
                }
        }
@@ -691,11 +680,15 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
+       struct device *dev = ctx->dd->dev;
+
+       if (!req->cryptlen)
+               return 0;
 
        switch (mode & TDES_FLAGS_OPMODE_MASK) {
        case TDES_FLAGS_CFB8:
                if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of CFB8 blocks\n");
+                       dev_dbg(dev, "request size is not exact amount of CFB8 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB8_BLOCK_SIZE;
@@ -703,7 +696,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 
        case TDES_FLAGS_CFB16:
                if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of CFB16 blocks\n");
+                       dev_dbg(dev, "request size is not exact amount of CFB16 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB16_BLOCK_SIZE;
@@ -711,7 +704,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 
        case TDES_FLAGS_CFB32:
                if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of CFB32 blocks\n");
+                       dev_dbg(dev, "request size is not exact amount of CFB32 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB32_BLOCK_SIZE;
@@ -719,7 +712,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 
        default:
                if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of DES blocks\n");
+                       dev_dbg(dev, "request size is not exact amount of DES blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = DES_BLOCK_SIZE;
@@ -897,14 +890,13 @@ static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
 static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
-       struct atmel_tdes_dev *dd;
-
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
 
-       dd = atmel_tdes_find_dev(ctx);
-       if (!dd)
+       ctx->dd = atmel_tdes_dev_alloc();
+       if (!ctx->dd)
                return -ENODEV;
 
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
+
        return 0;
 }
 
@@ -999,7 +991,7 @@ static struct skcipher_alg tdes_algs[] = {
 {
        .base.cra_name          = "ofb(des)",
        .base.cra_driver_name   = "atmel-ofb-des",
-       .base.cra_blocksize     = DES_BLOCK_SIZE,
+       .base.cra_blocksize     = 1,
        .base.cra_alignmask     = 0x7,
 
        .min_keysize            = DES_KEY_SIZE,
index 9180840..2ecb0e1 100644 (file)
@@ -300,6 +300,9 @@ static int __sev_platform_shutdown_locked(int *error)
        struct sev_device *sev = psp_master->sev_data;
        int ret;
 
+       if (sev->state == SEV_STATE_UNINIT)
+               return 0;
+
        ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
        if (ret)
                return ret;
@@ -1019,6 +1022,20 @@ e_err:
        return ret;
 }
 
+static void sev_firmware_shutdown(struct sev_device *sev)
+{
+       sev_platform_shutdown(NULL);
+
+       if (sev_es_tmr) {
+               /* The TMR area was encrypted, flush it from the cache */
+               wbinvd_on_all_cpus();
+
+               free_pages((unsigned long)sev_es_tmr,
+                          get_order(SEV_ES_TMR_SIZE));
+               sev_es_tmr = NULL;
+       }
+}
+
 void sev_dev_destroy(struct psp_device *psp)
 {
        struct sev_device *sev = psp->sev_data;
@@ -1026,6 +1043,8 @@ void sev_dev_destroy(struct psp_device *psp)
        if (!sev)
                return;
 
+       sev_firmware_shutdown(sev);
+
        if (sev->misc)
                kref_put(&misc_dev->refcount, sev_exit);
 
@@ -1056,21 +1075,6 @@ void sev_pci_init(void)
        if (sev_get_api_version())
                goto err;
 
-       /*
-        * If platform is not in UNINIT state then firmware upgrade and/or
-        * platform INIT command will fail. These command require UNINIT state.
-        *
-        * In a normal boot we should never run into case where the firmware
-        * is not in UNINIT state on boot. But in case of kexec boot, a reboot
-        * may not go through a typical shutdown sequence and may leave the
-        * firmware in INIT or WORKING state.
-        */
-
-       if (sev->state != SEV_STATE_UNINIT) {
-               sev_platform_shutdown(NULL);
-               sev->state = SEV_STATE_UNINIT;
-       }
-
        if (sev_version_greater_or_equal(0, 15) &&
            sev_update_firmware(sev->dev) == 0)
                sev_get_api_version();
@@ -1115,17 +1119,10 @@ err:
 
 void sev_pci_exit(void)
 {
-       if (!psp_master->sev_data)
-               return;
-
-       sev_platform_shutdown(NULL);
+       struct sev_device *sev = psp_master->sev_data;
 
-       if (sev_es_tmr) {
-               /* The TMR area was encrypted, flush it from the cache */
-               wbinvd_on_all_cpus();
+       if (!sev)
+               return;
 
-               free_pages((unsigned long)sev_es_tmr,
-                          get_order(SEV_ES_TMR_SIZE));
-               sev_es_tmr = NULL;
-       }
+       sev_firmware_shutdown(sev);
 }
index 6fb6ba3..88c672a 100644 (file)
@@ -241,6 +241,17 @@ e_err:
        return ret;
 }
 
+static void sp_pci_shutdown(struct pci_dev *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct sp_device *sp = dev_get_drvdata(dev);
+
+       if (!sp)
+               return;
+
+       sp_destroy(sp);
+}
+
 static void sp_pci_remove(struct pci_dev *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -349,6 +360,12 @@ static const struct sp_dev_vdata dev_vdata[] = {
 #endif
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
                .psp_vdata = &pspv3,
+#endif
+       },
+       {       /* 5 */
+               .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+               .psp_vdata = &pspv2,
 #endif
        },
 };
@@ -359,6 +376,7 @@ static const struct pci_device_id sp_pci_table[] = {
        { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
        { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
        { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
+       { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
        /* Last entry must be zero */
        { 0, }
 };
@@ -371,6 +389,7 @@ static struct pci_driver sp_pci_driver = {
        .id_table = sp_pci_table,
        .probe = sp_pci_probe,
        .remove = sp_pci_remove,
+       .shutdown = sp_pci_shutdown,
        .driver.pm = &sp_pci_pm_ops,
 };
 
index 8b0640f..65a6413 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/topology.h>
 #include <linux/uacce.h>
 #include "hpre.h"
 #define HPRE_PREFETCH_DISABLE          BIT(30)
 #define HPRE_SVA_DISABLE_READY         (BIT(4) | BIT(8))
 
+/* clock gate */
+#define HPRE_CLKGATE_CTL               0x301a10
+#define HPRE_PEH_CFG_AUTO_GATE         0x301a2c
+#define HPRE_CLUSTER_DYN_CTL           0x302010
+#define HPRE_CORE_SHB_CFG              0x302088
+#define HPRE_CLKGATE_CTL_EN            BIT(0)
+#define HPRE_PEH_CFG_AUTO_GATE_EN      BIT(0)
+#define HPRE_CLUSTER_DYN_CTL_EN                BIT(0)
+#define HPRE_CORE_GATE_EN              (BIT(30) | BIT(31))
+
 #define HPRE_AM_OOO_SHUTDOWN_ENB       0x301044
 #define HPRE_AM_OOO_SHUTDOWN_ENABLE    BIT(0)
 #define HPRE_WR_MSI_PORT               BIT(2)
@@ -417,12 +428,63 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
                pci_err(qm->pdev, "failed to close sva prefetch\n");
 }
 
+static void hpre_enable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl(qm->io_base + HPRE_CLKGATE_CTL);
+       val |= HPRE_CLKGATE_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLKGATE_CTL);
+
+       val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+       val |= HPRE_PEH_CFG_AUTO_GATE_EN;
+       writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+
+       val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
+       val |= HPRE_CLUSTER_DYN_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
+
+       val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
+       val |= HPRE_CORE_GATE_EN;
+       writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+}
+
+static void hpre_disable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl(qm->io_base + HPRE_CLKGATE_CTL);
+       val &= ~HPRE_CLKGATE_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLKGATE_CTL);
+
+       val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+       val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
+       writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+
+       val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
+       val &= ~HPRE_CLUSTER_DYN_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
+
+       val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
+       val &= ~HPRE_CORE_GATE_EN;
+       writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+}
+
 static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
 {
        struct device *dev = &qm->pdev->dev;
        u32 val;
        int ret;
 
+       /* disable dynamic clock gating before SRAM init */
+       hpre_disable_clock_gate(qm);
+
        writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
        writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
        writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
@@ -473,6 +535,8 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
        /* Config data buffer pasid needed by Kunpeng 920 */
        hpre_config_pasid(qm);
 
+       hpre_enable_clock_gate(qm);
+
        return ret;
 }
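
A condensed sketch of how the two hunks above shape hpre_set_user_domain_and_cache() once the patch is applied (most of the body elided, comments mine): dynamic clock gating is switched off before the engine SRAM/memory initialisation and only re-enabled after the user domain, caches and PASID have been configured.

static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
	int ret;

	hpre_disable_clock_gate(qm);	/* no-op on hardware older than QM_HW_V3 */

	/* ... QM/AXI user config, SRAM init, cluster setup elided ... */
	ret = 0;

	/* Config data buffer pasid needed by Kunpeng 920 */
	hpre_config_pasid(qm);

	hpre_enable_clock_gate(qm);

	return ret;
}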
 
@@ -595,10 +659,15 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
                                    size_t count, loff_t *pos)
 {
        struct hpre_debugfs_file *file = filp->private_data;
+       struct hisi_qm *qm = hpre_file_to_qm(file);
        char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->type) {
        case HPRE_CLEAR_ENABLE:
@@ -608,18 +677,25 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
                val = hpre_cluster_inqry_read(file);
                break;
        default:
-               spin_unlock_irq(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
        spin_unlock_irq(&file->lock);
+
+       hisi_qm_put_dfx_access(qm);
        ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
                                     size_t count, loff_t *pos)
 {
        struct hpre_debugfs_file *file = filp->private_data;
+       struct hisi_qm *qm = hpre_file_to_qm(file);
        char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
        unsigned long val;
        int len, ret;
@@ -639,6 +715,10 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->type) {
        case HPRE_CLEAR_ENABLE:
@@ -655,12 +735,12 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
                ret = -EINVAL;
                goto err_input;
        }
-       spin_unlock_irq(&file->lock);
 
-       return count;
+       ret = count;
 
 err_input:
        spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -700,6 +780,24 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val)
 DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
                         hpre_debugfs_atomic64_set, "%llu\n");
 
+static int hpre_com_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);
+
+static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);
+
 static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
                                    enum hpre_ctrl_dbgfs_file type, int indx)
 {
@@ -737,8 +835,11 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
        regset->regs = hpre_com_dfx_regs;
        regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
        regset->base = qm->io_base;
+       regset->dev = dev;
+
+       debugfs_create_file("regs", 0444, qm->debug.debug_root,
+                           regset, &hpre_com_regs_fops);
 
-       debugfs_create_regset32("regs", 0444,  qm->debug.debug_root, regset);
        return 0;
 }
 
@@ -764,8 +865,10 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
                regset->regs = hpre_cluster_dfx_regs;
                regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
                regset->base = qm->io_base + hpre_cluster_offsets[i];
+               regset->dev = dev;
 
-               debugfs_create_regset32("regs", 0444, tmp_d, regset);
+               debugfs_create_file("regs", 0444, tmp_d, regset,
+                                   &hpre_cluster_regs_fops);
                ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
                                               i + HPRE_CLUSTER_CTRL);
                if (ret)
@@ -1017,6 +1120,8 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        goto err_with_alg_register;
        }
 
+       hisi_qm_pm_init(qm);
+
        return 0;
 
 err_with_alg_register:
@@ -1040,6 +1145,7 @@ static void hpre_remove(struct pci_dev *pdev)
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int ret;
 
+       hisi_qm_pm_uninit(qm);
        hisi_qm_wait_task_finish(qm, &hpre_devices);
        hisi_qm_alg_unregister(qm, &hpre_devices);
        if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
@@ -1062,6 +1168,10 @@ static void hpre_remove(struct pci_dev *pdev)
        hisi_qm_uninit(qm);
 }
 
+static const struct dev_pm_ops hpre_pm_ops = {
+       SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
 static const struct pci_error_handlers hpre_err_handler = {
        .error_detected         = hisi_qm_dev_err_detected,
        .slot_reset             = hisi_qm_dev_slot_reset,
@@ -1078,6 +1188,7 @@ static struct pci_driver hpre_pci_driver = {
                                  hisi_qm_sriov_configure : NULL,
        .err_handler            = &hpre_err_handler,
        .shutdown               = hisi_qm_dev_shutdown,
+       .driver.pm              = &hpre_pm_ops,
 };
 
 static void hpre_register_debugfs(void)
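
The debugfs changes above drop debugfs_create_regset32() in favour of a plain debugfs file whose show callback routes through hisi_qm_regs_dump(), so the register dump can take and release dfx access around the MMIO reads. A minimal sketch of the pattern, with foo_* names purely illustrative:

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "qm.h"				/* hisi_qm_regs_dump(), struct hisi_qm */

static int foo_regs_show(struct seq_file *s, void *unused)
{
	/* s->private is the data pointer passed to debugfs_create_file() */
	hisi_qm_regs_dump(s, s->private);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_regs);	/* generates foo_regs_fops */

static void foo_regs_debugfs_init(struct hisi_qm *qm,
				  struct debugfs_regset32 *regset,
				  struct dentry *dir)
{
	regset->dev = &qm->pdev->dev;	/* lets the dump recover the qm via drvdata */
	debugfs_create_file("regs", 0444, dir, regset, &foo_regs_fops);
}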
index 1d67f94..369562d 100644 (file)
@@ -4,12 +4,12 @@
 #include <linux/acpi.h>
 #include <linux/aer.h>
 #include <linux/bitmap.h>
-#include <linux/debugfs.h>
 #include <linux/dma-mapping.h>
 #include <linux/idr.h>
 #include <linux/io.h>
 #include <linux/irqreturn.h>
 #include <linux/log2.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/uacce.h>
 #define QM_QOS_MAX_CIR_S               11
 #define QM_QOS_VAL_MAX_LEN             32
 
+#define QM_AUTOSUSPEND_DELAY           3000
+
 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
        (((hop_num) << QM_CQ_HOP_NUM_SHIFT)     | \
        ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)      | \
@@ -734,6 +736,34 @@ static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
        return QM_IRQ_NUM_VF_V3;
 }
 
+static int qm_pm_get_sync(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+       int ret;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return 0;
+
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret < 0) {
+               dev_err(dev, "failed to get_sync(%d).\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void qm_pm_put_sync(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return;
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+}
+
 static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
 {
        u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
@@ -1173,16 +1203,13 @@ static struct hisi_qm *file_to_qm(struct debugfs_file *file)
        return container_of(debug, struct hisi_qm, debug);
 }
 
-static u32 current_q_read(struct debugfs_file *file)
+static u32 current_q_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
 }
 
-static int current_q_write(struct debugfs_file *file, u32 val)
+static int current_q_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file_to_qm(file);
        u32 tmp;
 
        if (val >= qm->debug.curr_qm_qp_num)
@@ -1199,18 +1226,14 @@ static int current_q_write(struct debugfs_file *file, u32 val)
        return 0;
 }
 
-static u32 clear_enable_read(struct debugfs_file *file)
+static u32 clear_enable_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
 }
 
 /* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */
-static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
+static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        if (rd_clr_ctrl > 1)
                return -EINVAL;
 
@@ -1219,16 +1242,13 @@ static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
        return 0;
 }
 
-static u32 current_qm_read(struct debugfs_file *file)
+static u32 current_qm_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + QM_DFX_MB_CNT_VF);
 }
 
-static int current_qm_write(struct debugfs_file *file, u32 val)
+static int current_qm_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file_to_qm(file);
        u32 tmp;
 
        if (val > qm->vfs_num)
@@ -1259,29 +1279,39 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf,
 {
        struct debugfs_file *file = filp->private_data;
        enum qm_debug_file index = file->index;
+       struct hisi_qm *qm = file_to_qm(file);
        char tbuf[QM_DBG_TMP_BUF_LEN];
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        mutex_lock(&file->lock);
        switch (index) {
        case CURRENT_QM:
-               val = current_qm_read(file);
+               val = current_qm_read(qm);
                break;
        case CURRENT_Q:
-               val = current_q_read(file);
+               val = current_q_read(qm);
                break;
        case CLEAR_ENABLE:
-               val = clear_enable_read(file);
+               val = clear_enable_read(qm);
                break;
        default:
-               mutex_unlock(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
        mutex_unlock(&file->lock);
 
+       hisi_qm_put_dfx_access(qm);
        ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       mutex_unlock(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
@@ -1289,6 +1319,7 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
 {
        struct debugfs_file *file = filp->private_data;
        enum qm_debug_file index = file->index;
+       struct hisi_qm *qm = file_to_qm(file);
        unsigned long val;
        char tbuf[QM_DBG_TMP_BUF_LEN];
        int len, ret;
@@ -1308,22 +1339,28 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        mutex_lock(&file->lock);
        switch (index) {
        case CURRENT_QM:
-               ret = current_qm_write(file, val);
+               ret = current_qm_write(qm, val);
                break;
        case CURRENT_Q:
-               ret = current_q_write(file, val);
+               ret = current_q_write(qm, val);
                break;
        case CLEAR_ENABLE:
-               ret = clear_enable_write(file, val);
+               ret = clear_enable_write(qm, val);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&file->lock);
 
+       hisi_qm_put_dfx_access(qm);
+
        if (ret)
                return ret;
 
@@ -1337,13 +1374,8 @@ static const struct file_operations qm_debug_fops = {
        .write = qm_debug_write,
 };
 
-struct qm_dfx_registers {
-       char  *reg_name;
-       u64   reg_offset;
-};
-
 #define CNT_CYC_REGS_NUM               10
-static struct qm_dfx_registers qm_dfx_regs[] = {
+static const struct debugfs_reg32 qm_dfx_regs[] = {
        /* XXX_CNT are reading clear register */
        {"QM_ECC_1BIT_CNT               ",  0x104000ull},
        {"QM_ECC_MBIT_CNT               ",  0x104008ull},
@@ -1369,31 +1401,59 @@ static struct qm_dfx_registers qm_dfx_regs[] = {
        {"QM_DFX_FF_ST5                 ",  0x1040dcull},
        {"QM_DFX_FF_ST6                 ",  0x1040e0ull},
        {"QM_IN_IDLE_ST                 ",  0x1040e4ull},
-       { NULL, 0}
 };
 
-static struct qm_dfx_registers qm_vf_dfx_regs[] = {
+static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
        {"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
-       { NULL, 0}
 };
 
-static int qm_regs_show(struct seq_file *s, void *unused)
+/**
+ * hisi_qm_regs_dump() - Dump register values.
+ * @s: debugfs file handle.
+ * @regset: accelerator register information.
+ *
+ * Dump accelerator registers.
+ */
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
 {
-       struct hisi_qm *qm = s->private;
-       struct qm_dfx_registers *regs;
+       struct pci_dev *pdev = to_pci_dev(regset->dev);
+       struct hisi_qm *qm = pci_get_drvdata(pdev);
+       const struct debugfs_reg32 *regs = regset->regs;
+       int regs_len = regset->nregs;
+       int i, ret;
        u32 val;
 
-       if (qm->fun_type == QM_HW_PF)
-               regs = qm_dfx_regs;
-       else
-               regs = qm_vf_dfx_regs;
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return;
 
-       while (regs->reg_name) {
-               val = readl(qm->io_base + regs->reg_offset);
-               seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
-               regs++;
+       for (i = 0; i < regs_len; i++) {
+               val = readl(regset->base + regs[i].offset);
+               seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
        }
 
+       hisi_qm_put_dfx_access(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
+
+static int qm_regs_show(struct seq_file *s, void *unused)
+{
+       struct hisi_qm *qm = s->private;
+       struct debugfs_regset32 regset;
+
+       if (qm->fun_type == QM_HW_PF) {
+               regset.regs = qm_dfx_regs;
+               regset.nregs = ARRAY_SIZE(qm_dfx_regs);
+       } else {
+               regset.regs = qm_vf_dfx_regs;
+               regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
+       }
+
+       regset.base = qm->io_base;
+       regset.dev = &qm->pdev->dev;
+
+       hisi_qm_regs_dump(s, &regset);
+
        return 0;
 }
 
@@ -1823,16 +1883,24 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
        if (*pos)
                return 0;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        /* Judge if the instance is being reset. */
        if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
                return 0;
 
-       if (count > QM_DBG_WRITE_LEN)
-               return -ENOSPC;
+       if (count > QM_DBG_WRITE_LEN) {
+               ret = -ENOSPC;
+               goto put_dfx_access;
+       }
 
        cmd_buf = memdup_user_nul(buffer, count);
-       if (IS_ERR(cmd_buf))
-               return PTR_ERR(cmd_buf);
+       if (IS_ERR(cmd_buf)) {
+               ret = PTR_ERR(cmd_buf);
+               goto put_dfx_access;
+       }
 
        cmd_buf_tmp = strchr(cmd_buf, '\n');
        if (cmd_buf_tmp) {
@@ -1843,12 +1911,16 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
        ret = qm_cmd_write_dump(qm, cmd_buf);
        if (ret) {
                kfree(cmd_buf);
-               return ret;
+               goto put_dfx_access;
        }
 
        kfree(cmd_buf);
 
-       return count;
+       ret = count;
+
+put_dfx_access:
+       hisi_qm_put_dfx_access(qm);
+       return ret;
 }
 
 static const struct file_operations qm_cmd_fops = {
@@ -2445,11 +2517,19 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
 {
        struct hisi_qp *qp;
+       int ret;
+
+       ret = qm_pm_get_sync(qm);
+       if (ret)
+               return ERR_PTR(ret);
 
        down_write(&qm->qps_lock);
        qp = qm_create_qp_nolock(qm, alg_type);
        up_write(&qm->qps_lock);
 
+       if (IS_ERR(qp))
+               qm_pm_put_sync(qm);
+
        return qp;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
@@ -2475,6 +2555,8 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
        idr_remove(&qm->qp_idr, qp->qp_id);
 
        up_write(&qm->qps_lock);
+
+       qm_pm_put_sync(qm);
 }
 EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
 
@@ -3200,6 +3282,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
        init_rwsem(&qm->qps_lock);
        qm->qp_in_used = 0;
        qm->misc_ctl = false;
+       if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
+               if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
+                       dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
+       }
 }
 
 static void qm_cmd_uninit(struct hisi_qm *qm)
@@ -4057,10 +4143,15 @@ static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
        u32 qos_val, ir;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        /* Mailbox and reset cannot be operated at the same time */
        if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
                pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
-               return  -EAGAIN;
+               ret = -EAGAIN;
+               goto err_put_dfx_access;
        }
 
        if (qm->fun_type == QM_HW_PF) {
@@ -4079,6 +4170,8 @@ static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
 
 err_get_status:
        clear_bit(QM_RESETTING, &qm->misc_ctl);
+err_put_dfx_access:
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -4159,15 +4252,23 @@ static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
 
        fun_index = device * 8 + function;
 
+       ret = qm_pm_get_sync(qm);
+       if (ret) {
+               ret = -EINVAL;
+               goto err_get_status;
+       }
+
        ret = qm_func_shaper_enable(qm, fun_index, val);
        if (ret) {
                pci_err(qm->pdev, "failed to enable function shaper!\n");
                ret = -EINVAL;
-               goto err_get_status;
+               goto err_put_sync;
        }
 
-       ret =  count;
+       ret = count;
 
+err_put_sync:
+       qm_pm_put_sync(qm);
 err_get_status:
        clear_bit(QM_RESETTING, &qm->misc_ctl);
        return ret;
@@ -4245,7 +4346,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
  */
 void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
 {
-       struct qm_dfx_registers *regs;
+       const struct debugfs_reg32 *regs;
        int i;
 
        /* clear current_qm */
@@ -4264,7 +4365,7 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
 
        regs = qm_dfx_regs;
        for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
-               readl(qm->io_base + regs->reg_offset);
+               readl(qm->io_base + regs->offset);
                regs++;
        }
 
@@ -4287,19 +4388,23 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int pre_existing_vfs, num_vfs, total_vfs, ret;
 
+       ret = qm_pm_get_sync(qm);
+       if (ret)
+               return ret;
+
        total_vfs = pci_sriov_get_totalvfs(pdev);
        pre_existing_vfs = pci_num_vf(pdev);
        if (pre_existing_vfs) {
                pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
                        pre_existing_vfs);
-               return 0;
+               goto err_put_sync;
        }
 
        num_vfs = min_t(int, max_vfs, total_vfs);
        ret = qm_vf_q_assign(qm, num_vfs);
        if (ret) {
                pci_err(pdev, "Can't assign queues for VF!\n");
-               return ret;
+               goto err_put_sync;
        }
 
        qm->vfs_num = num_vfs;
@@ -4308,12 +4413,16 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
        if (ret) {
                pci_err(pdev, "Can't enable VF!\n");
                qm_clear_vft_config(qm);
-               return ret;
+               goto err_put_sync;
        }
 
        pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
 
        return num_vfs;
+
+err_put_sync:
+       qm_pm_put_sync(qm);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
 
@@ -4328,6 +4437,7 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
 {
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
+       int ret;
 
        if (pci_vfs_assigned(pdev)) {
                pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
@@ -4343,8 +4453,13 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
        pci_disable_sriov(pdev);
        /* clear vf function shaper configure array */
        memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+       ret = qm_clear_vft_config(qm);
+       if (ret)
+               return ret;
 
-       return qm_clear_vft_config(qm);
+       qm_pm_put_sync(qm);
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
 
@@ -5164,11 +5279,18 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
        struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
        int ret;
 
+       ret = qm_pm_get_sync(qm);
+       if (ret) {
+               clear_bit(QM_RST_SCHED, &qm->misc_ctl);
+               return;
+       }
+
        /* reset pcie device controller */
        ret = qm_controller_reset(qm);
        if (ret)
                dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
 
+       qm_pm_put_sync(qm);
 }
 
 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
@@ -5680,6 +5802,194 @@ err_pci_init:
 }
 EXPORT_SYMBOL_GPL(hisi_qm_init);
 
+/**
+ * hisi_qm_get_dfx_access() - Try to get dfx access.
+ * @qm: pointer to accelerator device.
+ *
+ * Try to get dfx access; on success the caller can read debug information.
+ *
+ * If the device is runtime suspended, return failure; otherwise
+ * bump up the runtime PM usage counter.
+ */
+int hisi_qm_get_dfx_access(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (pm_runtime_suspended(dev)) {
+               dev_info(dev, "cannot read/write - device is suspended.\n");
+               return -EAGAIN;
+       }
+
+       return qm_pm_get_sync(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);
+
+/**
+ * hisi_qm_put_dfx_access() - Put dfx access.
+ * @qm: pointer to accelerator device.
+ *
+ * Put dfx access and drop the runtime PM usage counter.
+ */
+void hisi_qm_put_dfx_access(struct hisi_qm *qm)
+{
+       qm_pm_put_sync(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);
+
+/**
+ * hisi_qm_pm_init() - Initialize qm runtime PM.
+ * @qm: pointer to accelerator device.
+ *
+ * Function that initializes qm runtime PM.
+ */
+void hisi_qm_pm_init(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return;
+
+       pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_put_noidle(dev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_pm_init);
+
+/**
+ * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
+ * @qm: pointer to accelerator device.
+ *
+ * Function that uninitializes qm runtime PM.
+ */
+void hisi_qm_pm_uninit(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return;
+
+       pm_runtime_get_noresume(dev);
+       pm_runtime_dont_use_autosuspend(dev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);
+
+static int qm_prepare_for_suspend(struct hisi_qm *qm)
+{
+       struct pci_dev *pdev = qm->pdev;
+       int ret;
+       u32 val;
+
+       ret = qm->ops->set_msi(qm, false);
+       if (ret) {
+               pci_err(pdev, "failed to disable MSI before suspending!\n");
+               return ret;
+       }
+
+       /* shutdown OOO register */
+       writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
+              qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+
+       ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
+                                        val,
+                                        (val == ACC_MASTER_TRANS_RETURN_RW),
+                                        POLL_PERIOD, POLL_TIMEOUT);
+       if (ret) {
+               pci_emerg(pdev, "Bus lock! Please reset system.\n");
+               return ret;
+       }
+
+       ret = qm_set_pf_mse(qm, false);
+       if (ret)
+               pci_err(pdev, "failed to disable MSE before suspending!\n");
+
+       return ret;
+}
+
+static int qm_rebuild_for_resume(struct hisi_qm *qm)
+{
+       struct pci_dev *pdev = qm->pdev;
+       int ret;
+
+       ret = qm_set_pf_mse(qm, true);
+       if (ret) {
+               pci_err(pdev, "failed to enable MSE after resuming!\n");
+               return ret;
+       }
+
+       ret = qm->ops->set_msi(qm, true);
+       if (ret) {
+               pci_err(pdev, "failed to enable MSI after resuming!\n");
+               return ret;
+       }
+
+       ret = qm_dev_hw_init(qm);
+       if (ret) {
+               pci_err(pdev, "failed to init device after resuming\n");
+               return ret;
+       }
+
+       qm_cmd_init(qm);
+       hisi_qm_dev_err_init(qm);
+
+       return 0;
+}
+
+/**
+ * hisi_qm_suspend() - Runtime suspend of given device.
+ * @dev: device to suspend.
+ *
+ * Function that suspends the device.
+ */
+int hisi_qm_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct hisi_qm *qm = pci_get_drvdata(pdev);
+       int ret;
+
+       pci_info(pdev, "entering suspended state\n");
+
+       ret = hisi_qm_stop(qm, QM_NORMAL);
+       if (ret) {
+               pci_err(pdev, "failed to stop qm(%d)\n", ret);
+               return ret;
+       }
+
+       ret = qm_prepare_for_suspend(qm);
+       if (ret)
+               pci_err(pdev, "failed to prepare for suspend (%d)\n", ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_suspend);
+
+/**
+ * hisi_qm_resume() - Runtime resume of given device.
+ * @dev: device to resume.
+ *
+ * Function that resumes the device.
+ */
+int hisi_qm_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct hisi_qm *qm = pci_get_drvdata(pdev);
+       int ret;
+
+       pci_info(pdev, "resuming from suspend state\n");
+
+       ret = qm_rebuild_for_resume(qm);
+       if (ret) {
+               pci_err(pdev, "failed to rebuild for resume (%d)\n", ret);
+               return ret;
+       }
+
+       ret = hisi_qm_start(qm);
+       if (ret)
+               pci_err(pdev, "failed to start qm(%d)\n", ret);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_resume);
+
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
 MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
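
The hpre/sec/zip debugfs handlers in this series all follow the same shape when using the new dfx helpers: take dfx access before touching MMIO (this fails with -EAGAIN if the device is already runtime suspended, and otherwise pins it active), then put it afterwards so the autosuspend timer is re-armed. A minimal sketch of that pattern; foo_debug_read and the register offset are illustrative, not part of any driver:

#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include "qm.h"

static ssize_t foo_debug_read(struct hisi_qm *qm, char __user *buf,
			      size_t count, loff_t *pos)
{
	char tbuf[32];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;			/* -EAGAIN while suspended */

	val = readl(qm->io_base + 0x1040e4);	/* e.g. QM_IN_IDLE_ST from the table above */

	hisi_qm_put_dfx_access(qm);		/* re-arms autosuspend */

	ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}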
index 035eaf8..3068093 100644 (file)
@@ -4,6 +4,7 @@
 #define HISI_ACC_QM_H
 
 #include <linux/bitfield.h>
+#include <linux/debugfs.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -430,4 +431,11 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev);
 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_resume(struct device *dev);
+int hisi_qm_suspend(struct device *dev);
+void hisi_qm_pm_uninit(struct hisi_qm *qm);
+void hisi_qm_pm_init(struct hisi_qm *qm);
+int hisi_qm_get_dfx_access(struct hisi_qm *qm);
+void hisi_qm_put_dfx_access(struct hisi_qm *qm);
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
 #endif
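
How a driver built on this header consumes the new prototypes, mirroring the hpre/sec/zip wiring elsewhere in this series (a sketch only; foo_* names are placeholders and most of probe/remove is elided):

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "qm.h"

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;

	/* ... allocate qm, set drvdata, init queues, register algs (elided) ... */
	qm = pci_get_drvdata(pdev);

	hisi_qm_pm_init(qm);	/* enable autosuspend and drop the probe-time PM ref */

	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);	/* re-take the PM ref and disable autosuspend */

	/* ... teardown elided ... */
}

static struct pci_driver foo_pci_driver = {
	.name		= "foo",
	.probe		= foo_probe,
	.remove		= foo_remove,
	.driver.pm	= &foo_pm_ops,
};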
index 018415b..d97cf02 100644 (file)
@@ -157,11 +157,6 @@ struct sec_ctx {
        struct device *dev;
 };
 
-enum sec_endian {
-       SEC_LE = 0,
-       SEC_32BE,
-       SEC_64BE
-};
 
 enum sec_debug_file_index {
        SEC_CLEAR_ENABLE,
index 490db7b..90551bf 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/topology.h>
 #include <linux/uacce.h>
 #define SEC_MEM_START_INIT_REG 0x301100
 #define SEC_MEM_INIT_DONE_REG          0x301104
 
+/* clock gating */
 #define SEC_CONTROL_REG                0x301200
-#define SEC_TRNG_EN_SHIFT              8
+#define SEC_DYNAMIC_GATE_REG           0x30121c
+#define SEC_CORE_AUTO_GATE             0x30212c
+#define SEC_DYNAMIC_GATE_EN            0x7bff
+#define SEC_CORE_AUTO_GATE_EN          GENMASK(3, 0)
 #define SEC_CLK_GATE_ENABLE            BIT(3)
 #define SEC_CLK_GATE_DISABLE           (~BIT(3))
+
+#define SEC_TRNG_EN_SHIFT              8
 #define SEC_AXI_SHUTDOWN_ENABLE        BIT(12)
 #define SEC_AXI_SHUTDOWN_DISABLE       0xFFFFEFFF
 
@@ -312,31 +319,20 @@ static const struct pci_device_id sec_dev_ids[] = {
 };
 MODULE_DEVICE_TABLE(pci, sec_dev_ids);
 
-static u8 sec_get_endian(struct hisi_qm *qm)
+static void sec_set_endian(struct hisi_qm *qm)
 {
        u32 reg;
 
-       /*
-        * As for VF, it is a wrong way to get endian setting by
-        * reading a register of the engine
-        */
-       if (qm->pdev->is_virtfn) {
-               dev_err_ratelimited(&qm->pdev->dev,
-                                   "cannot access a register in VF!\n");
-               return SEC_LE;
-       }
        reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
-       /* BD little endian mode */
-       if (!(reg & BIT(0)))
-               return SEC_LE;
+       reg &= ~(BIT(1) | BIT(0));
+       if (!IS_ENABLED(CONFIG_64BIT))
+               reg |= BIT(1);
 
-       /* BD 32-bits big endian mode */
-       else if (!(reg & BIT(1)))
-               return SEC_32BE;
 
-       /* BD 64-bits big endian mode */
-       else
-               return SEC_64BE;
+       if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+               reg |= BIT(0);
+
+       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
 }
 
 static void sec_open_sva_prefetch(struct hisi_qm *qm)
@@ -378,15 +374,43 @@ static void sec_close_sva_prefetch(struct hisi_qm *qm)
                pci_err(qm->pdev, "failed to close sva prefetch\n");
 }
 
+static void sec_enable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
+       val |= SEC_CLK_GATE_ENABLE;
+       writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
+
+       val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
+       val |= SEC_DYNAMIC_GATE_EN;
+       writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);
+
+       val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
+       val |= SEC_CORE_AUTO_GATE_EN;
+       writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
+}
+
+static void sec_disable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       /* Kunpeng 920 needs clock gating disabled here */
+       val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
+       val &= SEC_CLK_GATE_DISABLE;
+       writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
+}
+
 static int sec_engine_init(struct hisi_qm *qm)
 {
        int ret;
        u32 reg;
 
-       /* disable clock gate control */
-       reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
-       reg &= SEC_CLK_GATE_DISABLE;
-       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
+       /* disable clock gate control before mem init */
+       sec_disable_clock_gate(qm);
 
        writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);
 
@@ -429,9 +453,9 @@ static int sec_engine_init(struct hisi_qm *qm)
                       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
 
        /* config endian */
-       reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
-       reg |= sec_get_endian(qm);
-       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
+       sec_set_endian(qm);
+
+       sec_enable_clock_gate(qm);
 
        return 0;
 }
@@ -533,17 +557,14 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
 }
 
-static u32 sec_clear_enable_read(struct sec_debug_file *file)
+static u32 sec_clear_enable_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file->qm;
-
        return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
                        SEC_CTRL_CNT_CLR_CE_BIT;
 }
 
-static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
+static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file->qm;
        u32 tmp;
 
        if (val != 1 && val)
@@ -561,24 +582,34 @@ static ssize_t sec_debug_read(struct file *filp, char __user *buf,
 {
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+       struct hisi_qm *qm = file->qm;
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
 
        switch (file->index) {
        case SEC_CLEAR_ENABLE:
-               val = sec_clear_enable_read(file);
+               val = sec_clear_enable_read(qm);
                break;
        default:
-               spin_unlock_irq(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
 
        spin_unlock_irq(&file->lock);
-       ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
 
+       hisi_qm_put_dfx_access(qm);
+       ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
@@ -586,6 +617,7 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
 {
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+       struct hisi_qm *qm = file->qm;
        unsigned long val;
        int len, ret;
 
@@ -604,11 +636,15 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
 
        switch (file->index) {
        case SEC_CLEAR_ENABLE:
-               ret = sec_clear_enable_write(file, val);
+               ret = sec_clear_enable_write(qm, val);
                if (ret)
                        goto err_input;
                break;
@@ -617,12 +653,11 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
                goto err_input;
        }
 
-       spin_unlock_irq(&file->lock);
-
-       return count;
+       ret = count;
 
  err_input:
        spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -653,6 +688,15 @@ static int sec_debugfs_atomic64_set(void *data, u64 val)
 DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
                         sec_debugfs_atomic64_set, "%lld\n");
 
+static int sec_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sec_regs);
+
 static int sec_core_debug_init(struct hisi_qm *qm)
 {
        struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
@@ -671,9 +715,10 @@ static int sec_core_debug_init(struct hisi_qm *qm)
        regset->regs = sec_dfx_regs;
        regset->nregs = ARRAY_SIZE(sec_dfx_regs);
        regset->base = qm->io_base;
+       regset->dev = dev;
 
        if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
-               debugfs_create_regset32("regs", 0444, tmp_d, regset);
+               debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);
 
        for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
                atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
@@ -981,10 +1026,13 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        goto err_alg_unregister;
        }
 
+       hisi_qm_pm_init(qm);
+
        return 0;
 
 err_alg_unregister:
-       hisi_qm_alg_unregister(qm, &sec_devices);
+       if (qm->qp_num >= ctx_q_num)
+               hisi_qm_alg_unregister(qm, &sec_devices);
 err_qm_stop:
        sec_debugfs_exit(qm);
        hisi_qm_stop(qm, QM_NORMAL);
@@ -999,6 +1047,7 @@ static void sec_remove(struct pci_dev *pdev)
 {
        struct hisi_qm *qm = pci_get_drvdata(pdev);
 
+       hisi_qm_pm_uninit(qm);
        hisi_qm_wait_task_finish(qm, &sec_devices);
        if (qm->qp_num >= ctx_q_num)
                hisi_qm_alg_unregister(qm, &sec_devices);
@@ -1018,6 +1067,10 @@ static void sec_remove(struct pci_dev *pdev)
        sec_qm_uninit(qm);
 }
 
+static const struct dev_pm_ops sec_pm_ops = {
+       SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
 static const struct pci_error_handlers sec_err_handler = {
        .error_detected = hisi_qm_dev_err_detected,
        .slot_reset     = hisi_qm_dev_slot_reset,
@@ -1033,6 +1086,7 @@ static struct pci_driver sec_pci_driver = {
        .err_handler = &sec_err_handler,
        .sriov_configure = hisi_qm_sriov_configure,
        .shutdown = hisi_qm_dev_shutdown,
+       .driver.pm = &sec_pm_ops,
 };
 
 static void sec_register_debugfs(void)
index f8482ce..7148201 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/topology.h>
 #include <linux/uacce.h>
 #define HZIP_DELAY_1_US                1
 #define HZIP_POLL_TIMEOUT_US   1000
 
+/* clock gating */
+#define HZIP_PEH_CFG_AUTO_GATE         0x3011A8
+#define HZIP_PEH_CFG_AUTO_GATE_EN      BIT(0)
+#define HZIP_CORE_GATED_EN             GENMASK(15, 8)
+#define HZIP_CORE_GATED_OOO_EN         BIT(29)
+#define HZIP_CLOCK_GATED_EN            (HZIP_CORE_GATED_EN | \
+                                        HZIP_CORE_GATED_OOO_EN)
+
 static const char hisi_zip_name[] = "hisi_zip";
 static struct dentry *hzip_debugfs_root;
 
@@ -312,6 +321,22 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
                pci_err(qm->pdev, "failed to close sva prefetch\n");
 }
 
+static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl(qm->io_base + HZIP_CLOCK_GATE_CTRL);
+       val |= HZIP_CLOCK_GATED_EN;
+       writel(val, qm->io_base + HZIP_CLOCK_GATE_CTRL);
+
+       val = readl(qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
+       val |= HZIP_PEH_CFG_AUTO_GATE_EN;
+       writel(val, qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
+}
+
 static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
 {
        void __iomem *base = qm->io_base;
@@ -359,6 +384,8 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
               CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
               FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
 
+       hisi_zip_enable_clock_gate(qm);
+
        return 0;
 }
 
@@ -423,17 +450,14 @@ static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
        return &hisi_zip->qm;
 }
 
-static u32 clear_enable_read(struct ctrl_debug_file *file)
+static u32 clear_enable_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
                     HZIP_SOFT_CTRL_CNT_CLR_CE_BIT;
 }
 
-static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
+static int clear_enable_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file_to_qm(file);
        u32 tmp;
 
        if (val != 1 && val != 0)
@@ -450,22 +474,33 @@ static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
                                        size_t count, loff_t *pos)
 {
        struct ctrl_debug_file *file = filp->private_data;
+       struct hisi_qm *qm = file_to_qm(file);
        char tbuf[HZIP_BUF_SIZE];
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->index) {
        case HZIP_CLEAR_ENABLE:
-               val = clear_enable_read(file);
+               val = clear_enable_read(qm);
                break;
        default:
-               spin_unlock_irq(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
        spin_unlock_irq(&file->lock);
+
+       hisi_qm_put_dfx_access(qm);
        ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
@@ -473,6 +508,7 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
                                         size_t count, loff_t *pos)
 {
        struct ctrl_debug_file *file = filp->private_data;
+       struct hisi_qm *qm = file_to_qm(file);
        char tbuf[HZIP_BUF_SIZE];
        unsigned long val;
        int len, ret;
@@ -491,10 +527,14 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->index) {
        case HZIP_CLEAR_ENABLE:
-               ret = clear_enable_write(file, val);
+               ret = clear_enable_write(qm, val);
                if (ret)
                        goto err_input;
                break;
@@ -502,12 +542,12 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
                ret = -EINVAL;
                goto err_input;
        }
-       spin_unlock_irq(&file->lock);
 
-       return count;
+       ret = count;
 
 err_input:
        spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -538,6 +578,15 @@ static int zip_debugfs_atomic64_get(void *data, u64 *val)
 DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
                         zip_debugfs_atomic64_set, "%llu\n");
 
+static int hisi_zip_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
+
 static int hisi_zip_core_debug_init(struct hisi_qm *qm)
 {
        struct device *dev = &qm->pdev->dev;
@@ -560,9 +609,11 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
                regset->regs = hzip_dfx_regs;
                regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
                regset->base = qm->io_base + core_offsets[i];
+               regset->dev = dev;
 
                tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
-               debugfs_create_regset32("regs", 0444, tmp_d, regset);
+               debugfs_create_file("regs", 0444, tmp_d, regset,
+                                    &hisi_zip_regs_fops);
        }
 
        return 0;
@@ -898,6 +949,8 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        goto err_qm_alg_unregister;
        }
 
+       hisi_qm_pm_init(qm);
+
        return 0;
 
 err_qm_alg_unregister:
@@ -920,6 +973,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
 {
        struct hisi_qm *qm = pci_get_drvdata(pdev);
 
+       hisi_qm_pm_uninit(qm);
        hisi_qm_wait_task_finish(qm, &zip_devices);
        hisi_qm_alg_unregister(qm, &zip_devices);
 
@@ -932,6 +986,10 @@ static void hisi_zip_remove(struct pci_dev *pdev)
        hisi_zip_qm_uninit(qm);
 }
 
+static const struct dev_pm_ops hisi_zip_pm_ops = {
+       SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
 static const struct pci_error_handlers hisi_zip_err_handler = {
        .error_detected = hisi_qm_dev_err_detected,
        .slot_reset     = hisi_qm_dev_slot_reset,
@@ -948,6 +1006,7 @@ static struct pci_driver hisi_zip_pci_driver = {
                                        hisi_qm_sriov_configure : NULL,
        .err_handler            = &hisi_zip_err_handler,
        .shutdown               = hisi_qm_dev_shutdown,
+       .driver.pm              = &hisi_zip_pm_ops,
 };
 
 static void hisi_zip_register_debugfs(void)
index d6a7784..d19e5ff 100644 (file)
@@ -170,15 +170,19 @@ static struct dcp *global_sdcp;
 
 static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 {
+       int dma_err;
        struct dcp *sdcp = global_sdcp;
        const int chan = actx->chan;
        uint32_t stat;
        unsigned long ret;
        struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
-
        dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
                                              DMA_TO_DEVICE);
 
+       dma_err = dma_mapping_error(sdcp->dev, desc_phys);
+       if (dma_err)
+               return dma_err;
+
        reinit_completion(&sdcp->completion[chan]);
 
        /* Clear status register. */
@@ -216,18 +220,29 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
                           struct skcipher_request *req, int init)
 {
+       dma_addr_t key_phys, src_phys, dst_phys;
        struct dcp *sdcp = global_sdcp;
        struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
        struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
        int ret;
 
-       dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
-                                            2 * AES_KEYSIZE_128,
-                                            DMA_TO_DEVICE);
-       dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
-                                            DCP_BUF_SZ, DMA_TO_DEVICE);
-       dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
-                                            DCP_BUF_SZ, DMA_FROM_DEVICE);
+       key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+                                 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
+       ret = dma_mapping_error(sdcp->dev, key_phys);
+       if (ret)
+               return ret;
+
+       src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
+                                 DCP_BUF_SZ, DMA_TO_DEVICE);
+       ret = dma_mapping_error(sdcp->dev, src_phys);
+       if (ret)
+               goto err_src;
+
+       dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
+                                 DCP_BUF_SZ, DMA_FROM_DEVICE);
+       ret = dma_mapping_error(sdcp->dev, dst_phys);
+       if (ret)
+               goto err_dst;
 
        if (actx->fill % AES_BLOCK_SIZE) {
                dev_err(sdcp->dev, "Invalid block size!\n");
@@ -265,10 +280,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
        ret = mxs_dcp_start_dma(actx);
 
 aes_done_run:
+       dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
+err_dst:
+       dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+err_src:
        dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
                         DMA_TO_DEVICE);
-       dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
-       dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
 
        return ret;
 }
@@ -283,21 +300,20 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 
        struct scatterlist *dst = req->dst;
        struct scatterlist *src = req->src;
-       const int nents = sg_nents(req->src);
+       int dst_nents = sg_nents(dst);
 
        const int out_off = DCP_BUF_SZ;
        uint8_t *in_buf = sdcp->coh->aes_in_buf;
        uint8_t *out_buf = sdcp->coh->aes_out_buf;
 
-       uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
        uint32_t dst_off = 0;
+       uint8_t *src_buf = NULL;
        uint32_t last_out_len = 0;
 
        uint8_t *key = sdcp->coh->aes_key;
 
        int ret = 0;
-       int split = 0;
-       unsigned int i, len, clen, rem = 0, tlen = 0;
+       unsigned int i, len, clen, tlen = 0;
        int init = 0;
        bool limit_hit = false;
 
@@ -315,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
                memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
        }
 
-       for_each_sg(req->src, src, nents, i) {
+       for_each_sg(req->src, src, sg_nents(src), i) {
                src_buf = sg_virt(src);
                len = sg_dma_len(src);
                tlen += len;
@@ -340,34 +356,17 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
                         * submit the buffer.
                         */
                        if (actx->fill == out_off || sg_is_last(src) ||
-                               limit_hit) {
+                           limit_hit) {
                                ret = mxs_dcp_run_aes(actx, req, init);
                                if (ret)
                                        return ret;
                                init = 0;
 
-                               out_tmp = out_buf;
+                               sg_pcopy_from_buffer(dst, dst_nents, out_buf,
+                                                    actx->fill, dst_off);
+                               dst_off += actx->fill;
                                last_out_len = actx->fill;
-                               while (dst && actx->fill) {
-                                       if (!split) {
-                                               dst_buf = sg_virt(dst);
-                                               dst_off = 0;
-                                       }
-                                       rem = min(sg_dma_len(dst) - dst_off,
-                                                 actx->fill);
-
-                                       memcpy(dst_buf + dst_off, out_tmp, rem);
-                                       out_tmp += rem;
-                                       dst_off += rem;
-                                       actx->fill -= rem;
-
-                                       if (dst_off == sg_dma_len(dst)) {
-                                               dst = sg_next(dst);
-                                               split = 0;
-                                       } else {
-                                               split = 1;
-                                       }
-                               }
+                               actx->fill = 0;
                        }
                } while (len);
 
@@ -557,6 +556,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
        dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
                                             DCP_BUF_SZ, DMA_TO_DEVICE);
 
+       ret = dma_mapping_error(sdcp->dev, buf_phys);
+       if (ret)
+               return ret;
+
        /* Fill in the DMA descriptor. */
        desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
                    MXS_DCP_CONTROL0_INTERRUPT |
@@ -589,6 +592,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
        if (rctx->fini) {
                digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
                                             DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
+               ret = dma_mapping_error(sdcp->dev, digest_phys);
+               if (ret)
+                       goto done_run;
+
                desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
                desc->payload = digest_phys;
        }
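
The mxs-dcp change above adds the same idiom in every mapping path: each dma_map_single() is immediately checked with dma_mapping_error(), and a failure unwinds the earlier mappings in reverse order. A condensed sketch of that idiom; foo_map() and its parameters are illustrative only:

#include <linux/dma-mapping.h>

static int foo_map(struct device *dev, void *key, void *in, void *out,
		   size_t klen, size_t blen,
		   dma_addr_t *key_phys, dma_addr_t *in_phys,
		   dma_addr_t *out_phys)
{
	int ret;

	*key_phys = dma_map_single(dev, key, klen, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, *key_phys);
	if (ret)
		return ret;

	*in_phys = dma_map_single(dev, in, blen, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, *in_phys);
	if (ret)
		goto err_in;

	*out_phys = dma_map_single(dev, out, blen, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, *out_phys);
	if (ret)
		goto err_out;

	return 0;

err_out:
	dma_unmap_single(dev, *in_phys, blen, DMA_TO_DEVICE);
err_in:
	dma_unmap_single(dev, *key_phys, klen, DMA_TO_DEVICE);
	return ret;
}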
index 0dd4c6b..9b968ac 100644 (file)
@@ -1175,9 +1175,9 @@ static int omap_aes_probe(struct platform_device *pdev)
        spin_lock_init(&dd->lock);
 
        INIT_LIST_HEAD(&dd->list);
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_add_tail(&dd->list, &dev_list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        /* Initialize crypto engine */
        dd->engine = crypto_engine_alloc_init(dev, 1);
@@ -1264,9 +1264,9 @@ static int omap_aes_remove(struct platform_device *pdev)
        if (!dd)
                return -ENODEV;
 
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_del(&dd->list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
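
The spin_lock() to spin_lock_bh() conversion above matters when the same lock is also taken from bottom-half context, so a process-context holder must disable BHs to avoid deadlocking against a softirq user on the same CPU. A minimal sketch of that locking rule, assuming (as the change suggests) that the device list is also walked from softirq context when a request is dispatched; foo_* names are hypothetical:

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(foo_dev_list);
static DEFINE_SPINLOCK(foo_list_lock);

struct foo_dev {
	struct list_head list;
};

/* Process context (probe/remove): take the lock with BHs disabled. */
static void foo_register_dev(struct foo_dev *dd)
{
	spin_lock_bh(&foo_list_lock);
	list_add_tail(&dd->list, &foo_dev_list);
	spin_unlock_bh(&foo_list_lock);
}

/* Softirq context (e.g. a request dispatched from a tasklet): plain
 * spin_lock() is enough because BHs are already disabled here. */
static struct foo_dev *foo_find_dev(void)
{
	struct foo_dev *dd;

	spin_lock(&foo_list_lock);
	dd = list_first_entry_or_null(&foo_dev_list, struct foo_dev, list);
	spin_unlock(&foo_list_lock);

	return dd;
}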
index 31bdb1d..a4cc6bf 100644 (file)
@@ -210,7 +210,7 @@ void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
        buf = sg_virt(sg);
        pages = get_order(len);
 
-       if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
+       if (orig && (flags & OMAP_CRYPTO_DATA_COPIED))
                omap_crypto_copy_data(sg, orig, offset, len);
 
        if (flags & OMAP_CRYPTO_DATA_COPIED)
index bc86313..be77656 100644 (file)
@@ -1033,9 +1033,9 @@ static int omap_des_probe(struct platform_device *pdev)
 
 
        INIT_LIST_HEAD(&dd->list);
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_add_tail(&dd->list, &dev_list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        /* Initialize des crypto engine */
        dd->engine = crypto_engine_alloc_init(dev, 1);
@@ -1094,9 +1094,9 @@ static int omap_des_remove(struct platform_device *pdev)
        if (!dd)
                return -ENODEV;
 
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_del(&dd->list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
index dd53ad9..f6bf53c 100644 (file)
 #define FLAGS_FINAL            1
 #define FLAGS_DMA_ACTIVE       2
 #define FLAGS_OUTPUT_READY     3
-#define FLAGS_INIT             4
 #define FLAGS_CPU              5
 #define FLAGS_DMA_READY                6
 #define FLAGS_AUTO_XOR         7
@@ -368,24 +367,6 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
                        hash[i] = le32_to_cpup((__le32 *)in + i);
 }
 
-static int omap_sham_hw_init(struct omap_sham_dev *dd)
-{
-       int err;
-
-       err = pm_runtime_resume_and_get(dd->dev);
-       if (err < 0) {
-               dev_err(dd->dev, "failed to get sync: %d\n", err);
-               return err;
-       }
-
-       if (!test_bit(FLAGS_INIT, &dd->flags)) {
-               set_bit(FLAGS_INIT, &dd->flags);
-               dd->err = 0;
-       }
-
-       return 0;
-}
-
 static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
                                 int final, int dma)
 {
@@ -1093,11 +1074,14 @@ static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
        dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
                ctx->op, ctx->total, ctx->digcnt, final);
 
-       dd->req = req;
-
-       err = omap_sham_hw_init(dd);
-       if (err)
+       err = pm_runtime_resume_and_get(dd->dev);
+       if (err < 0) {
+               dev_err(dd->dev, "failed to get sync: %d\n", err);
                return err;
+       }
+
+       dd->err = 0;
+       dd->req = req;
 
        if (ctx->digcnt)
                dd->pdata->copy_hash(req, 0);
@@ -1736,7 +1720,7 @@ static void omap_sham_done_task(unsigned long data)
                if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
                        goto finish;
        } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
-               if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
+               if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
                        omap_sham_update_dma_stop(dd);
                        if (dd->err) {
                                err = dd->err;
@@ -2129,7 +2113,6 @@ static int omap_sham_probe(struct platform_device *pdev)
        dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
 
        pm_runtime_enable(dev);
-       pm_runtime_irq_safe(dev);
 
        err = pm_runtime_get_sync(dev);
        if (err < 0) {
@@ -2144,9 +2127,9 @@ static int omap_sham_probe(struct platform_device *pdev)
                (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
                (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
 
-       spin_lock(&sham.lock);
+       spin_lock_bh(&sham.lock);
        list_add_tail(&dd->list, &sham.dev_list);
-       spin_unlock(&sham.lock);
+       spin_unlock_bh(&sham.lock);
 
        dd->engine = crypto_engine_alloc_init(dev, 1);
        if (!dd->engine) {
@@ -2194,10 +2177,11 @@ err_algs:
 err_engine_start:
        crypto_engine_exit(dd->engine);
 err_engine:
-       spin_lock(&sham.lock);
+       spin_lock_bh(&sham.lock);
        list_del(&dd->list);
-       spin_unlock(&sham.lock);
+       spin_unlock_bh(&sham.lock);
 err_pm:
+       pm_runtime_dont_use_autosuspend(dev);
        pm_runtime_disable(dev);
        if (!dd->polling_mode)
                dma_release_channel(dd->dma_lch);
@@ -2215,9 +2199,9 @@ static int omap_sham_remove(struct platform_device *pdev)
        dd = platform_get_drvdata(pdev);
        if (!dd)
                return -ENODEV;
-       spin_lock(&sham.lock);
+       spin_lock_bh(&sham.lock);
        list_del(&dd->list);
-       spin_unlock(&sham.lock);
+       spin_unlock_bh(&sham.lock);
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
                        crypto_unregister_ahash(
@@ -2225,6 +2209,7 @@ static int omap_sham_remove(struct platform_device *pdev)
                        dd->pdata->algs_info[i].registered--;
                }
        tasklet_kill(&dd->done_task);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
        if (!dd->polling_mode)
@@ -2235,32 +2220,11 @@ static int omap_sham_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int omap_sham_suspend(struct device *dev)
-{
-       pm_runtime_put_sync(dev);
-       return 0;
-}
-
-static int omap_sham_resume(struct device *dev)
-{
-       int err = pm_runtime_resume_and_get(dev);
-       if (err < 0) {
-               dev_err(dev, "failed to get sync: %d\n", err);
-               return err;
-       }
-       return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
-
 static struct platform_driver omap_sham_driver = {
        .probe  = omap_sham_probe,
        .remove = omap_sham_remove,
        .driver = {
                .name   = "omap-sham",
-               .pm     = &omap_sham_pm_ops,
                .of_match_table = omap_sham_of_match,
        },
 };
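
The omap-sham hunks above drop omap_sham_hw_init(), the FLAGS_INIT bit, pm_runtime_irq_safe() and the dedicated suspend/resume callbacks, and instead take a runtime-PM reference directly in the request handler. A minimal sketch of the pm_runtime_resume_and_get() idiom used there; the balancing put on the completion path is not shown in these hunks and is an assumption:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_start_request(struct device *dev)
{
        int err;

        /* On failure the runtime-PM usage count has already been dropped,
         * so the error path needs no pm_runtime_put(). */
        err = pm_runtime_resume_and_get(dev);
        if (err < 0) {
                dev_err(dev, "failed to get sync: %d\n", err);
                return err;
        }

        /* ... program and run the hash operation ... */

        /* Assumed balancing put once the request completes. */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}
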
index 3524ddd..33d8e50 100644 (file)
@@ -161,7 +161,7 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
        ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
        return 0;
 }
@@ -210,21 +210,21 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
        hw_data->fw_mmp_name = ADF_4XXX_MMP;
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
        hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
        hw_data->uof_get_num_objs = uof_get_num_objs;
        hw_data->uof_get_name = uof_get_name;
        hw_data->uof_get_ae_mask = uof_get_ae_mask;
        hw_data->set_msix_rttable = set_msix_default_rttable;
        hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
 
        adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
 }
index a8805c8..359fb79 100644 (file)
@@ -221,16 +221,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* Set DMA identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration.\n");
-                       ret = -EFAULT;
-                       goto out_err;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration.\n");
+               goto out_err;
        }
 
        /* Get accelerator capabilities mask */
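
The nested pci_set_dma_mask()/pci_set_consistent_dma_mask() fallback is replaced by a single dma_set_mask_and_coherent() call here and in the other QAT probe routines below (which use a 48-bit mask). A minimal sketch of the idiom:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
        int ret;

        /* One call configures both the streaming and the coherent mask. */
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret)
                dev_err(&pdev->dev, "No usable DMA configuration\n");

        return ret;
}
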
index 1dd64af..3027c01 100644 (file)
@@ -111,11 +111,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C3XXX_PF2VF_OFFSET(i);
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C3XXX_VINTMSK_OFFSET(i);
-}
-
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -159,8 +154,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
                   ADF_C3XXX_SMIA1_MASK);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
+       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
        return 0;
 }
 
@@ -193,8 +190,6 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
        hw_data->get_sram_bar_id = get_sram_bar_id;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_admin_info = adf_gen2_get_admin_info;
        hw_data->get_arb_info = adf_gen2_get_arb_info;
        hw_data->get_sku = get_sku;
@@ -203,16 +198,18 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
        hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+       hw_data->get_pf2vf_offset = get_pf2vf_offset;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
 }
 
index fece8e3..86ee02a 100644 (file)
@@ -29,7 +29,6 @@
 #define ADF_C3XXX_ERRSSMSH_EN BIT(3)
 
 #define ADF_C3XXX_PF2VF_OFFSET(i)      (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_C3XXX_VINTMSK_OFFSET(i)    (0x3A000 + 0x200 + ((i) * 0x04))
 
 /* AE to function mapping */
 #define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
index 7fb3343..cc6e75d 100644 (file)
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_save_state(pdev)) {
                dev_err(&pdev->dev, "Failed to save pci state\n");
                ret = -ENOMEM;
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
        }
 
        ret = qat_crypto_dev_config(accel_dev);
        if (ret)
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
 
        ret = adf_dev_init(accel_dev);
        if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
        adf_dev_stop(accel_dev);
 out_err_dev_shutdown:
        adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+       adf_disable_aer(accel_dev);
 out_err_free_reg:
        pci_release_regions(accel_pci_dev->pci_dev);
 out_err_disable:
index 15f6b9b..3e69b52 100644 (file)
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C3XXXIOV_PF2VF_OFFSET;
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C3XXXIOV_VINTMSK_OFFSET;
-}
-
 static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
 {
        return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
        hw_data->enable_error_correction = adf_vf_void_noop;
        hw_data->init_admin_comms = adf_vf_int_noop;
        hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_init;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
        hw_data->init_arb = adf_vf_int_noop;
        hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_shutdown;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_sku = get_sku;
        hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
        hw_data->dev_class->instances++;
        adf_devmgr_update_class_index(hw_data);
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
index 7945a9c..f5de4ce 100644 (file)
@@ -13,7 +13,6 @@
 #define ADF_C3XXXIOV_ETR_BAR 0
 #define ADF_C3XXXIOV_ETR_MAX_BANKS 1
 #define ADF_C3XXXIOV_PF2VF_OFFSET      0x200
-#define ADF_C3XXXIOV_VINTMSK_OFFSET    0x208
 
 void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
 void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
index 067ca5e..1df1b86 100644 (file)
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
                pr_err("QAT: Driver removal failed\n");
                return;
        }
+       adf_flush_vf_wq(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);
        adf_cleanup_accel(accel_dev);
index 3033739..b023c80 100644 (file)
@@ -113,11 +113,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C62X_PF2VF_OFFSET(i);
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C62X_VINTMSK_OFFSET(i);
-}
-
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -161,8 +156,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
                   ADF_C62X_SMIA1_MASK);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
+       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
        return 0;
 }
 
@@ -195,8 +192,6 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
        hw_data->get_sram_bar_id = get_sram_bar_id;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_admin_info = adf_gen2_get_admin_info;
        hw_data->get_arb_info = adf_gen2_get_arb_info;
        hw_data->get_sku = get_sku;
@@ -205,16 +200,18 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
        hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+       hw_data->get_pf2vf_offset = get_pf2vf_offset;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
 }
 
index 53d3cb5..e6664bd 100644 (file)
@@ -30,7 +30,6 @@
 #define ADF_C62X_ERRSSMSH_EN BIT(3)
 
 #define ADF_C62X_PF2VF_OFFSET(i)       (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_C62X_VINTMSK_OFFSET(i)     (0x3A000 + 0x200 + ((i) * 0x04))
 
 /* AE to function mapping */
 #define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
index 1f5de44..bf251df 100644 (file)
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_save_state(pdev)) {
                dev_err(&pdev->dev, "Failed to save pci state\n");
                ret = -ENOMEM;
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
        }
 
        ret = qat_crypto_dev_config(accel_dev);
        if (ret)
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
 
        ret = adf_dev_init(accel_dev);
        if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
        adf_dev_stop(accel_dev);
 out_err_dev_shutdown:
        adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+       adf_disable_aer(accel_dev);
 out_err_free_reg:
        pci_release_regions(accel_pci_dev->pci_dev);
 out_err_disable:
index d231583..3bee3e4 100644 (file)
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C62XIOV_PF2VF_OFFSET;
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C62XIOV_VINTMSK_OFFSET;
-}
-
 static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
 {
        return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
        hw_data->enable_error_correction = adf_vf_void_noop;
        hw_data->init_admin_comms = adf_vf_int_noop;
        hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_init;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
        hw_data->init_arb = adf_vf_int_noop;
        hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_shutdown;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_sku = get_sku;
        hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
        hw_data->dev_class->instances++;
        adf_devmgr_update_class_index(hw_data);
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
index a6c04cf..794778c 100644 (file)
@@ -13,7 +13,6 @@
 #define ADF_C62XIOV_ETR_BAR 0
 #define ADF_C62XIOV_ETR_MAX_BANKS 1
 #define ADF_C62XIOV_PF2VF_OFFSET       0x200
-#define ADF_C62XIOV_VINTMSK_OFFSET     0x208
 
 void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
 void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
index 51ea88c..8103bd8 100644 (file)
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
                pr_err("QAT: Driver removal failed\n");
                return;
        }
+       adf_flush_vf_wq(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);
        adf_cleanup_accel(accel_dev);
index ac435b4..38c0af6 100644 (file)
@@ -18,8 +18,6 @@
 #define ADF_4XXX_DEVICE_NAME "4xxx"
 #define ADF_4XXX_PCI_DEVICE_ID 0x4940
 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
-#define ADF_ERRSOU3 (0x3A000 + 0x0C)
-#define ADF_ERRSOU5 (0x3A000 + 0xD8)
 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
 #define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -156,7 +154,6 @@ struct adf_hw_device_data {
        u32 (*get_num_aes)(struct adf_hw_device_data *self);
        u32 (*get_num_accels)(struct adf_hw_device_data *self);
        u32 (*get_pf2vf_offset)(u32 i);
-       u32 (*get_vintmsk_offset)(u32 i);
        void (*get_arb_info)(struct arb_info *arb_csrs_info);
        void (*get_admin_info)(struct admin_info *admin_csrs_info);
        enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
@@ -174,7 +171,7 @@ struct adf_hw_device_data {
                                      bool enable);
        void (*enable_ints)(struct adf_accel_dev *accel_dev);
        void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
-       int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
+       int (*enable_pfvf_comms)(struct adf_accel_dev *accel_dev);
        void (*reset_device)(struct adf_accel_dev *accel_dev);
        void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
        char *(*uof_get_name)(u32 obj_num);
@@ -227,7 +224,6 @@ struct adf_fw_loader_data {
 
 struct adf_accel_vf_info {
        struct adf_accel_dev *accel_dev;
-       struct tasklet_struct vf2pf_bh_tasklet;
        struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
        struct ratelimit_state vf2pf_ratelimit;
        u32 vf_nr;
@@ -249,6 +245,8 @@ struct adf_accel_dev {
        struct adf_accel_pci accel_pci_dev;
        union {
                struct {
+                       /* protects VF2PF interrupts access */
+                       spinlock_t vf2pf_ints_lock;
                        /* vf_info is non-zero when SR-IOV is init'ed */
                        struct adf_accel_vf_info *vf_info;
                } pf;
index d2ae293..ed3e40b 100644 (file)
@@ -194,7 +194,7 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev)
 EXPORT_SYMBOL_GPL(adf_enable_aer);
 
 /**
- * adf_disable_aer() - Enable Advance Error Reporting for acceleration device
+ * adf_disable_aer() - Disable Advance Error Reporting for acceleration device
  * @accel_dev:  Pointer to acceleration device.
  *
  * Function disables PCI Advance Error Reporting for the
index c614765..4261749 100644 (file)
@@ -193,22 +193,23 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
 void adf_disable_sriov(struct adf_accel_dev *accel_dev);
 void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
                                  u32 vf_mask);
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+                                     u32 vf_mask);
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
                                 u32 vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
 
-int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
 int adf_init_pf_wq(void);
 void adf_exit_pf_wq(void);
 int adf_init_vf_wq(void);
 void adf_exit_vf_wq(void);
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
 #else
-static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
-{
-       return 0;
-}
+#define adf_sriov_configure NULL
 
 static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 {
@@ -222,12 +223,12 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 }
 
-static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
 {
        return 0;
 }
 
-static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
 {
 }
 
@@ -249,5 +250,9 @@ static inline void adf_exit_vf_wq(void)
 {
 }
 
+static inline void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
+{
+}
+
 #endif
 #endif
index 744c403..60bc7b9 100644 (file)
@@ -61,6 +61,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
        struct service_hndl *service;
        struct list_head *list_itr;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int ret;
 
        if (!hw_data) {
                dev_err(&GET_DEV(accel_dev),
@@ -88,8 +89,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
                return -EFAULT;
        }
 
-       hw_data->enable_ints(accel_dev);
-
        if (adf_ae_init(accel_dev)) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to initialise Acceleration Engine\n");
@@ -110,6 +109,13 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
        }
        set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
 
+       hw_data->enable_ints(accel_dev);
+       hw_data->enable_error_correction(accel_dev);
+
+       ret = hw_data->enable_pfvf_comms(accel_dev);
+       if (ret)
+               return ret;
+
        /*
         * Subservice initialisation is divided into two stages: init and start.
         * This is to facilitate any ordering dependencies between services
@@ -126,9 +132,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
                set_bit(accel_dev->accel_id, service->init_status);
        }
 
-       hw_data->enable_error_correction(accel_dev);
-       hw_data->enable_vf2pf_comms(accel_dev);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(adf_dev_init);
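
For readability, a skeleton of the ordering that results from the hunks above (only the calls visible in the diff are named; everything else is elided):

static int example_dev_init_order(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        int ret;

        /* ... engine init and IRQ allocation come first ... */

        hw_data->enable_ints(accel_dev);
        hw_data->enable_error_correction(accel_dev);

        /* PF<->VF comms is set up before any subservice init/start,
         * and its result is now propagated. */
        ret = hw_data->enable_pfvf_comms(accel_dev);
        if (ret)
                return ret;

        /* ... per-service init and start follow ... */
        return 0;
}
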
index e3ad558..c678d5c 100644 (file)
 #include "adf_transport_access_macros.h"
 #include "adf_transport_internal.h"
 
+#define ADF_MAX_NUM_VFS        32
+#define ADF_ERRSOU3    (0x3A000 + 0x0C)
+#define ADF_ERRSOU5    (0x3A000 + 0xD8)
+#define ADF_ERRMSK3    (0x3A000 + 0x1C)
+#define ADF_ERRMSK5    (0x3A000 + 0xDC)
+#define ADF_ERR_REG_VF2PF_L(vf_src)    (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_ERR_REG_VF2PF_U(vf_src)    (((vf_src) & 0x0000FFFF) << 16)
+
 static int adf_enable_msix(struct adf_accel_dev *accel_dev)
 {
        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -71,14 +79,23 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_bar *pmisc =
                        &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
-               void __iomem *pmisc_bar_addr = pmisc->virt_addr;
-               u32 vf_mask;
+               void __iomem *pmisc_addr = pmisc->virt_addr;
+               u32 errsou3, errsou5, errmsk3, errmsk5;
+               unsigned long vf_mask;
 
                /* Get the interrupt sources triggered by VFs */
-               vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
-                           0x0000FFFF) << 16) |
-                         ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
-                           0x01FFFE00) >> 9);
+               errsou3 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU3);
+               errsou5 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU5);
+               vf_mask = ADF_ERR_REG_VF2PF_L(errsou3);
+               vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5);
+
+               /* To avoid adding duplicate entries to work queue, clear
+                * vf_int_mask_sets bits that are already masked in ERRMSK register.
+                */
+               errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK3);
+               errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK5);
+               vf_mask &= ~ADF_ERR_REG_VF2PF_L(errmsk3);
+               vf_mask &= ~ADF_ERR_REG_VF2PF_U(errmsk5);
 
                if (vf_mask) {
                        struct adf_accel_vf_info *vf_info;
@@ -86,15 +103,13 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
                        int i;
 
                        /* Disable VF2PF interrupts for VFs with pending ints */
-                       adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+                       adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
 
                        /*
-                        * Schedule tasklets to handle VF2PF interrupt BHs
-                        * unless the VF is malicious and is attempting to
-                        * flood the host OS with VF2PF interrupts.
+                        * Handle VF2PF interrupt unless the VF is malicious and
+                        * is attempting to flood the host OS with VF2PF interrupts.
                         */
-                       for_each_set_bit(i, (const unsigned long *)&vf_mask,
-                                        (sizeof(vf_mask) * BITS_PER_BYTE)) {
+                       for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
                                vf_info = accel_dev->pf.vf_info + i;
 
                                if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
@@ -104,8 +119,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
                                        continue;
                                }
 
-                               /* Tasklet will re-enable ints from this VF */
-                               tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+                               adf_schedule_vf2pf_handler(vf_info);
                                irq_handled = true;
                        }
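
The reworked PF ISR above derives the set of VFs with pending, unmasked interrupts from the ERRSOU and ERRMSK registers and walks it with for_each_set_bit() bounded by ADF_MAX_NUM_VFS. A standalone sketch of that bit manipulation, using illustrative register values instead of MMIO reads:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_MAX_NUM_VFS     32

static void example_walk_pending_vfs(u32 errsou3, u32 errsou5,
                                     u32 errmsk3, u32 errmsk5)
{
        unsigned long vf_mask;
        unsigned int i;

        /* Bits 9..24 of ERRSOU3 carry VFs 0..15, the low 16 bits of
         * ERRSOU5 carry VFs 16..31 (same layout as the macros above). */
        vf_mask = ((errsou3 & 0x01FFFE00) >> 9) |
                  ((errsou5 & 0x0000FFFF) << 16);

        /* Drop sources already masked in ERRMSK, so no VF is queued twice. */
        vf_mask &= ~(((errmsk3 & 0x01FFFE00) >> 9) |
                     ((errmsk5 & 0x0000FFFF) << 16));

        for_each_set_bit(i, &vf_mask, EXAMPLE_MAX_NUM_VFS) {
                /* handle one VF with a pending, unmasked interrupt */
        }
}
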
 
index a1b77bd..976b9ab 100644 (file)
 #define ADF_DH895XCC_ERRMSK5   (ADF_DH895XCC_EP_OFFSET + 0xDC)
 #define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
 
-void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_bar_addr =
-               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
-
-       ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
-}
-
-void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_bar_addr =
-               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
-
-       ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
-}
-
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
-                                u32 vf_mask)
+static void __adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+                                         u32 vf_mask)
 {
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
@@ -55,7 +35,17 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
        }
 }
 
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+       __adf_enable_vf2pf_interrupts(accel_dev, vf_mask);
+       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+static void __adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+                                          u32 vf_mask)
 {
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
@@ -78,6 +68,22 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
        }
 }
 
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+       __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+       __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+       spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
+}
+
 static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
 {
        struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
@@ -186,7 +192,6 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(adf_iov_putmsg);
 
 void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
 {
@@ -216,7 +221,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
                resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                         (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
                          ADF_PF2VF_MSGTYPE_SHIFT) |
-                        (ADF_PFVF_COMPATIBILITY_VERSION <<
+                        (ADF_PFVF_COMPAT_THIS_VERSION <<
                          ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
 
                dev_dbg(&GET_DEV(accel_dev),
@@ -226,19 +231,19 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
                if (vf_compat_ver < hw_data->min_iov_compat_ver) {
                        dev_err(&GET_DEV(accel_dev),
                                "VF (vers %d) incompatible with PF (vers %d)\n",
-                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                               vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
-               } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+               } else if (vf_compat_ver > ADF_PFVF_COMPAT_THIS_VERSION) {
                        dev_err(&GET_DEV(accel_dev),
                                "VF (vers %d) compat with PF (vers %d) unkn.\n",
-                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                               vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                } else {
                        dev_dbg(&GET_DEV(accel_dev),
                                "VF (vers %d) compatible with PF (vers %d)\n",
-                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                               vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_COMPATIBLE <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                }
@@ -251,7 +256,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
                resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                         (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
                          ADF_PF2VF_MSGTYPE_SHIFT) |
-                        (ADF_PFVF_COMPATIBILITY_VERSION <<
+                        (ADF_PFVF_COMPAT_THIS_VERSION <<
                          ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
                resp |= ADF_PF2VF_VF_COMPATIBLE <<
                        ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
@@ -284,6 +289,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
 
        /* re-enable interrupt on PF from this VF */
        adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+
        return;
 err:
        dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
@@ -313,8 +319,10 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
 
        msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
        msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
-       msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
-       BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+       msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+       BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
+
+       reinit_completion(&accel_dev->vf.iov_msg_completion);
 
        /* Send request from VF to PF */
        ret = adf_iov_putmsg(accel_dev, msg, 0);
@@ -338,14 +346,16 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
                break;
        case ADF_PF2VF_VF_COMPAT_UNKNOWN:
                /* VF is newer than PF and decides whether it is compatible */
-               if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+               if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) {
+                       accel_dev->vf.compatible = ADF_PF2VF_VF_COMPATIBLE;
                        break;
+               }
                fallthrough;
        case ADF_PF2VF_VF_INCOMPATIBLE:
                dev_err(&GET_DEV(accel_dev),
                        "PF (vers %d) and VF (vers %d) are not compatible\n",
                        accel_dev->vf.pf_version,
-                       ADF_PFVF_COMPATIBILITY_VERSION);
+                       ADF_PFVF_COMPAT_THIS_VERSION);
                return -EINVAL;
        default:
                dev_err(&GET_DEV(accel_dev),
index 0690c03..ffd43aa 100644 (file)
@@ -52,7 +52,7 @@
  * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
  */
 
-#define ADF_PFVF_COMPATIBILITY_VERSION         0x1     /* PF<->VF compat */
+#define ADF_PFVF_COMPAT_THIS_VERSION           0x1     /* PF<->VF compat */
 
 /* PF->VF messages */
 #define ADF_PF2VF_INT                          BIT(0)
index 8c822c2..90ec057 100644 (file)
@@ -24,9 +24,8 @@ static void adf_iov_send_resp(struct work_struct *work)
        kfree(pf2vf_resp);
 }
 
-static void adf_vf2pf_bh_handler(void *data)
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
 {
-       struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
        struct adf_pf2vf_resp *pf2vf_resp;
 
        pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
@@ -52,9 +51,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
                vf_info->accel_dev = accel_dev;
                vf_info->vf_nr = i;
 
-               tasklet_init(&vf_info->vf2pf_bh_tasklet,
-                            (void *)adf_vf2pf_bh_handler,
-                            (unsigned long)vf_info);
                mutex_init(&vf_info->pf2vf_lock);
                ratelimit_state_init(&vf_info->vf2pf_ratelimit,
                                     DEFAULT_RATELIMIT_INTERVAL,
@@ -110,8 +106,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
                hw_data->configure_iov_threads(accel_dev, false);
 
        for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
-               tasklet_disable(&vf->vf2pf_bh_tasklet);
-               tasklet_kill(&vf->vf2pf_bh_tasklet);
                mutex_destroy(&vf->pf2vf_lock);
        }
 
index e85bd62..3e25fac 100644 (file)
@@ -5,14 +5,14 @@
 #include "adf_pf2vf_msg.h"
 
 /**
- * adf_vf2pf_init() - send init msg to PF
+ * adf_vf2pf_notify_init() - send init msg to PF
  * @accel_dev:  Pointer to acceleration VF device.
  *
  * Function sends an init message from the VF to a PF
  *
  * Return: 0 on success, error code otherwise.
  */
-int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
 {
        u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
                (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
@@ -25,17 +25,17 @@ int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
        set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
        return 0;
 }
-EXPORT_SYMBOL_GPL(adf_vf2pf_init);
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
 
 /**
- * adf_vf2pf_shutdown() - send shutdown msg to PF
+ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
  * @accel_dev:  Pointer to acceleration VF device.
  *
  * Function sends a shutdown message from the VF to a PF
  *
  * Return: void
  */
-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
 {
        u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
            (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
@@ -45,4 +45,4 @@ void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to send Shutdown event to PF\n");
 }
-EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
index 888388a..7828a65 100644 (file)
@@ -18,6 +18,7 @@
 #include "adf_pf2vf_msg.h"
 
 #define ADF_VINTSOU_OFFSET     0x204
+#define ADF_VINTMSK_OFFSET     0x208
 #define ADF_VINTSOU_BUN                BIT(0)
 #define ADF_VINTSOU_PF2VF      BIT(1)
 
@@ -28,6 +29,27 @@ struct adf_vf_stop_data {
        struct work_struct work;
 };
 
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_bar_addr =
+               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+       ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x0);
+}
+
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_bar_addr =
+               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+       ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
 static int adf_enable_msi(struct adf_accel_dev *accel_dev)
 {
        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -160,11 +182,21 @@ static irqreturn_t adf_isr(int irq, void *privdata)
        struct adf_bar *pmisc =
                        &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        void __iomem *pmisc_bar_addr = pmisc->virt_addr;
-       u32 v_int;
+       bool handled = false;
+       u32 v_int, v_mask;
 
        /* Read VF INT source CSR to determine the source of VF interrupt */
        v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
 
+       /* Read VF INT mask CSR to determine which sources are masked */
+       v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
+
+       /*
+        * Recompute v_int ignoring sources that are masked. This is to
+        * avoid rescheduling the tasklet for interrupts already handled
+        */
+       v_int &= ~v_mask;
+
        /* Check for PF2VF interrupt */
        if (v_int & ADF_VINTSOU_PF2VF) {
                /* Disable PF to VF interrupt */
@@ -172,7 +204,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
 
                /* Schedule tasklet to handle interrupt BH */
                tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
-               return IRQ_HANDLED;
+               handled = true;
        }
 
        /* Check bundle interrupt */
@@ -184,10 +216,10 @@ static irqreturn_t adf_isr(int irq, void *privdata)
                csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
                                                    bank->bank_number, 0);
                tasklet_hi_schedule(&bank->resp_handler);
-               return IRQ_HANDLED;
+               handled = true;
        }
 
-       return IRQ_NONE;
+       return handled ? IRQ_HANDLED : IRQ_NONE;
 }
 
 static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
@@ -285,6 +317,30 @@ err_out:
 }
 EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
 
+/**
+ * adf_flush_vf_wq() - Flush workqueue for VF
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables the PF/VF interrupts on the VF so that no new messages
+ * are received and flushes the workqueue 'adf_vf_stop_wq'.
+ *
+ * Return: void.
+ */
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
+{
+       adf_disable_pf2vf_interrupts(accel_dev);
+
+       flush_workqueue(adf_vf_stop_wq);
+}
+EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
+
+/**
+ * adf_init_vf_wq() - Init workqueue for VF
+ *
+ * Function init workqueue 'adf_vf_stop_wq' for VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
 int __init adf_init_vf_wq(void)
 {
        adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
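
The VF ISR change above stops returning early after the first interrupt source: it masks out already-masked sources, checks both the PF2VF and the bundle source, and reports IRQ_NONE only if neither fired. A minimal sketch of that accumulate-a-handled-flag pattern (the helper and bit positions are illustrative):

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/types.h>

static u32 example_read_unmasked_sources(void *privdata)
{
        return 0;       /* stand-in for reading VINTSOU and dropping VINTMSK bits */
}

static irqreturn_t example_vf_isr(int irq, void *privdata)
{
        u32 sources = example_read_unmasked_sources(privdata);
        bool handled = false;

        if (sources & BIT(1)) {         /* PF-to-VF message pending */
                /* mask the source and schedule its bottom half ... */
                handled = true;
        }

        if (sources & BIT(0)) {         /* ring bank interrupt pending */
                /* silence the bank and schedule its bottom half ... */
                handled = true;
        }

        return handled ? IRQ_HANDLED : IRQ_NONE;
}
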
index 7dd7cd6..0a9ce36 100644 (file)
@@ -131,11 +131,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_DH895XCC_PF2VF_OFFSET(i);
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_DH895XCC_VINTMSK_OFFSET(i);
-}
-
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -180,8 +175,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
                   ADF_DH895XCC_SMIA1_MASK);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
+       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
        return 0;
 }
 
@@ -213,8 +210,6 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
        hw_data->get_num_aes = get_num_aes;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_admin_info = adf_gen2_get_admin_info;
        hw_data->get_arb_info = adf_gen2_get_arb_info;
        hw_data->get_sram_bar_id = get_sram_bar_id;
@@ -224,15 +219,17 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_sbr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->get_pf2vf_offset = get_pf2vf_offset;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
 }
 
index 4d61392..f99319c 100644 (file)
@@ -35,7 +35,6 @@
 #define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
 
 #define ADF_DH895XCC_PF2VF_OFFSET(i)   (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
 
 /* AE to function mapping */
 #define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
index a9ec435..3976a81 100644 (file)
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_save_state(pdev)) {
                dev_err(&pdev->dev, "Failed to save pci state\n");
                ret = -ENOMEM;
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
        }
 
        ret = qat_crypto_dev_config(accel_dev);
        if (ret)
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
 
        ret = adf_dev_init(accel_dev);
        if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
        adf_dev_stop(accel_dev);
 out_err_dev_shutdown:
        adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+       adf_disable_aer(accel_dev);
 out_err_free_reg:
        pci_release_regions(accel_pci_dev->pci_dev);
 out_err_disable:
index f14fb82..7c6ed6b 100644 (file)
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_DH895XCCIOV_PF2VF_OFFSET;
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_DH895XCCIOV_VINTMSK_OFFSET;
-}
-
 static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
 {
        return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
        hw_data->enable_error_correction = adf_vf_void_noop;
        hw_data->init_admin_comms = adf_vf_int_noop;
        hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_init;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
        hw_data->init_arb = adf_vf_int_noop;
        hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_shutdown;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_sku = get_sku;
        hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
        hw_data->dev_class->instances++;
        adf_devmgr_update_class_index(hw_data);
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
index 2bfcc67..306ebb7 100644 (file)
@@ -13,7 +13,6 @@
 #define ADF_DH895XCCIOV_ETR_BAR 0
 #define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
 #define ADF_DH895XCCIOV_PF2VF_OFFSET   0x200
-#define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208
 
 void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
 void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
index 29999da..99d90f3 100644 (file)
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
                pr_err("QAT: Driver removal failed\n");
                return;
        }
+       adf_flush_vf_wq(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);
        adf_cleanup_accel(accel_dev);
index 080955a..e2375d9 100644 (file)
@@ -187,9 +187,9 @@ static int virtcrypto_init_vqs(struct virtio_crypto *vi)
        if (ret)
                goto err_free;
 
-       get_online_cpus();
+       cpus_read_lock();
        virtcrypto_set_affinity(vi);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return 0;
 
index 5fa6ae9..44736cb 100644 (file)
@@ -313,7 +313,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
                return -ENXIO;
 
        if (nr_pages < 0)
-               return nr_pages;
+               return -EINVAL;
 
        avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
                        kaddr, pfn);
index 26482c7..fc708be 100644 (file)
@@ -294,6 +294,14 @@ struct idxd_desc {
        struct idxd_wq *wq;
 };
 
+/*
+ * This is software defined error for the completion status. We overload the error code
+ * that will never appear in completion status and only SWERR register.
+ */
+enum idxd_completion_status {
+       IDXD_COMP_DESC_ABORT = 0xff,
+};
+
 #define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
 #define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
 
@@ -482,4 +490,10 @@ static inline void perfmon_init(void) {}
 static inline void perfmon_exit(void) {}
 #endif
 
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
+{
+       idxd_dma_complete_txd(desc, reason);
+       idxd_free_desc(desc->wq, desc);
+}
+
 #endif
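
The new IDXD_COMP_DESC_ABORT value works because 0xff never appears as a hardware completion status (per the comment above, it is only used in the SWERR register), so software can stamp it into a descriptor's completion record when flushing it and the interrupt path can then complete that descriptor as aborted. The writer side is not shown in these hunks, so the helper below is hypothetical:

#include <linux/types.h>

/* Hypothetical helper: mark a flushed descriptor so the completion path
 * treats it as an abort rather than as a device completion. */
static void example_mark_desc_aborted(u8 *completion_status)
{
        *completion_status = 0xff;      /* IDXD_COMP_DESC_ABORT */
}
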
index c8ae41d..c0f4c04 100644 (file)
@@ -102,6 +102,8 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
                spin_lock_init(&idxd->irq_entries[i].list_lock);
        }
 
+       idxd_msix_perm_setup(idxd);
+
        irq_entry = &idxd->irq_entries[0];
        rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
                                  0, "idxd-misc", irq_entry);
@@ -148,7 +150,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
        }
 
        idxd_unmask_error_interrupts(idxd);
-       idxd_msix_perm_setup(idxd);
        return 0;
 
  err_wq_irqs:
@@ -162,6 +163,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
  err_misc_irq:
        /* Disable error interrupt generation */
        idxd_mask_error_interrupts(idxd);
+       idxd_msix_perm_clear(idxd);
  err_irq_entries:
        pci_free_irq_vectors(pdev);
        dev_err(dev, "No usable interrupts\n");
@@ -758,32 +760,40 @@ static void idxd_shutdown(struct pci_dev *pdev)
        for (i = 0; i < msixcnt; i++) {
                irq_entry = &idxd->irq_entries[i];
                synchronize_irq(irq_entry->vector);
-               free_irq(irq_entry->vector, irq_entry);
                if (i == 0)
                        continue;
                idxd_flush_pending_llist(irq_entry);
                idxd_flush_work_list(irq_entry);
        }
-
-       idxd_msix_perm_clear(idxd);
-       idxd_release_int_handles(idxd);
-       pci_free_irq_vectors(pdev);
-       pci_iounmap(pdev, idxd->reg_base);
-       pci_disable_device(pdev);
-       destroy_workqueue(idxd->wq);
+       flush_workqueue(idxd->wq);
 }
 
 static void idxd_remove(struct pci_dev *pdev)
 {
        struct idxd_device *idxd = pci_get_drvdata(pdev);
+       struct idxd_irq_entry *irq_entry;
+       int msixcnt = pci_msix_vec_count(pdev);
+       int i;
 
        dev_dbg(&pdev->dev, "%s called\n", __func__);
        idxd_shutdown(pdev);
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
        idxd_unregister_devices(idxd);
-       perfmon_pmu_remove(idxd);
+
+       for (i = 0; i < msixcnt; i++) {
+               irq_entry = &idxd->irq_entries[i];
+               free_irq(irq_entry->vector, irq_entry);
+       }
+       idxd_msix_perm_clear(idxd);
+       idxd_release_int_handles(idxd);
+       pci_free_irq_vectors(pdev);
+       pci_iounmap(pdev, idxd->reg_base);
        iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+       pci_disable_device(pdev);
+       destroy_workqueue(idxd->wq);
+       perfmon_pmu_remove(idxd);
+       device_unregister(&idxd->conf_dev);
 }
 
 static struct pci_driver idxd_pci_driver = {
index ae68e1e..4e3a719 100644 (file)
@@ -245,12 +245,6 @@ static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
        return false;
 }
 
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
-       idxd_dma_complete_txd(desc, reason);
-       idxd_free_desc(desc->wq, desc);
-}
-
 static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
                                     enum irq_work_type wtype,
                                     int *processed, u64 data)
@@ -272,8 +266,16 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
                reason = IDXD_COMPLETE_DEV_FAIL;
 
        llist_for_each_entry_safe(desc, t, head, llnode) {
-               if (desc->completion->status) {
-                       if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+               u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+               if (status) {
+                       if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+                               complete_desc(desc, IDXD_COMPLETE_ABORT);
+                               (*processed)++;
+                               continue;
+                       }
+
+                       if (unlikely(status != DSA_COMP_SUCCESS))
                                match_fault(desc, data);
                        complete_desc(desc, reason);
                        (*processed)++;
@@ -329,7 +331,14 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
        spin_unlock_irqrestore(&irq_entry->list_lock, flags);
 
        list_for_each_entry(desc, &flist, list) {
-               if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+               u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+               if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+                       complete_desc(desc, IDXD_COMPLETE_ABORT);
+                       continue;
+               }
+
+               if (unlikely(status != DSA_COMP_SUCCESS))
                        match_fault(desc, data);
                complete_desc(desc, reason);
        }
index 19afb62..36c9c1a 100644 (file)
@@ -25,11 +25,10 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
         * Descriptor completion vectors are 1...N for MSIX. We will round
         * robin through the N vectors.
         */
-       wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+       wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
        if (!idxd->int_handles) {
                desc->hw->int_handle = wq->vec_ptr;
        } else {
-               desc->vector = wq->vec_ptr;
                /*
                 * int_handles are only for descriptor completion. However for device
                 * MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
@@ -88,9 +87,64 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
        sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
 }
 
+static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+                                        struct idxd_desc *desc)
+{
+       struct idxd_desc *d, *n;
+
+       lockdep_assert_held(&ie->list_lock);
+       list_for_each_entry_safe(d, n, &ie->work_list, list) {
+               if (d == desc) {
+                       list_del(&d->list);
+                       return d;
+               }
+       }
+
+       /*
+        * At this point, the desc that needs to be aborted is held by the completion
+        * handler, which has taken it off the pending list but has not yet added it to
+        * the work list. It will be cleaned up by the interrupt handler when it sees
+        * IDXD_COMP_DESC_ABORT as the completion status.
+        */
+       return NULL;
+}
+
+static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+                            struct idxd_desc *desc)
+{
+       struct idxd_desc *d, *t, *found = NULL;
+       struct llist_node *head;
+       unsigned long flags;
+
+       desc->completion->status = IDXD_COMP_DESC_ABORT;
+       /*
+        * Grab the list lock so it will block the irq thread handler. This allows the
+        * abort code to locate the descriptor that needs to be aborted.
+        */
+       spin_lock_irqsave(&ie->list_lock, flags);
+       head = llist_del_all(&ie->pending_llist);
+       if (head) {
+               llist_for_each_entry_safe(d, t, head, llnode) {
+                       if (d == desc) {
+                               found = desc;
+                               continue;
+                       }
+                       list_add_tail(&desc->list, &ie->work_list);
+               }
+       }
+
+       if (!found)
+               found = list_abort_desc(wq, ie, desc);
+       spin_unlock_irqrestore(&ie->list_lock, flags);
+
+       if (found)
+               complete_desc(found, IDXD_COMPLETE_ABORT);
+}
+
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 {
        struct idxd_device *idxd = wq->idxd;
+       struct idxd_irq_entry *ie = NULL;
        void __iomem *portal;
        int rc;
 
@@ -108,6 +162,16 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
         * even on UP because the recipient is a device.
         */
        wmb();
+
+       /*
+        * Add the descriptor to the lockless pending list of the irq_entry
+        * that the descriptor was designated to.
+        */
+       if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
+               ie = &idxd->irq_entries[desc->vector];
+               llist_add(&desc->llnode, &ie->pending_llist);
+       }
+
        if (wq_dedicated(wq)) {
                iosubmit_cmds512(portal, desc->hw, 1);
        } else {
@@ -118,29 +182,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
                 * device is not accepting descriptor at all.
                 */
                rc = enqcmds(portal, desc->hw);
-               if (rc < 0)
+               if (rc < 0) {
+                       if (ie)
+                               llist_abort_desc(wq, ie, desc);
                        return rc;
+               }
        }
 
        percpu_ref_put(&wq->wq_active);
-
-       /*
-        * Pending the descriptor to the lockless list for the irq_entry
-        * that we designated the descriptor to.
-        */
-       if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
-               int vec;
-
-               /*
-                * If the driver is on host kernel, it would be the value
-                * assigned to interrupt handle, which is index for MSIX
-                * vector. If it's guest then can't use the int_handle since
-                * that is the index to IMS for the entire device. The guest
-                * device local index will be used.
-                */
-               vec = !idxd->int_handles ? desc->hw->int_handle : desc->vector;
-               llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);
-       }
-
        return 0;
 }
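The abort path added above builds on the lockless list API from <linux/llist.h>: submitters call llist_add() without taking a lock, and the interrupt thread detaches the whole list atomically with llist_del_all() before walking it. A minimal, idxd-independent sketch of that producer/consumer pattern (all names below are illustrative):

#include <linux/llist.h>

struct example_item {
	struct llist_node llnode;
	int payload;
};

static LLIST_HEAD(example_pending);

/* Producer side: lock-free, safe from several CPUs and from IRQ context. */
static void example_post(struct example_item *item)
{
	llist_add(&item->llnode, &example_pending);
}

/* Consumer side: detach everything at once, then walk the snapshot. */
static int example_drain(void)
{
	struct llist_node *head = llist_del_all(&example_pending);
	struct example_item *it, *tmp;
	int sum = 0;

	/* Entries come back newest-first; the _safe variant tolerates the
	 * current entry being freed or re-queued while walking. */
	llist_for_each_entry_safe(it, tmp, head, llnode)
		sum += it->payload;

	return sum;
}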
index 0460d58..bb4df63 100644 (file)
@@ -1744,8 +1744,6 @@ void idxd_unregister_devices(struct idxd_device *idxd)
 
                device_unregister(&group->conf_dev);
        }
-
-       device_unregister(&idxd->conf_dev);
 }
 
 int idxd_register_bus_type(void)
index 7f116bb..2ddc31e 100644 (file)
@@ -812,6 +812,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                dma_length += sg_dma_len(sg);
        }
 
+       imxdma_config_write(chan, &imxdmac->config, direction);
+
        switch (imxdmac->word_size) {
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
index ec00b20..ac61ecd 100644 (file)
@@ -67,8 +67,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
                return NULL;
 
        ofdma_target = of_dma_find_controller(&dma_spec_target);
-       if (!ofdma_target)
-               return NULL;
+       if (!ofdma_target) {
+               ofdma->dma_router->route_free(ofdma->dma_router->dev,
+                                             route_data);
+               chan = ERR_PTR(-EPROBE_DEFER);
+               goto err;
+       }
 
        chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
        if (IS_ERR_OR_NULL(chan)) {
@@ -89,6 +93,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
                }
        }
 
+err:
        /*
         * Need to put the node back since the ofdma->of_dma_route_allocate
         * has taken it for generating the new, translated dma_spec
index 8f7ceb6..1cc0690 100644 (file)
@@ -855,8 +855,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
 
 error:
        of_dma_controller_free(pdev->dev.of_node);
-       pm_runtime_put(&pdev->dev);
 error_pm:
+       pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return ret;
 }
index f54ecb1..7dd1d3d 100644 (file)
@@ -1200,7 +1200,7 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
 
        chan->config_init = false;
 
-       ret = pm_runtime_get_sync(dmadev->ddev.dev);
+       ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
        if (ret < 0)
                return ret;
 
@@ -1470,7 +1470,7 @@ static int stm32_dma_suspend(struct device *dev)
        struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
        int id, ret, scr;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
 
index ef0d055..a421643 100644 (file)
@@ -137,7 +137,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 
        /* Set dma request */
        spin_lock_irqsave(&dmamux->lock, flags);
-       ret = pm_runtime_get_sync(&pdev->dev);
+       ret = pm_runtime_resume_and_get(&pdev->dev);
        if (ret < 0) {
                spin_unlock_irqrestore(&dmamux->lock, flags);
                goto error;
@@ -336,7 +336,7 @@ static int stm32_dmamux_suspend(struct device *dev)
        struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
        int i, ret;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
 
@@ -361,7 +361,7 @@ static int stm32_dmamux_resume(struct device *dev)
        if (ret < 0)
                return ret;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
 
index 16b1965..d6b8a20 100644 (file)
@@ -209,8 +209,8 @@ static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
        writel(0, xc->reg_ch_base + XDMAC_TSS);
 
        /* wait until transfer is stopped */
-       return readl_poll_timeout(xc->reg_ch_base + XDMAC_STAT, val,
-                                 !(val & XDMAC_STAT_TENF), 100, 1000);
+       return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
+                                        !(val & XDMAC_STAT_TENF), 100, 1000);
 }
 
 /* xc->vc.lock must be held by caller */
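The uniphier hunk above switches to readl_poll_timeout_atomic() because readl_poll_timeout() may sleep between polls and therefore cannot run in atomic context (for instance with a spinlock held); the _atomic variant busy-waits with udelay() instead. A minimal sketch of its use, with an assumed BUSY bit rather than the driver's XDMAC_STAT_TENF:

#include <linux/bits.h>
#include <linux/iopoll.h>

/* Sketch: poll every 100 us, give up after 1000 us; returns 0 or -ETIMEDOUT. */
static int example_wait_idle(void __iomem *reg)
{
	u32 val;

	return readl_poll_timeout_atomic(reg, val, !(val & BIT(0)), 100, 1000);
}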
index 75c0b8e..4b9530a 100644 (file)
@@ -394,6 +394,7 @@ struct xilinx_dma_tx_descriptor {
  * @genlock: Support genlock mode
  * @err: Channel has errors
  * @idle: Check for channel idle
+ * @terminating: Check for channel being synchronized by user
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
@@ -431,6 +432,7 @@ struct xilinx_dma_chan {
        bool genlock;
        bool err;
        bool idle;
+       bool terminating;
        struct tasklet_struct tasklet;
        struct xilinx_vdma_config config;
        bool flush_on_fsync;
@@ -1049,6 +1051,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
                /* Run any dependencies, then free the descriptor */
                dma_run_dependencies(&desc->async_tx);
                xilinx_dma_free_tx_descriptor(chan, desc);
+
+               /*
+                * If the user called a terminate function while we ran the
+                * callback, it takes care of cleaning up any remaining descriptors.
+                */
+               if (chan->terminating)
+                       break;
        }
 
        spin_unlock_irqrestore(&chan->lock, flags);
@@ -1965,6 +1974,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
        if (desc->cyclic)
                chan->cyclic = true;
 
+       chan->terminating = false;
+
        spin_unlock_irqrestore(&chan->lock, flags);
 
        return cookie;
@@ -2436,6 +2447,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 
        xilinx_dma_chan_reset(chan);
        /* Remove and free all of the descriptors in the lists */
+       chan->terminating = true;
        xilinx_dma_free_descriptors(chan);
        chan->idle = true;
 
index 61c21bd..3a6d241 100644 (file)
@@ -539,10 +539,18 @@ module_platform_driver(altr_edac_driver);
  * trigger testing are different for each memory.
  */
 
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
 static const struct edac_device_prv_data ocramecc_data;
+#endif
+#ifdef CONFIG_EDAC_ALTERA_L2C
 static const struct edac_device_prv_data l2ecc_data;
+#endif
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
 static const struct edac_device_prv_data a10_ocramecc_data;
+#endif
+#ifdef CONFIG_EDAC_ALTERA_L2C
 static const struct edac_device_prv_data a10_l2ecc_data;
+#endif
 
 static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
 {
@@ -569,9 +577,9 @@ static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
        return ret_value;
 }
 
-static ssize_t altr_edac_device_trig(struct file *file,
-                                    const char __user *user_buf,
-                                    size_t count, loff_t *ppos)
+static ssize_t __maybe_unused
+altr_edac_device_trig(struct file *file, const char __user *user_buf,
+                     size_t count, loff_t *ppos)
 
 {
        u32 *ptemp, i, error_mask;
@@ -640,27 +648,27 @@ static ssize_t altr_edac_device_trig(struct file *file,
        return count;
 }
 
-static const struct file_operations altr_edac_device_inject_fops = {
+static const struct file_operations altr_edac_device_inject_fops __maybe_unused = {
        .open = simple_open,
        .write = altr_edac_device_trig,
        .llseek = generic_file_llseek,
 };
 
-static ssize_t altr_edac_a10_device_trig(struct file *file,
-                                        const char __user *user_buf,
-                                        size_t count, loff_t *ppos);
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
+                         size_t count, loff_t *ppos);
 
-static const struct file_operations altr_edac_a10_device_inject_fops = {
+static const struct file_operations altr_edac_a10_device_inject_fops __maybe_unused = {
        .open = simple_open,
        .write = altr_edac_a10_device_trig,
        .llseek = generic_file_llseek,
 };
 
-static ssize_t altr_edac_a10_device_trig2(struct file *file,
-                                         const char __user *user_buf,
-                                         size_t count, loff_t *ppos);
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
+                          size_t count, loff_t *ppos);
 
-static const struct file_operations altr_edac_a10_device_inject2_fops = {
+static const struct file_operations altr_edac_a10_device_inject2_fops __maybe_unused = {
        .open = simple_open,
        .write = altr_edac_a10_device_trig2,
        .llseek = generic_file_llseek,
@@ -1697,9 +1705,9 @@ MODULE_DEVICE_TABLE(of, altr_edac_a10_device_of_match);
  * Based on xgene_edac.c peripheral code.
  */
 
-static ssize_t altr_edac_a10_device_trig(struct file *file,
-                                        const char __user *user_buf,
-                                        size_t count, loff_t *ppos)
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
+                         size_t count, loff_t *ppos)
 {
        struct edac_device_ctl_info *edac_dci = file->private_data;
        struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
@@ -1729,9 +1737,9 @@ static ssize_t altr_edac_a10_device_trig(struct file *file,
  * slightly. A few Arria10 peripherals can use this injection function.
  * Inject the error into the memory and then readback to trigger the IRQ.
  */
-static ssize_t altr_edac_a10_device_trig2(struct file *file,
-                                         const char __user *user_buf,
-                                         size_t count, loff_t *ppos)
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
+                          size_t count, loff_t *ppos)
 {
        struct edac_device_ctl_info *edac_dci = file->private_data;
        struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
@@ -1804,11 +1812,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
        regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
 
        bits = irq_status;
-       for_each_set_bit(bit, &bits, 32) {
-               irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
-               if (irq)
-                       generic_handle_irq(irq);
-       }
+       for_each_set_bit(bit, &bits, 32)
+               generic_handle_domain_irq(edac->domain, dberr * 32 + bit);
 
        chained_irq_exit(chip, desc);
 }
index f0d8f60..99b06a3 100644 (file)
@@ -571,8 +571,8 @@ EDAC_DCT_ATTR_SHOW(dbam0);
 EDAC_DCT_ATTR_SHOW(top_mem);
 EDAC_DCT_ATTR_SHOW(top_mem2);
 
-static ssize_t hole_show(struct device *dev, struct device_attribute *mattr,
-                        char *data)
+static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
+                             char *data)
 {
        struct mem_ctl_info *mci = to_mci(dev);
 
@@ -593,7 +593,7 @@ static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
 static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
 static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
 static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
-static DEVICE_ATTR(dram_hole, S_IRUGO, hole_show, NULL);
+static DEVICE_ATTR_RO(dram_hole);
 
 static struct attribute *dbg_attrs[] = {
        &dev_attr_dhar.attr,
@@ -802,16 +802,11 @@ static ssize_t inject_write_store(struct device *dev,
  * update NUM_INJ_ATTRS in case you add new members
  */
 
-static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
-                  inject_section_show, inject_section_store);
-static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
-                  inject_word_show, inject_word_store);
-static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
-                  inject_ecc_vector_show, inject_ecc_vector_store);
-static DEVICE_ATTR(inject_write, S_IWUSR,
-                  NULL, inject_write_store);
-static DEVICE_ATTR(inject_read,  S_IWUSR,
-                  NULL, inject_read_store);
+static DEVICE_ATTR_RW(inject_section);
+static DEVICE_ATTR_RW(inject_word);
+static DEVICE_ATTR_RW(inject_ecc_vector);
+static DEVICE_ATTR_WO(inject_write);
+static DEVICE_ATTR_WO(inject_read);
 
 static struct attribute *inj_attrs[] = {
        &dev_attr_inject_section.attr,
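The rename of hole_show() to dram_hole_show() above is what makes DEVICE_ATTR_RO(dram_hole) possible: the DEVICE_ATTR_RO/RW/WO() helpers derive the callback names from the attribute name (<name>_show and <name>_store). A minimal sketch of the convention with an illustrative attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

/* DEVICE_ATTR_RO(example) emits dev_attr_example and expects example_show(). */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);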
index f6d462d..2c59756 100644 (file)
@@ -166,6 +166,7 @@ const char * const edac_mem_types[] = {
        [MEM_DDR5]      = "Unbuffered-DDR5",
        [MEM_NVDIMM]    = "Non-volatile-RAM",
        [MEM_WIO2]      = "Wide-IO-2",
+       [MEM_HBM2]      = "High-bandwidth-memory-Gen2",
 };
 EXPORT_SYMBOL_GPL(edac_mem_types);
 
index 6ce0ed2..83345bf 100644 (file)
 #define I10NM_GET_DIMMMTR(m, i, j)     \
        readl((m)->mbase + ((m)->hbm_mc ? 0x80c : 0x2080c) + \
        (i) * (m)->chan_mmio_sz + (j) * 4)
-#define I10NM_GET_MCDDRTCFG(m, i, j)   \
+#define I10NM_GET_MCDDRTCFG(m, i)      \
        readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
-       (i) * (m)->chan_mmio_sz + (j) * 4)
+       (i) * (m)->chan_mmio_sz)
 #define I10NM_GET_MCMTR(m, i)          \
        readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : 0x20ef8) + \
        (i) * (m)->chan_mmio_sz)
 #define I10NM_GET_AMAP(m, i)           \
        readl((m)->mbase + ((m)->hbm_mc ? 0x814 : 0x20814) + \
        (i) * (m)->chan_mmio_sz)
+#define I10NM_GET_REG32(m, i, offset)  \
+       readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
+#define I10NM_GET_REG64(m, i, offset)  \
+       readq((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
+#define I10NM_SET_REG32(m, i, offset, v)       \
+       writel(v, (m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
 
 #define I10NM_GET_SCK_MMIO_BASE(reg)   (GET_BITFIELD(reg, 0, 28) << 23)
 #define I10NM_GET_IMC_MMIO_OFFSET(reg) (GET_BITFIELD(reg, 0, 10) << 12)
 #define I10NM_SAD_ENABLE(reg)          GET_BITFIELD(reg, 0, 0)
 #define I10NM_SAD_NM_CACHEABLE(reg)    GET_BITFIELD(reg, 5, 5)
 
+#define RETRY_RD_ERR_LOG_UC            BIT(1)
+#define RETRY_RD_ERR_LOG_NOOVER                BIT(14)
+#define RETRY_RD_ERR_LOG_EN            BIT(15)
+#define RETRY_RD_ERR_LOG_NOOVER_UC     (BIT(14) | BIT(1))
+#define RETRY_RD_ERR_LOG_OVER_UC_V     (BIT(2) | BIT(1) | BIT(0))
+
 static struct list_head *i10nm_edac_list;
 
+static struct res_config *res_cfg;
+static int retry_rd_err_log;
+
+static u32 offsets_scrub_icx[]  = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
+static u32 offsets_scrub_spr[]  = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
+static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
+static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
+
+static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable)
+{
+       u32 s, d;
+
+       if (!imc->mbase)
+               return;
+
+       s = I10NM_GET_REG32(imc, chan, res_cfg->offsets_scrub[0]);
+       d = I10NM_GET_REG32(imc, chan, res_cfg->offsets_demand[0]);
+
+       if (enable) {
+               /* Save default configurations */
+               imc->chan[chan].retry_rd_err_log_s = s;
+               imc->chan[chan].retry_rd_err_log_d = d;
+
+               s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
+               s |=  RETRY_RD_ERR_LOG_EN;
+               d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
+               d |=  RETRY_RD_ERR_LOG_EN;
+       } else {
+               /* Restore default configurations */
+               if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
+                       s |=  RETRY_RD_ERR_LOG_UC;
+               if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
+                       s |=  RETRY_RD_ERR_LOG_NOOVER;
+               if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
+                       s &= ~RETRY_RD_ERR_LOG_EN;
+               if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
+                       d |=  RETRY_RD_ERR_LOG_UC;
+               if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
+                       d |=  RETRY_RD_ERR_LOG_NOOVER;
+               if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
+                       d &= ~RETRY_RD_ERR_LOG_EN;
+       }
+
+       I10NM_SET_REG32(imc, chan, res_cfg->offsets_scrub[0], s);
+       I10NM_SET_REG32(imc, chan, res_cfg->offsets_demand[0], d);
+}
+
+static void enable_retry_rd_err_log(bool enable)
+{
+       struct skx_dev *d;
+       int i, j;
+
+       edac_dbg(2, "\n");
+
+       list_for_each_entry(d, i10nm_edac_list, list)
+               for (i = 0; i < I10NM_NUM_IMC; i++)
+                       for (j = 0; j < I10NM_NUM_CHANNELS; j++)
+                               __enable_retry_rd_err_log(&d->imc[i], j, enable);
+}
+
+static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
+                                 int len, bool scrub_err)
+{
+       struct skx_imc *imc = &res->dev->imc[res->imc];
+       u32 log0, log1, log2, log3, log4;
+       u32 corr0, corr1, corr2, corr3;
+       u64 log2a, log5;
+       u32 *offsets;
+       int n;
+
+       if (!imc->mbase)
+               return;
+
+       offsets = scrub_err ? res_cfg->offsets_scrub : res_cfg->offsets_demand;
+
+       log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
+       log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
+       log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
+       log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
+       log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);
+
+       if (res_cfg->type == SPR) {
+               log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
+               n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx]",
+                            log0, log1, log2a, log3, log4, log5);
+       } else {
+               log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
+               n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
+                            log0, log1, log2, log3, log4, log5);
+       }
+
+       corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
+       corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
+       corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
+       corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
+
+       if (len - n > 0)
+               snprintf(msg + n, len - n,
+                        " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
+                        corr0 & 0xffff, corr0 >> 16,
+                        corr1 & 0xffff, corr1 >> 16,
+                        corr2 & 0xffff, corr2 >> 16,
+                        corr3 & 0xffff, corr3 >> 16);
+
+       /* Clear status bits */
+       if (retry_rd_err_log == 2 && (log0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
+               log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
+               I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+       }
+}
+
 static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
                                           unsigned int dev, unsigned int fun)
 {
@@ -263,6 +386,8 @@ static struct res_config i10nm_cfg0 = {
        .ddr_chan_mmio_sz       = 0x4000,
        .sad_all_devfn          = PCI_DEVFN(29, 0),
        .sad_all_offset         = 0x108,
+       .offsets_scrub          = offsets_scrub_icx,
+       .offsets_demand         = offsets_demand_icx,
 };
 
 static struct res_config i10nm_cfg1 = {
@@ -272,6 +397,8 @@ static struct res_config i10nm_cfg1 = {
        .ddr_chan_mmio_sz       = 0x4000,
        .sad_all_devfn          = PCI_DEVFN(29, 0),
        .sad_all_offset         = 0x108,
+       .offsets_scrub          = offsets_scrub_icx,
+       .offsets_demand         = offsets_demand_icx,
 };
 
 static struct res_config spr_cfg = {
@@ -283,6 +410,8 @@ static struct res_config spr_cfg = {
        .support_ddr5           = true,
        .sad_all_devfn          = PCI_DEVFN(10, 0),
        .sad_all_offset         = 0x300,
+       .offsets_scrub          = offsets_scrub_spr,
+       .offsets_demand         = offsets_demand_spr,
 };
 
 static const struct x86_cpu_id i10nm_cpuids[] = {
@@ -321,10 +450,10 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
 
                ndimms = 0;
                amap = I10NM_GET_AMAP(imc, i);
+               mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
                for (j = 0; j < imc->num_dimms; j++) {
                        dimm = edac_get_dimm(mci, i, j, 0);
                        mtr = I10NM_GET_DIMMMTR(imc, i, j);
-                       mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
                        edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
                                 mtr, mcddrtcfg, imc->mc, i, j);
 
@@ -422,6 +551,7 @@ static int __init i10nm_init(void)
                return -ENODEV;
 
        cfg = (struct res_config *)id->driver_data;
+       res_cfg = cfg;
 
        rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
        if (rc)
@@ -486,6 +616,12 @@ static int __init i10nm_init(void)
        mce_register_decode_chain(&i10nm_mce_dec);
        setup_i10nm_debug();
 
+       if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+               skx_set_decode(NULL, show_retry_rd_err_log);
+               if (retry_rd_err_log == 2)
+                       enable_retry_rd_err_log(true);
+       }
+
        i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);
 
        return 0;
@@ -497,6 +633,13 @@ fail:
 static void __exit i10nm_exit(void)
 {
        edac_dbg(2, "\n");
+
+       if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+               skx_set_decode(NULL, NULL);
+               if (retry_rd_err_log == 2)
+                       enable_retry_rd_err_log(false);
+       }
+
        teardown_i10nm_debug();
        mce_unregister_decode_chain(&i10nm_mce_dec);
        skx_adxl_put();
@@ -506,5 +649,8 @@ static void __exit i10nm_exit(void)
 module_init(i10nm_init);
 module_exit(i10nm_exit);
 
+module_param(retry_rd_err_log, int, 0444);
+MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values.), 2=linux(Linux tries to take control and resets mode bits, clears valid/UC bits after reading.)");
+
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");
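A usage note on the new option: module_param(..., 0444) makes retry_rd_err_log read-only through sysfs, so it has to be given at load time, e.g. modprobe i10nm_edac retry_rd_err_log=2 for the "linux" mode described in the parameter description (the module name is assumed here, it is not shown in these hunks).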
index 27d5692..67dbf4c 100644 (file)
@@ -1246,6 +1246,9 @@ static int __init mce_amd_init(void)
            c->x86_vendor != X86_VENDOR_HYGON)
                return -ENODEV;
 
+       if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+               return -ENODEV;
+
        if (boot_cpu_has(X86_FEATURE_SMCA)) {
                xec_mask = 0x3f;
                goto out;
index 4dbd465..1abc020 100644 (file)
@@ -230,7 +230,8 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg)
 #define SKX_ILV_TARGET(tgt)    ((tgt) & 7)
 
 static void skx_show_retry_rd_err_log(struct decoded_addr *res,
-                                     char *msg, int len)
+                                     char *msg, int len,
+                                     bool scrub_err)
 {
        u32 log0, log1, log2, log3, log4;
        u32 corr0, corr1, corr2, corr3;
index 5e83f59..19c17c5 100644 (file)
@@ -345,7 +345,10 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
        rows = numrow(mtr);
        cols = imc->hbm_mc ? 6 : numcol(mtr);
 
-       if (cfg->support_ddr5 && ((amap & 0x8) || imc->hbm_mc)) {
+       if (imc->hbm_mc) {
+               banks = 32;
+               mtype = MEM_HBM2;
+       } else if (cfg->support_ddr5 && (amap & 0x8)) {
                banks = 32;
                mtype = MEM_DDR5;
        } else {
@@ -529,6 +532,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
        bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
        bool overflow = GET_BITFIELD(m->status, 62, 62);
        bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+       bool scrub_err = false;
        bool recoverable;
        int len;
        u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
@@ -580,6 +584,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
                        break;
                case 4:
                        optype = "memory scrubbing error";
+                       scrub_err = true;
                        break;
                default:
                        optype = "reserved";
@@ -602,7 +607,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
        }
 
        if (skx_show_retry_rd_err_log)
-               skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len);
+               skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len, scrub_err);
 
        edac_dbg(0, "%s\n", skx_msg);
 
index 01f67e7..03ac067 100644 (file)
@@ -80,6 +80,8 @@ struct skx_dev {
                struct skx_channel {
                        struct pci_dev  *cdev;
                        struct pci_dev  *edev;
+                       u32 retry_rd_err_log_s;
+                       u32 retry_rd_err_log_d;
                        struct skx_dimm {
                                u8 close_pg;
                                u8 bank_xor_enable;
@@ -150,12 +152,15 @@ struct res_config {
        /* SAD device number and function number */
        unsigned int sad_all_devfn;
        int sad_all_offset;
+       /* Offsets of retry_rd_err_log registers */
+       u32 *offsets_scrub;
+       u32 *offsets_demand;
 };
 
 typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
                                 struct res_config *cfg);
 typedef bool (*skx_decode_f)(struct decoded_addr *res);
-typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len);
+typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len, bool scrub_err);
 
 int __init skx_adxl_get(void);
 void __exit skx_adxl_put(void);
index ed10da5..a5bf4c3 100644 (file)
@@ -212,10 +212,9 @@ static int tee_bnxt_fw_probe(struct device *dev)
 
        pvt_data.dev = dev;
 
-       fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
-                                   TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+       fw_shm_pool = tee_shm_alloc_kernel_buf(pvt_data.ctx, MAX_SHM_MEM_SZ);
        if (IS_ERR(fw_shm_pool)) {
-               dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
+               dev_err(pvt_data.dev, "tee_shm_alloc_kernel_buf failed\n");
                err = PTR_ERR(fw_shm_pool);
                goto out_sess;
        }
@@ -242,6 +241,14 @@ static int tee_bnxt_fw_remove(struct device *dev)
        return 0;
 }
 
+static void tee_bnxt_fw_shutdown(struct device *dev)
+{
+       tee_shm_free(pvt_data.fw_shm_pool);
+       tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+       tee_client_close_context(pvt_data.ctx);
+       pvt_data.ctx = NULL;
+}
+
 static const struct tee_client_device_id tee_bnxt_fw_id_table[] = {
        {UUID_INIT(0x6272636D, 0x2019, 0x0716,
                    0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)},
@@ -257,6 +264,7 @@ static struct tee_client_driver tee_bnxt_fw_driver = {
                .bus            = &tee_bus_type,
                .probe          = tee_bnxt_fw_probe,
                .remove         = tee_bnxt_fw_remove,
+               .shutdown       = tee_bnxt_fw_shutdown,
        },
 };
 
index ea7ca74..73bdbd2 100644 (file)
@@ -221,7 +221,7 @@ static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
                return 0;
 
        n = 0;
-       len = CPER_REC_LEN - 1;
+       len = CPER_REC_LEN;
        if (mem->validation_bits & CPER_MEM_VALID_NODE)
                n += scnprintf(msg + n, len - n, "node: %d ", mem->node);
        if (mem->validation_bits & CPER_MEM_VALID_CARD)
@@ -258,13 +258,12 @@ static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
                n += scnprintf(msg + n, len - n, "responder_id: 0x%016llx ",
                               mem->responder_id);
        if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
-               scnprintf(msg + n, len - n, "target_id: 0x%016llx ",
-                         mem->target_id);
+               n += scnprintf(msg + n, len - n, "target_id: 0x%016llx ",
+                              mem->target_id);
        if (mem->validation_bits & CPER_MEM_VALID_CHIP_ID)
-               scnprintf(msg + n, len - n, "chip_id: %d ",
-                         mem->extended >> CPER_MEM_CHIP_ID_SHIFT);
+               n += scnprintf(msg + n, len - n, "chip_id: %d ",
+                              mem->extended >> CPER_MEM_CHIP_ID_SHIFT);
 
-       msg[n] = '\0';
        return n;
 }
 
@@ -633,7 +632,7 @@ int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
        data_len = estatus->data_length;
 
        apei_estatus_for_each_section(estatus, gdata) {
-               if (sizeof(struct acpi_hest_generic_data) > data_len)
+               if (acpi_hest_get_size(gdata) > data_len)
                        return -EINVAL;
 
                record_size = acpi_hest_get_record_size(gdata);
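The cper_mem_err_location() changes above rely on scnprintf() semantics: it returns the number of characters actually written (excluding the trailing NUL) and always terminates the buffer, so each call must advance n and shrink the remaining space, and the explicit msg[n] = '\0' becomes redundant. A minimal sketch of the accumulation pattern with illustrative fields:

#include <linux/kernel.h>

static int example_format(char *msg, int len, int node, int card)
{
	int n = 0;

	/* Each call appends at msg + n and writes at most len - n bytes. */
	n += scnprintf(msg + n, len - n, "node: %d ", node);
	n += scnprintf(msg + n, len - n, "card: %d ", card);
	return n;	/* total length; the string is already NUL-terminated */
}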
index 7bf0a7a..2363fee 100644 (file)
@@ -35,15 +35,48 @@ efi_status_t check_platform_features(void)
 }
 
 /*
- * Although relocatable kernels can fix up the misalignment with respect to
- * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of
- * sync with those recorded in the vmlinux when kaslr is disabled but the
- * image required relocation anyway. Therefore retain 2M alignment unless
- * KASLR is in use.
+ * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
+ * to provide space, and fail to zero it). Check for this condition by double
+ * checking that the first and the last byte of the image are covered by the
+ * same EFI memory map entry.
  */
-static u64 min_kimg_align(void)
+static bool check_image_region(u64 base, u64 size)
 {
-       return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+       unsigned long map_size, desc_size, buff_size;
+       efi_memory_desc_t *memory_map;
+       struct efi_boot_memmap map;
+       efi_status_t status;
+       bool ret = false;
+       int map_offset;
+
+       map.map =       &memory_map;
+       map.map_size =  &map_size;
+       map.desc_size = &desc_size;
+       map.desc_ver =  NULL;
+       map.key_ptr =   NULL;
+       map.buff_size = &buff_size;
+
+       status = efi_get_memory_map(&map);
+       if (status != EFI_SUCCESS)
+               return false;
+
+       for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+               efi_memory_desc_t *md = (void *)memory_map + map_offset;
+               u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
+
+               /*
+                * Find the region that covers base, and return whether
+                * it covers base+size bytes.
+                */
+               if (base >= md->phys_addr && base < end) {
+                       ret = (base + size) <= end;
+                       break;
+               }
+       }
+
+       efi_bs_call(free_pool, memory_map);
+
+       return ret;
 }
 
 efi_status_t handle_kernel_image(unsigned long *image_addr,
@@ -56,6 +89,16 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
        unsigned long kernel_size, kernel_memsize = 0;
        u32 phys_seed = 0;
 
+       /*
+        * Although relocatable kernels can fix up the misalignment with
+        * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are
+        * subtly out of sync with those recorded in the vmlinux when kaslr is
+        * disabled but the image required relocation anyway. Therefore retain
+        * 2M alignment if KASLR was explicitly disabled, even if it was not
+        * going to be activated to begin with.
+        */
+       u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                if (!efi_nokaslr) {
                        status = efi_get_random_bytes(sizeof(phys_seed),
@@ -76,6 +119,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
        if (image->image_base != _text)
                efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
 
+       if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
+               efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
+                       EFI_KIMG_ALIGN >> 10);
+
        kernel_size = _edata - _text;
        kernel_memsize = kernel_size + (_end - _edata);
        *reserve_size = kernel_memsize;
@@ -85,14 +132,18 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
                 * If KASLR is enabled, and we have some randomness available,
                 * locate the kernel at a randomized offset in physical memory.
                 */
-               status = efi_random_alloc(*reserve_size, min_kimg_align(),
+               status = efi_random_alloc(*reserve_size, min_kimg_align,
                                          reserve_addr, phys_seed);
+               if (status != EFI_SUCCESS)
+                       efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
        } else {
                status = EFI_OUT_OF_RESOURCES;
        }
 
        if (status != EFI_SUCCESS) {
-               if (IS_ALIGNED((u64)_text, min_kimg_align())) {
+               if (!check_image_region((u64)_text, kernel_memsize)) {
+                       efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+               } else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
                        /*
                         * Just execute from wherever we were loaded by the
                         * UEFI PE/COFF loader if the alignment is suitable.
@@ -103,7 +154,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
                }
 
                status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
-                                                   ULONG_MAX, min_kimg_align());
+                                                   ULONG_MAX, min_kimg_align);
 
                if (status != EFI_SUCCESS) {
                        efi_err("Failed to relocate kernel\n");
index a408df4..724155b 100644 (file)
@@ -30,6 +30,8 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
 
        region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
                         (u64)ULONG_MAX);
+       if (region_end < size)
+               return 0;
 
        first_slot = round_up(md->phys_addr, align);
        last_slot = round_down(region_end - size + 1, align);
index 9f937b1..60ccf3e 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/arm-smccc.h>
 #include <linux/kernel.h>
+#include <linux/platform_device.h>
 #include <asm/archrandom.h>
 
 static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
@@ -42,3 +43,19 @@ u32 arm_smccc_get_version(void)
        return smccc_version;
 }
 EXPORT_SYMBOL_GPL(arm_smccc_get_version);
+
+static int __init smccc_devices_init(void)
+{
+       struct platform_device *pdev;
+
+       if (smccc_trng_available) {
+               pdev = platform_device_register_simple("smccc_trng", -1,
+                                                      NULL, 0);
+               if (IS_ERR(pdev))
+                       pr_err("smccc_trng: could not register device: %ld\n",
+                              PTR_ERR(pdev));
+       }
+
+       return 0;
+}
+device_initcall(smccc_devices_init);
index 4299145..587c82b 100644 (file)
@@ -953,6 +953,8 @@ static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
                return 0;
 
        priv->cpu = target;
+       perf_pmu_migrate_context(&priv->pmu, cpu, target);
+
        return 0;
 }
 
index fab5710..81abd89 100644 (file)
@@ -520,6 +520,14 @@ config GPIO_REG
          A 32-bit single register GPIO fixed in/out implementation.  This
          can be used to represent any register as a set of GPIO signals.
 
+config GPIO_ROCKCHIP
+       tristate "Rockchip GPIO support"
+       depends on ARCH_ROCKCHIP || COMPILE_TEST
+       select GPIOLIB_IRQCHIP
+       default ARCH_ROCKCHIP
+       help
+         Say yes here to support GPIO on Rockchip SoCs.
+
 config GPIO_SAMA5D2_PIOBU
        tristate "SAMA5D2 PIOBU GPIO support"
        depends on MFD_SYSCON
index 32a3265..5243e2d 100644 (file)
@@ -128,6 +128,7 @@ obj-$(CONFIG_GPIO_RDA)                      += gpio-rda.o
 obj-$(CONFIG_GPIO_RDC321X)             += gpio-rdc321x.o
 obj-$(CONFIG_GPIO_REALTEK_OTTO)                += gpio-realtek-otto.o
 obj-$(CONFIG_GPIO_REG)                 += gpio-reg.o
+obj-$(CONFIG_GPIO_ROCKCHIP)    += gpio-rockchip.o
 obj-$(CONFIG_ARCH_SA1100)              += gpio-sa1100.o
 obj-$(CONFIG_GPIO_SAMA5D2_PIOBU)       += gpio-sama5d2-piobu.o
 obj-$(CONFIG_GPIO_SCH311X)             += gpio-sch311x.o
index 71c0bea..6bf4104 100644 (file)
@@ -336,8 +336,8 @@ static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
        unsigned long gpio;
 
        for_each_set_bit(gpio, &irq_mask, 2)
-               generic_handle_irq(irq_find_mapping(chip->irq.domain,
-                       19 + gpio*24));
+               generic_handle_domain_irq(chip->irq.domain,
+                                         19 + gpio*24);
 
        raw_spin_lock(&dio48egpio->lock);
 
index b132afa..34be7dd 100644 (file)
@@ -223,8 +223,8 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
                for_each_set_bit(bit_num, &irq_mask, 8) {
                        gpio = bit_num + boundary * 8;
 
-                       generic_handle_irq(irq_find_mapping(chip->irq.domain,
-                               gpio));
+                       generic_handle_domain_irq(chip->irq.domain,
+                                                 gpio);
                }
        }
 
index 55b4029..c68ed1a 100644 (file)
@@ -208,7 +208,7 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
        int gpio;
 
        for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
-               generic_handle_irq(irq_find_mapping(chip->irq.domain, gpio));
+               generic_handle_domain_irq(chip->irq.domain, gpio);
 
        raw_spin_lock(&idio16gpio->lock);
 
index b7932ec..b59fae9 100644 (file)
@@ -201,9 +201,8 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
              (readl(mm_gc->regs + ALTERA_GPIO_EDGE_CAP) &
              readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK)))) {
                writel(status, mm_gc->regs + ALTERA_GPIO_EDGE_CAP);
-               for_each_set_bit(i, &status, mm_gc->gc.ngpio) {
-                       generic_handle_irq(irq_find_mapping(irqdomain, i));
-               }
+               for_each_set_bit(i, &status, mm_gc->gc.ngpio)
+                       generic_handle_domain_irq(irqdomain, i);
        }
 
        chained_irq_exit(chip, desc);
@@ -228,9 +227,9 @@ static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
        status = readl(mm_gc->regs + ALTERA_GPIO_DATA);
        status &= readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
 
-       for_each_set_bit(i, &status, mm_gc->gc.ngpio) {
-               generic_handle_irq(irq_find_mapping(irqdomain, i));
-       }
+       for_each_set_bit(i, &status, mm_gc->gc.ngpio)
+               generic_handle_domain_irq(irqdomain, i);
+
        chained_irq_exit(chip, desc);
 }
 
index 64e54f8..a99ece1 100644 (file)
@@ -392,7 +392,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
        struct gpio_chip *gc = irq_desc_get_handler_data(desc);
        struct irq_chip *ic = irq_desc_get_chip(desc);
        struct aspeed_sgpio *data = gpiochip_get_data(gc);
-       unsigned int i, p, girq;
+       unsigned int i, p;
        unsigned long reg;
 
        chained_irq_enter(ic, desc);
@@ -402,11 +402,8 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
 
                reg = ioread32(bank_reg(data, bank, reg_irq_status));
 
-               for_each_set_bit(p, &reg, 32) {
-                       girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
-                       generic_handle_irq(girq);
-               }
-
+               for_each_set_bit(p, &reg, 32)
+                       generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
        }
 
        chained_irq_exit(ic, desc);
index b966f5e..3c8f20c 100644 (file)
@@ -661,7 +661,7 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
        struct gpio_chip *gc = irq_desc_get_handler_data(desc);
        struct irq_chip *ic = irq_desc_get_chip(desc);
        struct aspeed_gpio *data = gpiochip_get_data(gc);
-       unsigned int i, p, girq, banks;
+       unsigned int i, p, banks;
        unsigned long reg;
        struct aspeed_gpio *gpio = gpiochip_get_data(gc);
 
@@ -673,11 +673,8 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
 
                reg = ioread32(bank_reg(data, bank, reg_irq_status));
 
-               for_each_set_bit(p, &reg, 32) {
-                       girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
-                       generic_handle_irq(girq);
-               }
-
+               for_each_set_bit(p, &reg, 32)
+                       generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
        }
 
        chained_irq_exit(ic, desc);
index 9b780dc..3958c6d 100644 (file)
@@ -204,11 +204,8 @@ static void ath79_gpio_irq_handler(struct irq_desc *desc)
 
        raw_spin_unlock_irqrestore(&ctrl->lock, flags);
 
-       if (pending) {
-               for_each_set_bit(irq, &pending, gc->ngpio)
-                       generic_handle_irq(
-                               irq_linear_revmap(gc->irq.domain, irq));
-       }
+       for_each_set_bit(irq, &pending, gc->ngpio)
+               generic_handle_domain_irq(gc->irq.domain, irq);
 
        chained_irq_exit(irqchip, desc);
 }
index 1e6b427..d329a14 100644 (file)
@@ -466,9 +466,6 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
                    (~(readl(reg_base + GPIO_INT_MASK(bank_id)))))) {
                for_each_set_bit(bit, &sta, 32) {
                        int hwirq = GPIO_PER_BANK * bank_id + bit;
-                       int child_irq =
-                               irq_find_mapping(bank->kona_gpio->irq_domain,
-                                                hwirq);
                        /*
                         * Clear interrupt before handler is called so we don't
                         * miss any interrupt occurred during executing them.
@@ -476,7 +473,8 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
                        writel(readl(reg_base + GPIO_INT_STATUS(bank_id)) |
                               BIT(bit), reg_base + GPIO_INT_STATUS(bank_id));
                        /* Invoke interrupt handler */
-                       generic_handle_irq(child_irq);
+                       generic_handle_domain_irq(bank->kona_gpio->irq_domain,
+                                                 hwirq);
                }
        }
 
index fcfc1a1..74b7c91 100644 (file)
@@ -277,15 +277,14 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
        unsigned long status;
 
        while ((status = brcmstb_gpio_get_active_irqs(bank))) {
-               unsigned int irq, offset;
+               unsigned int offset;
 
                for_each_set_bit(offset, &status, 32) {
                        if (offset >= bank->width)
                                dev_warn(&priv->pdev->dev,
                                         "IRQ for invalid GPIO (bank=%d, offset=%d)\n",
                                         bank->id, offset);
-                       irq = irq_linear_revmap(domain, hwbase + offset);
-                       generic_handle_irq(irq);
+                       generic_handle_domain_irq(domain, hwbase + offset);
                }
        }
 }
index 4ab3fcd..562f8f7 100644 (file)
@@ -133,7 +133,7 @@ static void cdns_gpio_irq_handler(struct irq_desc *desc)
                ~ioread32(cgpio->regs + CDNS_GPIO_IRQ_MASK);
 
        for_each_set_bit(hwirq, &status, chip->ngpio)
-               generic_handle_irq(irq_find_mapping(chip->irq.domain, hwirq));
+               generic_handle_domain_irq(chip->irq.domain, hwirq);
 
        chained_irq_exit(irqchip, desc);
 }
index 6f21385..cb5afaa 100644 (file)
@@ -369,8 +369,7 @@ static void gpio_irq_handler(struct irq_desc *desc)
                         */
                        hw_irq = (bank_num / 2) * 32 + bit;
 
-                       generic_handle_irq(
-                               irq_find_mapping(d->irq_domain, hw_irq));
+                       generic_handle_domain_irq(d->irq_domain, hw_irq);
                }
        }
        chained_irq_exit(irq_desc_get_chip(desc), desc);
index 4c5f6d0..026903e 100644 (file)
@@ -395,7 +395,7 @@ static struct irq_chip dln2_gpio_irqchip = {
 static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
                            const void *data, int len)
 {
-       int pin, irq;
+       int pin, ret;
 
        const struct {
                __le16 count;
@@ -416,24 +416,20 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
                return;
        }
 
-       irq = irq_find_mapping(dln2->gpio.irq.domain, pin);
-       if (!irq) {
-               dev_err(dln2->gpio.parent, "pin %d not mapped to IRQ\n", pin);
-               return;
-       }
-
        switch (dln2->irq_type[pin]) {
        case DLN2_GPIO_EVENT_CHANGE_RISING:
-               if (event->value)
-                       generic_handle_irq(irq);
+               if (!event->value)
+                       return;
                break;
        case DLN2_GPIO_EVENT_CHANGE_FALLING:
-               if (!event->value)
-                       generic_handle_irq(irq);
+               if (event->value)
+                       return;
                break;
-       default:
-               generic_handle_irq(irq);
        }
+
+       ret = generic_handle_domain_irq(dln2->gpio.irq.domain, pin);
+       if (unlikely(ret))
+               dev_err(dln2->gpio.parent, "pin %d not mapped to IRQ\n", pin);
 }
 
 static int dln2_gpio_probe(struct platform_device *pdev)
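The dln2 hunk above also shows why these conversions can drop the explicit irq_find_mapping() error path: generic_handle_domain_irq() resolves the hwirq and handles it in one call, returning a nonzero error when no mapping exists. A minimal sketch of a demultiplexing handler using it (driver names are illustrative):

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/printk.h>

static void example_demux(struct irq_domain *domain, unsigned long pending,
			  unsigned int nr_lines)
{
	unsigned int hwirq;

	for_each_set_bit(hwirq, &pending, nr_lines) {
		/* 0 on success, an error (e.g. -EINVAL) if hwirq is unmapped. */
		if (generic_handle_domain_irq(domain, hwirq))
			pr_warn("no mapping for hwirq %u\n", hwirq);
	}
}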
index 17a243c..90b336e 100644 (file)
@@ -173,7 +173,7 @@ static irqreturn_t em_gio_irq_handler(int irq, void *dev_id)
        while ((pending = em_gio_read(p, GIO_MST))) {
                offset = __ffs(pending);
                em_gio_write(p, GIO_IIR, BIT(offset));
-               generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
+               generic_handle_domain_irq(p->irq_domain, offset);
                irqs_handled++;
        }
 
index ef148b2..2e17797 100644 (file)
@@ -128,13 +128,13 @@ static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
         */
        stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS);
        for_each_set_bit(offset, &stat, 8)
-               generic_handle_irq(irq_find_mapping(epg->gc[0].gc.irq.domain,
-                                                   offset));
+               generic_handle_domain_irq(epg->gc[0].gc.irq.domain,
+                                         offset);
 
        stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS);
        for_each_set_bit(offset, &stat, 8)
-               generic_handle_irq(irq_find_mapping(epg->gc[1].gc.irq.domain,
-                                                   offset));
+               generic_handle_domain_irq(epg->gc[1].gc.irq.domain,
+                                         offset);
 
        chained_irq_exit(irqchip, desc);
 }
index 4031164..b90a45c 100644 (file)
@@ -149,8 +149,7 @@ static void ftgpio_gpio_irq_handler(struct irq_desc *desc)
        stat = readl(g->base + GPIO_INT_STAT_RAW);
        if (stat)
                for_each_set_bit(offset, &stat, gc->ngpio)
-                       generic_handle_irq(irq_find_mapping(gc->irq.domain,
-                                                           offset));
+                       generic_handle_domain_irq(gc->irq.domain, offset);
 
        chained_irq_exit(irqchip, desc);
 }
index ad3d4da..3caabef 100644 (file)
@@ -186,8 +186,8 @@ static void hisi_gpio_irq_handler(struct irq_desc *desc)
 
        chained_irq_enter(irq_c, desc);
        for_each_set_bit(hwirq, &irq_msk, HISI_GPIO_LINE_NUM_MAX)
-               generic_handle_irq(irq_find_mapping(hisi_gpio->chip.irq.domain,
-                                                   hwirq));
+               generic_handle_domain_irq(hisi_gpio->chip.irq.domain,
+                                         hwirq);
        chained_irq_exit(irq_c, desc);
 }
 
index 4a17599..641719a 100644 (file)
@@ -97,11 +97,8 @@ static void hlwd_gpio_irqhandler(struct irq_desc *desc)
 
        chained_irq_enter(chip, desc);
 
-       for_each_set_bit(hwirq, &pending, 32) {
-               int irq = irq_find_mapping(hlwd->gpioc.irq.domain, hwirq);
-
-               generic_handle_irq(irq);
-       }
+       for_each_set_bit(hwirq, &pending, 32)
+               generic_handle_domain_irq(hlwd->gpioc.irq.domain, hwirq);
 
        chained_irq_exit(chip, desc);
 }
index 22f3ce2..42c4d9d 100644 (file)
@@ -359,12 +359,8 @@ static void mrfld_irq_handler(struct irq_desc *desc)
                /* Only interrupts that are enabled */
                pending &= enabled;
 
-               for_each_set_bit(gpio, &pending, 32) {
-                       unsigned int irq;
-
-                       irq = irq_find_mapping(gc->irq.domain, base + gpio);
-                       generic_handle_irq(irq);
-               }
+               for_each_set_bit(gpio, &pending, 32)
+                       generic_handle_domain_irq(gc->irq.domain, base + gpio);
        }
 
        chained_irq_exit(irqchip, desc);
index 4b9157a..67dc389 100644 (file)
@@ -120,7 +120,7 @@ static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
        mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_IER)
                & gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR);
        for_each_set_bit(i, &mask, 32)
-               generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq, 31 - i));
+               generic_handle_domain_irq(mpc8xxx_gc->irq, 31 - i);
 
        return IRQ_HANDLED;
 }
@@ -405,7 +405,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
 
        ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn,
                               mpc8xxx_gpio_irq_cascade,
-                              IRQF_SHARED, "gpio-cascade",
+                              IRQF_NO_THREAD | IRQF_SHARED, "gpio-cascade",
                               mpc8xxx_gc);
        if (ret) {
                dev_err(&pdev->dev,
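
Besides the handler conversion, the mpc8xxx hunk above also adds IRQF_NO_THREAD to the shared cascade interrupt. A cascade handler demultiplexes child interrupts via generic_handle_domain_irq() and therefore has to run in hard-IRQ context; IRQF_NO_THREAD keeps it from being force-threaded (for example with the threadirqs command line option or on PREEMPT_RT). A sketch of requesting such a cascade IRQ, with placeholder names:

#include <linux/device.h>
#include <linux/interrupt.h>

/* struct my_gc and my_cascade_handler() are illustrative placeholders. */
struct my_gc;
static irqreturn_t my_cascade_handler(int irq, void *data);

static int my_request_cascade(struct device *dev, struct my_gc *gc, int irqn)
{
	/*
	 * IRQF_NO_THREAD: the handler demuxes child interrupts and must
	 * stay in hard-IRQ context even when forced IRQ threading is on.
	 */
	return devm_request_irq(dev, irqn, my_cascade_handler,
				IRQF_NO_THREAD | IRQF_SHARED,
				"gpio-cascade", gc);
}
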
index 82fb20d..10c0a9b 100644 (file)
@@ -95,9 +95,7 @@ mediatek_gpio_irq_handler(int irq, void *data)
        pending = mtk_gpio_r32(rg, GPIO_REG_STAT);
 
        for_each_set_bit(bit, &pending, MTK_BANK_WIDTH) {
-               u32 map = irq_find_mapping(gc->irq.domain, bit);
-
-               generic_handle_irq(map);
+               generic_handle_domain_irq(gc->irq.domain, bit);
                mtk_gpio_w32(rg, GPIO_REG_STAT, BIT(bit));
                ret |= IRQ_HANDLED;
        }
index b9fdf05..c871602 100644 (file)
@@ -241,7 +241,7 @@ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
                if (port->both_edges & (1 << irqoffset))
                        mxc_flip_edge(port, irqoffset);
 
-               generic_handle_irq(irq_find_mapping(port->domain, irqoffset));
+               generic_handle_domain_irq(port->domain, irqoffset);
 
                irq_stat &= ~(1 << irqoffset);
        }
index 31a336b..c5166cd 100644 (file)
@@ -157,7 +157,7 @@ static void mxs_gpio_irq_handler(struct irq_desc *desc)
                if (port->both_edges & (1 << irqoffset))
                        mxs_flip_edge(port, irqoffset);
 
-               generic_handle_irq(irq_find_mapping(port->domain, irqoffset));
+               generic_handle_domain_irq(port->domain, irqoffset);
                irq_stat &= ~(1 << irqoffset);
        }
 }
index ca23f72..415e8df 100644 (file)
@@ -611,8 +611,7 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
 
                        raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);
 
-                       generic_handle_irq(irq_find_mapping(bank->chip.irq.domain,
-                                                           bit));
+                       generic_handle_domain_irq(bank->chip.irq.domain, bit);
 
                        raw_spin_unlock_irqrestore(&bank->wa_lock,
                                                   wa_lock_flags);
index 9acec76..71a13a3 100644 (file)
@@ -260,7 +260,7 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
                return IRQ_NONE;
 
        for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
-               generic_handle_irq(irq_find_mapping(chip->irq.domain, gpio));
+               generic_handle_domain_irq(chip->irq.domain, gpio);
 
        raw_spin_lock(&idio16gpio->lock);
 
index 2a07fd9..8a9b98f 100644 (file)
@@ -468,8 +468,7 @@ static irqreturn_t idio_24_irq_handler(int irq, void *dev_id)
        irq_mask = idio24gpio->irq_mask & irq_status;
 
        for_each_set_bit(gpio, &irq_mask, chip->ngpio - 24)
-               generic_handle_irq(irq_find_mapping(chip->irq.domain,
-                       gpio + 24));
+               generic_handle_domain_irq(chip->irq.domain, gpio + 24);
 
        raw_spin_lock(&idio24gpio->lock);
 
index f1b53dd..4ecab70 100644 (file)
@@ -223,8 +223,8 @@ static void pl061_irq_handler(struct irq_desc *desc)
        pending = readb(pl061->base + GPIOMIS);
        if (pending) {
                for_each_set_bit(offset, &pending, PL061_GPIO_NR)
-                       generic_handle_irq(irq_find_mapping(gc->irq.domain,
-                                                           offset));
+                       generic_handle_domain_irq(gc->irq.domain,
+                                                 offset);
        }
 
        chained_irq_exit(irqchip, desc);
index 0cb6600..382468e 100644 (file)
@@ -455,9 +455,8 @@ static irqreturn_t pxa_gpio_demux_handler(int in_irq, void *d)
                        for_each_set_bit(n, &gedr, BITS_PER_LONG) {
                                loop = 1;
 
-                               generic_handle_irq(
-                                       irq_find_mapping(pchip->irqdomain,
-                                                        gpio + n));
+                               generic_handle_domain_irq(pchip->irqdomain,
+                                                         gpio + n);
                        }
                }
                handled += loop;
@@ -471,9 +470,9 @@ static irqreturn_t pxa_gpio_direct_handler(int in_irq, void *d)
        struct pxa_gpio_chip *pchip = d;
 
        if (in_irq == pchip->irq0) {
-               generic_handle_irq(irq_find_mapping(pchip->irqdomain, 0));
+               generic_handle_domain_irq(pchip->irqdomain, 0);
        } else if (in_irq == pchip->irq1) {
-               generic_handle_irq(irq_find_mapping(pchip->irqdomain, 1));
+               generic_handle_domain_irq(pchip->irqdomain, 1);
        } else {
                pr_err("%s() unknown irq %d\n", __func__, in_irq);
                return IRQ_NONE;
index e7092d5..b378aba 100644 (file)
@@ -213,8 +213,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
                          gpio_rcar_read(p, INTMSK))) {
                offset = __ffs(pending);
                gpio_rcar_write(p, INTCLR, BIT(offset));
-               generic_handle_irq(irq_find_mapping(p->gpio_chip.irq.domain,
-                                                   offset));
+               generic_handle_domain_irq(p->gpio_chip.irq.domain,
+                                         offset);
                irqs_handled++;
        }
 
index 28dcbb5..4638464 100644 (file)
@@ -181,7 +181,7 @@ static void rda_gpio_irq_handler(struct irq_desc *desc)
        struct irq_chip *ic = irq_desc_get_chip(desc);
        struct rda_gpio *rda_gpio = gpiochip_get_data(chip);
        unsigned long status;
-       u32 n, girq;
+       u32 n;
 
        chained_irq_enter(ic, desc);
 
@@ -189,10 +189,8 @@ static void rda_gpio_irq_handler(struct irq_desc *desc)
        /* Only lower 8 bits are capable of generating interrupts */
        status &= RDA_GPIO_IRQ_MASK;
 
-       for_each_set_bit(n, &status, RDA_GPIO_BANK_NR) {
-               girq = irq_find_mapping(chip->irq.domain, n);
-               generic_handle_irq(girq);
-       }
+       for_each_set_bit(n, &status, RDA_GPIO_BANK_NR)
+               generic_handle_domain_irq(chip->irq.domain, n);
 
        chained_irq_exit(ic, desc);
 }
index cb64fb5..eeeb39b 100644 (file)
@@ -196,7 +196,6 @@ static void realtek_gpio_irq_handler(struct irq_desc *desc)
        struct irq_chip *irq_chip = irq_desc_get_chip(desc);
        unsigned int lines_done;
        unsigned int port_pin_count;
-       unsigned int irq;
        unsigned long status;
        int offset;
 
@@ -205,10 +204,8 @@ static void realtek_gpio_irq_handler(struct irq_desc *desc)
        for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) {
                status = realtek_gpio_read_isr(ctrl, lines_done / 8);
                port_pin_count = min(gc->ngpio - lines_done, 8U);
-               for_each_set_bit(offset, &status, port_pin_count) {
-                       irq = irq_find_mapping(gc->irq.domain, offset);
-                       generic_handle_irq(irq);
-               }
+               for_each_set_bit(offset, &status, port_pin_count)
+                       generic_handle_domain_irq(gc->irq.domain, offset);
        }
 
        chained_irq_exit(irq_chip, desc);
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
new file mode 100644 (file)
index 0000000..036b2d9
--- /dev/null
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl/core.h"
+#include "../pinctrl/pinctrl-rockchip.h"
+
+#define GPIO_TYPE_V1           (0)           /* GPIO Version ID reserved */
+#define GPIO_TYPE_V2           (0x01000C2B)  /* GPIO Version ID 0x01000C2B */
+
+static const struct rockchip_gpio_regs gpio_regs_v1 = {
+       .port_dr = 0x00,
+       .port_ddr = 0x04,
+       .int_en = 0x30,
+       .int_mask = 0x34,
+       .int_type = 0x38,
+       .int_polarity = 0x3c,
+       .int_status = 0x40,
+       .int_rawstatus = 0x44,
+       .debounce = 0x48,
+       .port_eoi = 0x4c,
+       .ext_port = 0x50,
+};
+
+static const struct rockchip_gpio_regs gpio_regs_v2 = {
+       .port_dr = 0x00,
+       .port_ddr = 0x08,
+       .int_en = 0x10,
+       .int_mask = 0x18,
+       .int_type = 0x20,
+       .int_polarity = 0x28,
+       .int_bothedge = 0x30,
+       .int_status = 0x50,
+       .int_rawstatus = 0x58,
+       .debounce = 0x38,
+       .dbclk_div_en = 0x40,
+       .dbclk_div_con = 0x48,
+       .port_eoi = 0x60,
+       .ext_port = 0x70,
+       .version_id = 0x78,
+};
+
+static inline void gpio_writel_v2(u32 val, void __iomem *reg)
+{
+       writel((val & 0xffff) | 0xffff0000, reg);
+       writel((val >> 16) | 0xffff0000, reg + 0x4);
+}
+
+static inline u32 gpio_readl_v2(void __iomem *reg)
+{
+       return readl(reg + 0x4) << 16 | readl(reg);
+}
+
+static inline void rockchip_gpio_writel(struct rockchip_pin_bank *bank,
+                                       u32 value, unsigned int offset)
+{
+       void __iomem *reg = bank->reg_base + offset;
+
+       if (bank->gpio_type == GPIO_TYPE_V2)
+               gpio_writel_v2(value, reg);
+       else
+               writel(value, reg);
+}
+
+static inline u32 rockchip_gpio_readl(struct rockchip_pin_bank *bank,
+                                     unsigned int offset)
+{
+       void __iomem *reg = bank->reg_base + offset;
+       u32 value;
+
+       if (bank->gpio_type == GPIO_TYPE_V2)
+               value = gpio_readl_v2(reg);
+       else
+               value = readl(reg);
+
+       return value;
+}
+
+static inline void rockchip_gpio_writel_bit(struct rockchip_pin_bank *bank,
+                                           u32 bit, u32 value,
+                                           unsigned int offset)
+{
+       void __iomem *reg = bank->reg_base + offset;
+       u32 data;
+
+       if (bank->gpio_type == GPIO_TYPE_V2) {
+               if (value)
+                       data = BIT(bit % 16) | BIT(bit % 16 + 16);
+               else
+                       data = BIT(bit % 16 + 16);
+               writel(data, bit >= 16 ? reg + 0x4 : reg);
+       } else {
+               data = readl(reg);
+               data &= ~BIT(bit);
+               if (value)
+                       data |= BIT(bit);
+               writel(data, reg);
+       }
+}
+
+static inline u32 rockchip_gpio_readl_bit(struct rockchip_pin_bank *bank,
+                                         u32 bit, unsigned int offset)
+{
+       void __iomem *reg = bank->reg_base + offset;
+       u32 data;
+
+       if (bank->gpio_type == GPIO_TYPE_V2) {
+               data = readl(bit >= 16 ? reg + 0x4 : reg);
+               data >>= bit % 16;
+       } else {
+               data = readl(reg);
+               data >>= bit;
+       }
+
+       return data & (0x1);
+}
+
+static int rockchip_gpio_get_direction(struct gpio_chip *chip,
+                                      unsigned int offset)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(chip);
+       u32 data;
+
+       data = rockchip_gpio_readl_bit(bank, offset, bank->gpio_regs->port_ddr);
+       if (data & BIT(offset))
+               return GPIO_LINE_DIRECTION_OUT;
+
+       return GPIO_LINE_DIRECTION_IN;
+}
+
+static int rockchip_gpio_set_direction(struct gpio_chip *chip,
+                                      unsigned int offset, bool input)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(chip);
+       unsigned long flags;
+       u32 data = input ? 0 : 1;
+
+       raw_spin_lock_irqsave(&bank->slock, flags);
+       rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr);
+       raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+       return 0;
+}
+
+static void rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset,
+                             int value)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&bank->slock, flags);
+       rockchip_gpio_writel_bit(bank, offset, value, bank->gpio_regs->port_dr);
+       raw_spin_unlock_irqrestore(&bank->slock, flags);
+}
+
+static int rockchip_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
+       u32 data;
+
+       data = readl(bank->reg_base + bank->gpio_regs->ext_port);
+       data >>= offset;
+       data &= 1;
+
+       return data;
+}
+
+static int rockchip_gpio_set_debounce(struct gpio_chip *gc,
+                                     unsigned int offset,
+                                     unsigned int debounce)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
+       const struct rockchip_gpio_regs *reg = bank->gpio_regs;
+       unsigned long flags, div_reg, freq, max_debounce;
+       bool div_debounce_support;
+       unsigned int cur_div_reg;
+       u64 div;
+
+       if (!IS_ERR(bank->db_clk)) {
+               div_debounce_support = true;
+               freq = clk_get_rate(bank->db_clk);
+               max_debounce = (GENMASK(23, 0) + 1) * 2 * 1000000 / freq;
+               if (debounce > max_debounce)
+                       return -EINVAL;
+
+               div = debounce * freq;
+               div_reg = DIV_ROUND_CLOSEST_ULL(div, 2 * USEC_PER_SEC) - 1;
+       } else {
+               div_debounce_support = false;
+       }
+
+       raw_spin_lock_irqsave(&bank->slock, flags);
+
+       /* Only the v2 needs to configure div_en and div_con for dbclk */
+       if (debounce) {
+               if (div_debounce_support) {
+                       /* Configure the max debounce from consumers */
+                       cur_div_reg = readl(bank->reg_base +
+                                           reg->dbclk_div_con);
+                       if (cur_div_reg < div_reg)
+                               writel(div_reg, bank->reg_base +
+                                      reg->dbclk_div_con);
+                       rockchip_gpio_writel_bit(bank, offset, 1,
+                                                reg->dbclk_div_en);
+               }
+
+               rockchip_gpio_writel_bit(bank, offset, 1, reg->debounce);
+       } else {
+               if (div_debounce_support)
+                       rockchip_gpio_writel_bit(bank, offset, 0,
+                                                reg->dbclk_div_en);
+
+               rockchip_gpio_writel_bit(bank, offset, 0, reg->debounce);
+       }
+
+       raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+       /* Enable or disable dbclk at last */
+       if (div_debounce_support) {
+               if (debounce)
+                       clk_prepare_enable(bank->db_clk);
+               else
+                       clk_disable_unprepare(bank->db_clk);
+       }
+
+       return 0;
+}
+
+static int rockchip_gpio_direction_input(struct gpio_chip *gc,
+                                        unsigned int offset)
+{
+       return rockchip_gpio_set_direction(gc, offset, true);
+}
+
+static int rockchip_gpio_direction_output(struct gpio_chip *gc,
+                                         unsigned int offset, int value)
+{
+       rockchip_gpio_set(gc, offset, value);
+
+       return rockchip_gpio_set_direction(gc, offset, false);
+}
+
+/*
+ * gpiolib set_config callback function. The setting of the pin
+ * mux function as 'gpio output' will be handled by the pinctrl subsystem
+ * interface.
+ */
+static int rockchip_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+                                 unsigned long config)
+{
+       enum pin_config_param param = pinconf_to_config_param(config);
+
+       switch (param) {
+       case PIN_CONFIG_INPUT_DEBOUNCE:
+               rockchip_gpio_set_debounce(gc, offset, true);
+               /*
+                * Rockchip's GPIO can only debounce for up to one period
+                * of the debounce clock (pclk), which falls far short of
+                * the requirement, as pclk is usually around 100MHz and
+                * shared by all peripherals. In practice this crippled
+                * debounce capability is only useful for filtering out
+                * spurious glitches that would otherwise wake up the system
+                * when the GPIO is configured as a wakeup interrupt source.
+                * Keep returning -ENOTSUPP as before, so that callers of
+                * gpiod_set_debounce() don't change their behaviour.
+                */
+               return -ENOTSUPP;
+       default:
+               return -ENOTSUPP;
+       }
+}
+
+/*
+ * gpiolib gpio_to_irq callback function. Creates a mapping between a GPIO pin
+ * and a virtual IRQ, if not already present.
+ */
+static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
+       unsigned int virq;
+
+       if (!bank->domain)
+               return -ENXIO;
+
+       virq = irq_create_mapping(bank->domain, offset);
+
+       return (virq) ? : -ENXIO;
+}
+
+static const struct gpio_chip rockchip_gpiolib_chip = {
+       .request = gpiochip_generic_request,
+       .free = gpiochip_generic_free,
+       .set = rockchip_gpio_set,
+       .get = rockchip_gpio_get,
+       .get_direction  = rockchip_gpio_get_direction,
+       .direction_input = rockchip_gpio_direction_input,
+       .direction_output = rockchip_gpio_direction_output,
+       .set_config = rockchip_gpio_set_config,
+       .to_irq = rockchip_gpio_to_irq,
+       .owner = THIS_MODULE,
+};
+
+static void rockchip_irq_demux(struct irq_desc *desc)
+{
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+       struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
+       u32 pend;
+
+       dev_dbg(bank->dev, "got irq for bank %s\n", bank->name);
+
+       chained_irq_enter(chip, desc);
+
+       pend = readl_relaxed(bank->reg_base + bank->gpio_regs->int_status);
+
+       while (pend) {
+               unsigned int irq, virq;
+
+               irq = __ffs(pend);
+               pend &= ~BIT(irq);
+               virq = irq_find_mapping(bank->domain, irq);
+
+               if (!virq) {
+                       dev_err(bank->dev, "unmapped irq %d\n", irq);
+                       continue;
+               }
+
+               dev_dbg(bank->dev, "handling irq %d\n", irq);
+
+               /*
+                * Triggering IRQ on both rising and falling edge
+                * needs manual intervention.
+                */
+               if (bank->toggle_edge_mode & BIT(irq)) {
+                       u32 data, data_old, polarity;
+                       unsigned long flags;
+
+                       data = readl_relaxed(bank->reg_base +
+                                            bank->gpio_regs->ext_port);
+                       do {
+                               raw_spin_lock_irqsave(&bank->slock, flags);
+
+                               polarity = readl_relaxed(bank->reg_base +
+                                                        bank->gpio_regs->int_polarity);
+                               if (data & BIT(irq))
+                                       polarity &= ~BIT(irq);
+                               else
+                                       polarity |= BIT(irq);
+                               writel(polarity,
+                                      bank->reg_base +
+                                      bank->gpio_regs->int_polarity);
+
+                               raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+                               data_old = data;
+                               data = readl_relaxed(bank->reg_base +
+                                                    bank->gpio_regs->ext_port);
+                       } while ((data & BIT(irq)) != (data_old & BIT(irq)));
+               }
+
+               generic_handle_irq(virq);
+       }
+
+       chained_irq_exit(chip, desc);
+}
+
+static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+       u32 mask = BIT(d->hwirq);
+       u32 polarity;
+       u32 level;
+       u32 data;
+       unsigned long flags;
+       int ret = 0;
+
+       raw_spin_lock_irqsave(&bank->slock, flags);
+
+       rockchip_gpio_writel_bit(bank, d->hwirq, 0,
+                                bank->gpio_regs->port_ddr);
+
+       raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+       if (type & IRQ_TYPE_EDGE_BOTH)
+               irq_set_handler_locked(d, handle_edge_irq);
+       else
+               irq_set_handler_locked(d, handle_level_irq);
+
+       raw_spin_lock_irqsave(&bank->slock, flags);
+
+       level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type);
+       polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity);
+
+       switch (type) {
+       case IRQ_TYPE_EDGE_BOTH:
+               if (bank->gpio_type == GPIO_TYPE_V2) {
+                       bank->toggle_edge_mode &= ~mask;
+                       rockchip_gpio_writel_bit(bank, d->hwirq, 1,
+                                                bank->gpio_regs->int_bothedge);
+                       goto out;
+               } else {
+                       bank->toggle_edge_mode |= mask;
+                       level |= mask;
+
+                       /*
+                        * Determine the GPIO state. If it reads 1, the next
+                        * interrupt should be falling, otherwise rising.
+                        */
+                       data = readl(bank->reg_base + bank->gpio_regs->ext_port);
+                       if (data & mask)
+                               polarity &= ~mask;
+                       else
+                               polarity |= mask;
+               }
+               break;
+       case IRQ_TYPE_EDGE_RISING:
+               bank->toggle_edge_mode &= ~mask;
+               level |= mask;
+               polarity |= mask;
+               break;
+       case IRQ_TYPE_EDGE_FALLING:
+               bank->toggle_edge_mode &= ~mask;
+               level |= mask;
+               polarity &= ~mask;
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+               bank->toggle_edge_mode &= ~mask;
+               level &= ~mask;
+               polarity |= mask;
+               break;
+       case IRQ_TYPE_LEVEL_LOW:
+               bank->toggle_edge_mode &= ~mask;
+               level &= ~mask;
+               polarity &= ~mask;
+               break;
+       default:
+               ret = -EINVAL;
+               goto out;
+       }
+
+       rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type);
+       rockchip_gpio_writel(bank, polarity, bank->gpio_regs->int_polarity);
+out:
+       raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+       return ret;
+}
+
+static void rockchip_irq_suspend(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+
+       bank->saved_masks = irq_reg_readl(gc, bank->gpio_regs->int_mask);
+       irq_reg_writel(gc, ~gc->wake_active, bank->gpio_regs->int_mask);
+}
+
+static void rockchip_irq_resume(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+
+       irq_reg_writel(gc, bank->saved_masks, bank->gpio_regs->int_mask);
+}
+
+static void rockchip_irq_enable(struct irq_data *d)
+{
+       irq_gc_mask_clr_bit(d);
+}
+
+static void rockchip_irq_disable(struct irq_data *d)
+{
+       irq_gc_mask_set_bit(d);
+}
+
+static int rockchip_interrupts_register(struct rockchip_pin_bank *bank)
+{
+       unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+       struct irq_chip_generic *gc;
+       int ret;
+
+       bank->domain = irq_domain_add_linear(bank->of_node, 32,
+                                       &irq_generic_chip_ops, NULL);
+       if (!bank->domain) {
+               dev_warn(bank->dev, "could not init irq domain for bank %s\n",
+                        bank->name);
+               return -EINVAL;
+       }
+
+       ret = irq_alloc_domain_generic_chips(bank->domain, 32, 1,
+                                            "rockchip_gpio_irq",
+                                            handle_level_irq,
+                                            clr, 0, 0);
+       if (ret) {
+               dev_err(bank->dev, "could not alloc generic chips for bank %s\n",
+                       bank->name);
+               irq_domain_remove(bank->domain);
+               return -EINVAL;
+       }
+
+       gc = irq_get_domain_generic_chip(bank->domain, 0);
+       if (bank->gpio_type == GPIO_TYPE_V2) {
+               gc->reg_writel = gpio_writel_v2;
+               gc->reg_readl = gpio_readl_v2;
+       }
+
+       gc->reg_base = bank->reg_base;
+       gc->private = bank;
+       gc->chip_types[0].regs.mask = bank->gpio_regs->int_mask;
+       gc->chip_types[0].regs.ack = bank->gpio_regs->port_eoi;
+       gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+       gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+       gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+       gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
+       gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
+       gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+       gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
+       gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
+       gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
+       gc->wake_enabled = IRQ_MSK(bank->nr_pins);
+
+       /*
+        * Linux assumes that all interrupts start out disabled/masked.
+        * Our driver only uses the concept of masked and always keeps
+        * things enabled, so for us that's all masked and all enabled.
+        */
+       rockchip_gpio_writel(bank, 0xffffffff, bank->gpio_regs->int_mask);
+       rockchip_gpio_writel(bank, 0xffffffff, bank->gpio_regs->port_eoi);
+       rockchip_gpio_writel(bank, 0xffffffff, bank->gpio_regs->int_en);
+       gc->mask_cache = 0xffffffff;
+
+       irq_set_chained_handler_and_data(bank->irq,
+                                        rockchip_irq_demux, bank);
+
+       return 0;
+}
+
+static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
+{
+       struct gpio_chip *gc;
+       int ret;
+
+       bank->gpio_chip = rockchip_gpiolib_chip;
+
+       gc = &bank->gpio_chip;
+       gc->base = bank->pin_base;
+       gc->ngpio = bank->nr_pins;
+       gc->label = bank->name;
+       gc->parent = bank->dev;
+#ifdef CONFIG_OF_GPIO
+       gc->of_node = of_node_get(bank->of_node);
+#endif
+
+       ret = gpiochip_add_data(gc, bank);
+       if (ret) {
+               dev_err(bank->dev, "failed to add gpiochip %s, %d\n",
+                       gc->label, ret);
+               return ret;
+       }
+
+       /*
+        * For DeviceTree-supported systems, the gpio core checks the
+        * pinctrl's device node for the "gpio-ranges" property.
+        * If it is present, it takes care of adding the pin ranges
+        * for the driver. In this case the driver can skip ahead.
+        *
+        * In order to remain compatible with older, existing DeviceTree
+        * files which don't set the "gpio-ranges" property or systems that
+        * utilize ACPI, the driver has to call gpiochip_add_pin_range().
+        */
+       if (!of_property_read_bool(bank->of_node, "gpio-ranges")) {
+               struct device_node *pctlnp = of_get_parent(bank->of_node);
+               struct pinctrl_dev *pctldev = NULL;
+
+               if (!pctlnp)
+                       return -ENODATA;
+
+               pctldev = of_pinctrl_get(pctlnp);
+               if (!pctldev)
+                       return -ENODEV;
+
+               ret = gpiochip_add_pin_range(gc, dev_name(pctldev->dev), 0,
+                                            gc->base, gc->ngpio);
+               if (ret) {
+                       dev_err(bank->dev, "Failed to add pin range\n");
+                       goto fail;
+               }
+       }
+
+       ret = rockchip_interrupts_register(bank);
+       if (ret) {
+               dev_err(bank->dev, "failed to register interrupt, %d\n", ret);
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       gpiochip_remove(&bank->gpio_chip);
+
+       return ret;
+}
+
+static int rockchip_get_bank_data(struct rockchip_pin_bank *bank)
+{
+       struct resource res;
+       int id = 0;
+
+       if (of_address_to_resource(bank->of_node, 0, &res)) {
+               dev_err(bank->dev, "cannot find IO resource for bank\n");
+               return -ENOENT;
+       }
+
+       bank->reg_base = devm_ioremap_resource(bank->dev, &res);
+       if (IS_ERR(bank->reg_base))
+               return PTR_ERR(bank->reg_base);
+
+       bank->irq = irq_of_parse_and_map(bank->of_node, 0);
+       if (!bank->irq)
+               return -EINVAL;
+
+       bank->clk = of_clk_get(bank->of_node, 0);
+       if (IS_ERR(bank->clk))
+               return PTR_ERR(bank->clk);
+
+       clk_prepare_enable(bank->clk);
+       id = readl(bank->reg_base + gpio_regs_v2.version_id);
+
+       /* If not GPIO v2, default to v1. */
+       if (id == GPIO_TYPE_V2) {
+               bank->gpio_regs = &gpio_regs_v2;
+               bank->gpio_type = GPIO_TYPE_V2;
+               bank->db_clk = of_clk_get(bank->of_node, 1);
+               if (IS_ERR(bank->db_clk)) {
+                       dev_err(bank->dev, "cannot find debounce clk\n");
+                       clk_disable_unprepare(bank->clk);
+                       return -EINVAL;
+               }
+       } else {
+               bank->gpio_regs = &gpio_regs_v1;
+               bank->gpio_type = GPIO_TYPE_V1;
+       }
+
+       return 0;
+}
+
+static struct rockchip_pin_bank *
+rockchip_gpio_find_bank(struct pinctrl_dev *pctldev, int id)
+{
+       struct rockchip_pinctrl *info;
+       struct rockchip_pin_bank *bank;
+       int i, found = 0;
+
+       info = pinctrl_dev_get_drvdata(pctldev);
+       bank = info->ctrl->pin_banks;
+       for (i = 0; i < info->ctrl->nr_banks; i++, bank++) {
+               if (bank->bank_num == id) {
+                       found = 1;
+                       break;
+               }
+       }
+
+       return found ? bank : NULL;
+}
+
+static int rockchip_gpio_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct device_node *pctlnp = of_get_parent(np);
+       struct pinctrl_dev *pctldev = NULL;
+       struct rockchip_pin_bank *bank = NULL;
+       static int gpio;
+       int id, ret;
+
+       if (!np || !pctlnp)
+               return -ENODEV;
+
+       pctldev = of_pinctrl_get(pctlnp);
+       if (!pctldev)
+               return -EPROBE_DEFER;
+
+       id = of_alias_get_id(np, "gpio");
+       if (id < 0)
+               id = gpio++;
+
+       bank = rockchip_gpio_find_bank(pctldev, id);
+       if (!bank)
+               return -EINVAL;
+
+       bank->dev = dev;
+       bank->of_node = np;
+
+       raw_spin_lock_init(&bank->slock);
+
+       ret = rockchip_get_bank_data(bank);
+       if (ret)
+               return ret;
+
+       ret = rockchip_gpiolib_register(bank);
+       if (ret) {
+               clk_disable_unprepare(bank->clk);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, bank);
+       dev_info(dev, "probed %pOF\n", np);
+
+       return 0;
+}
+
+static int rockchip_gpio_remove(struct platform_device *pdev)
+{
+       struct rockchip_pin_bank *bank = platform_get_drvdata(pdev);
+
+       clk_disable_unprepare(bank->clk);
+       gpiochip_remove(&bank->gpio_chip);
+
+       return 0;
+}
+
+static const struct of_device_id rockchip_gpio_match[] = {
+       { .compatible = "rockchip,gpio-bank", },
+       { .compatible = "rockchip,rk3188-gpio-bank0" },
+       { },
+};
+
+static struct platform_driver rockchip_gpio_driver = {
+       .probe          = rockchip_gpio_probe,
+       .remove         = rockchip_gpio_remove,
+       .driver         = {
+               .name   = "rockchip-gpio",
+               .of_match_table = rockchip_gpio_match,
+       },
+};
+
+static int __init rockchip_gpio_init(void)
+{
+       return platform_driver_register(&rockchip_gpio_driver);
+}
+postcore_initcall(rockchip_gpio_init);
+
+static void __exit rockchip_gpio_exit(void)
+{
+       platform_driver_unregister(&rockchip_gpio_driver);
+}
+module_exit(rockchip_gpio_exit);
+
+MODULE_DESCRIPTION("Rockchip gpio driver");
+MODULE_ALIAS("platform:rockchip-gpio");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, rockchip_gpio_match);
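
One detail of the new Rockchip driver worth calling out: on v2 controllers each 32-bit register is split across two halfword registers whose upper 16 bits act as a write-enable mask, so gpio_writel_v2() and rockchip_gpio_writel_bit() can update individual bits with plain stores instead of read-modify-write sequences. A stand-alone sketch of that write-mask idea (the offsets and helper below only illustrate the mechanism, they are not copied from the driver):

#include <linux/bits.h>
#include <linux/io.h>

/*
 * Update one bit of a 32-bit value that the hardware exposes as two
 * halfword registers with write-enable masks in bits [31:16].
 */
static void wmask_update_bit(void __iomem *reg, unsigned int bit, bool set)
{
	u32 val;

	/* Bits [31:16] select which of bits [15:0] this store modifies. */
	if (set)
		val = BIT(bit % 16) | BIT((bit % 16) + 16);
	else
		val = BIT((bit % 16) + 16);

	/* Bits 16..31 of the logical register live at offset +0x4. */
	writel(val, bit >= 16 ? reg + 0x4 : reg);
}
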
index a6f0421..0600f71 100644 (file)
@@ -259,7 +259,7 @@ static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
 
        pending = (resume_status << sch->resume_base) | core_status;
        for_each_set_bit(offset, &pending, sch->chip.ngpio)
-               generic_handle_irq(irq_find_mapping(gc->irq.domain, offset));
+               generic_handle_domain_irq(gc->irq.domain, offset);
 
        /* Set returning value depending on whether we handled an interrupt */
        ret = pending ? ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
index aed988e..c2a2c76 100644 (file)
@@ -84,7 +84,7 @@ static irqreturn_t sdv_gpio_pub_irq_handler(int irq, void *data)
                return IRQ_NONE;
 
        for_each_set_bit(irq_bit, &irq_stat, 32)
-               generic_handle_irq(irq_find_mapping(sd->id, irq_bit));
+               generic_handle_domain_irq(sd->id, irq_bit);
 
        return IRQ_HANDLED;
 }
index 25c37ed..9dd9dab 100644 (file)
@@ -189,7 +189,7 @@ static void sprd_gpio_irq_handler(struct irq_desc *desc)
        struct gpio_chip *chip = irq_desc_get_handler_data(desc);
        struct irq_chip *ic = irq_desc_get_chip(desc);
        struct sprd_gpio *sprd_gpio = gpiochip_get_data(chip);
-       u32 bank, n, girq;
+       u32 bank, n;
 
        chained_irq_enter(ic, desc);
 
@@ -198,13 +198,9 @@ static void sprd_gpio_irq_handler(struct irq_desc *desc)
                unsigned long reg = readl_relaxed(base + SPRD_GPIO_MIS) &
                        SPRD_GPIO_BANK_MASK;
 
-               for_each_set_bit(n, &reg, SPRD_GPIO_BANK_NR) {
-                       girq = irq_find_mapping(chip->irq.domain,
-                                               bank * SPRD_GPIO_BANK_NR + n);
-
-                       generic_handle_irq(girq);
-               }
-
+               for_each_set_bit(n, &reg, SPRD_GPIO_BANK_NR)
+                       generic_handle_domain_irq(chip->irq.domain,
+                                                 bank * SPRD_GPIO_BANK_NR + n);
        }
        chained_irq_exit(ic, desc);
 }
index 866201c..718a508 100644 (file)
@@ -100,7 +100,7 @@ static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
        int i;
 
        for_each_set_bit(i, &bits, 32)
-               generic_handle_irq(irq_find_mapping(tb10x_gpio->domain, i));
+               generic_handle_domain_irq(tb10x_gpio->domain, i);
 
        return IRQ_HANDLED;
 }
index 0025f61..7f5bc10 100644 (file)
@@ -408,6 +408,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
                lvl = tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio));
 
                for_each_set_bit(pin, &sta, 8) {
+                       int ret;
+
                        tegra_gpio_writel(tgi, 1 << pin,
                                          GPIO_INT_CLR(tgi, gpio));
 
@@ -420,11 +422,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
                                chained_irq_exit(chip, desc);
                        }
 
-                       irq = irq_find_mapping(domain, gpio + pin);
-                       if (WARN_ON(irq == 0))
-                               continue;
-
-                       generic_handle_irq(irq);
+                       ret = generic_handle_domain_irq(domain, gpio + pin);
+                       WARN_RATELIMIT(ret, "hwirq = %d", gpio + pin);
                }
        }
 
index d38980b..05c90d7 100644 (file)
@@ -456,7 +456,7 @@ static void tegra186_gpio_irq(struct irq_desc *desc)
 
        for (i = 0; i < gpio->soc->num_ports; i++) {
                const struct tegra_gpio_port *port = &gpio->soc->ports[i];
-               unsigned int pin, irq;
+               unsigned int pin;
                unsigned long value;
                void __iomem *base;
 
@@ -469,11 +469,8 @@ static void tegra186_gpio_irq(struct irq_desc *desc)
                value = readl(base + TEGRA186_GPIO_INTERRUPT_STATUS(1));
 
                for_each_set_bit(pin, &value, port->pins) {
-                       irq = irq_find_mapping(domain, offset + pin);
-                       if (WARN_ON(irq == 0))
-                               continue;
-
-                       generic_handle_irq(irq);
+                       int ret = generic_handle_domain_irq(domain, offset + pin);
+                       WARN_RATELIMIT(ret, "hwirq = %d", offset + pin);
                }
 
 skip:
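
Both Tegra handlers replace the WARN_ON(irq == 0)-and-continue pattern with feeding the generic_handle_domain_irq() return value into WARN_RATELIMIT(), so spurious interrupts on an unmapped line produce a rate-limited warning instead of a log flood while the handler keeps iterating. A minimal sketch of the pattern, with placeholder names:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/ratelimit.h>

static void handle_one_hwirq(struct irq_domain *domain, unsigned int hwirq)
{
	/* Returns 0 on success, -EINVAL if hwirq has no mapping. */
	int ret = generic_handle_domain_irq(domain, hwirq);

	/* Rate-limited: warns only while the ratelimit budget allows it. */
	WARN_RATELIMIT(ret, "hwirq = %u", hwirq);
}
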
index 5022e0a..5b10322 100644 (file)
@@ -183,7 +183,7 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
        struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
        struct irq_chip *irq_chip = irq_desc_get_chip(desc);
        unsigned long irq_bits;
-       int i = 0, child_irq;
+       int i = 0;
        u8 irq_status;
 
        chained_irq_enter(irq_chip, desc);
@@ -192,11 +192,9 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
        tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
 
        irq_bits = irq_status;
-       for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
-               child_irq = irq_find_mapping(gpio->chip.irq.domain,
-                                            i + TQMX86_NGPO);
-               generic_handle_irq(child_irq);
-       }
+       for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
+               generic_handle_domain_irq(gpio->chip.irq.domain,
+                                         i + TQMX86_NGPO);
 
        chained_irq_exit(irq_chip, desc);
 }
@@ -238,8 +236,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
        struct resource *res;
        int ret, irq;
 
-       irq = platform_get_irq(pdev, 0);
-       if (irq < 0)
+       irq = platform_get_irq_optional(pdev, 0);
+       if (irq < 0 && irq != -ENXIO)
                return irq;
 
        res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -278,7 +276,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
 
        pm_runtime_enable(&pdev->dev);
 
-       if (irq) {
+       if (irq > 0) {
                struct irq_chip *irq_chip = &gpio->irq_chip;
                u8 irq_status;
 
index 58776f2..e0f2b67 100644 (file)
@@ -149,7 +149,7 @@ static void vf610_gpio_irq_handler(struct irq_desc *desc)
        for_each_set_bit(pin, &irq_isfr, VF610_GPIO_PER_PORT) {
                vf610_gpio_writel(BIT(pin), port->base + PORT_ISFR);
 
-               generic_handle_irq(irq_find_mapping(port->gc.irq.domain, pin));
+               generic_handle_domain_irq(port->gc.irq.domain, pin);
        }
 
        chained_irq_exit(chip, desc);
index 2d89d05..bb02a82 100644 (file)
@@ -339,8 +339,8 @@ static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
                for_each_set_bit(port, &int_pending, 3) {
                        int_id = inb(ws16c48gpio->base + 8 + port);
                        for_each_set_bit(gpio, &int_id, 8)
-                               generic_handle_irq(irq_find_mapping(
-                                       chip->irq.domain, gpio + 8*port));
+                               generic_handle_domain_irq(chip->irq.domain,
+                                                         gpio + 8*port);
                }
 
                int_pending = inb(ws16c48gpio->base + 6) & 0x7;
index ad5489a..fa9b4d8 100644 (file)
@@ -185,7 +185,7 @@ static irqreturn_t iproc_gpio_irq_handler(int irq, void *data)
                int_bits = level | event;
 
                for_each_set_bit(bit, &int_bits, gc->ngpio)
-                       generic_handle_irq(irq_linear_revmap(gc->irq.domain, bit));
+                       generic_handle_domain_irq(gc->irq.domain, bit);
        }
 
        return int_bits ? IRQ_HANDLED : IRQ_NONE;
index c329c3a..a1b6633 100644 (file)
@@ -538,7 +538,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
 
        for_each_set_bit(bit, all, 64) {
                irq_offset = xgpio_from_bit(chip, bit);
-               generic_handle_irq(irq_find_mapping(gc->irq.domain, irq_offset));
+               generic_handle_domain_irq(gc->irq.domain, irq_offset);
        }
 
        chained_irq_exit(irqchip, desc);
index d7b16bb..0d94d3a 100644 (file)
@@ -216,8 +216,7 @@ static void xlp_gpio_generic_handler(struct irq_desc *desc)
                }
 
                if (gpio_stat & BIT(gpio % XLP_GPIO_REGSZ))
-                       generic_handle_irq(irq_find_mapping(
-                                               priv->chip.irq.domain, gpio));
+                       generic_handle_domain_irq(priv->chip.irq.domain, gpio);
        }
        chained_irq_exit(irqchip, desc);
 }
index f0cb8cc..06c6401 100644 (file)
@@ -628,12 +628,8 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
        if (!pending)
                return;
 
-       for_each_set_bit(offset, &pending, 32) {
-               unsigned int gpio_irq;
-
-               gpio_irq = irq_find_mapping(irqdomain, offset + bank_offset);
-               generic_handle_irq(gpio_irq);
-       }
+       for_each_set_bit(offset, &pending, 32)
+               generic_handle_domain_irq(irqdomain, offset + bank_offset);
 }
 
 /**
index 6cc0d4f..a9ce3b2 100644 (file)
@@ -1040,7 +1040,7 @@ void amdgpu_acpi_detect(void)
  */
 bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
 {
-#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
+#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
        if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
                if (adev->flags & AMD_IS_APU)
                        return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
index 3b5d131..8f53837 100644 (file)
@@ -468,6 +468,46 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
        return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
 }
 
+/*
+ * Helper function to query the RAS EEPROM I2C address
+ *
+ * @adev: amdgpu_device pointer
+ * @i2c_address: filled with the ROM's I2C slave address when reported
+ *
+ * Returns true if the VBIOS reports a RAS ROM address, false otherwise.
+ */
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       int index;
+       u16 data_offset, size;
+       union firmware_info *firmware_info;
+       u8 frev, crev;
+
+       if (i2c_address == NULL)
+               return false;
+
+       *i2c_address = 0;
+
+       index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+                       firmwareinfo);
+
+       if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+                               index, &size, &frev, &crev, &data_offset)) {
+               /* support firmware_info 3.4 + */
+               if ((frev == 3 && crev >=4) || (frev > 3)) {
+                       firmware_info = (union firmware_info *)
+                               (mode_info->atom_context->bios + data_offset);
+                       *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+               }
+       }
+
+       if (*i2c_address != 0)
+               return true;
+
+       return false;
+}
+
 union smu_info {
        struct atom_smu_info_v3_1 v31;
 };
index 1bbbb19..751248b 100644 (file)
@@ -36,6 +36,7 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address);
 bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
index f3fd5ec..f944ed8 100644 (file)
@@ -2777,12 +2777,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
 
-       mutex_lock(&adev->gfx.gfx_off_mutex);
-       if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
-                       adev->gfx.gfx_off_state = true;
-       }
-       mutex_unlock(&adev->gfx.gfx_off_mutex);
+       WARN_ON_ONCE(adev->gfx.gfx_off_state);
+       WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
+
+       if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+               adev->gfx.gfx_off_state = true;
 }
 
 /**
index 43e7b61..ada7bc1 100644 (file)
@@ -299,6 +299,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                                  ip->major, ip->minor,
                                  ip->revision);
 
+                       if (le16_to_cpu(ip->hw_id) == VCN_HWID)
+                               adev->vcn.num_vcn_inst++;
+
                        for (k = 0; k < num_base_address; k++) {
                                /*
                                 * convert the endianness of base addresses in place,
@@ -385,7 +388,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 {
        struct binary_header *bhdr;
        struct harvest_table *harvest_info;
-       int i;
+       int i, vcn_harvest_count = 0;
 
        bhdr = (struct binary_header *)adev->mman.discovery_bin;
        harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
@@ -397,8 +400,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 
                switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
                case VCN_HWID:
-                       adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
-                       adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+                       vcn_harvest_count++;
                        break;
                case DMU_HWID:
                        adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -407,6 +409,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
                        break;
                }
        }
+       if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+               adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+               adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+       }
 }
 
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
index 361b86b..971c5b8 100644 (file)
@@ -1213,6 +1213,13 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
        {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
 
+       /* BEIGE_GOBY */
+       {0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+
        {0, 0, 0}
 };
 
@@ -1564,6 +1571,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
                pci_ignore_hotplug(pdev);
                pci_set_power_state(pdev, PCI_D3cold);
                drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+       } else if (amdgpu_device_supports_boco(drm_dev)) {
+               /* nothing to do */
        } else if (amdgpu_device_supports_baco(drm_dev)) {
                amdgpu_device_baco_enter(drm_dev);
        }
index a0be077..b4ced45 100644 (file)
@@ -563,24 +563,38 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
        mutex_lock(&adev->gfx.gfx_off_mutex);
 
-       if (!enable)
-               adev->gfx.gfx_off_req_count++;
-       else if (adev->gfx.gfx_off_req_count > 0)
+       if (enable) {
+               /* If the count is already 0, it means there's an imbalance bug somewhere.
+                * Note that the bug may be in a different caller than the one which triggers the
+                * WARN_ON_ONCE.
+                */
+               if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
+                       goto unlock;
+
                adev->gfx.gfx_off_req_count--;
 
-       if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-               schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
-       } else if (!enable && adev->gfx.gfx_off_state) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
-                       adev->gfx.gfx_off_state = false;
+               if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
+                       schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+       } else {
+               if (adev->gfx.gfx_off_req_count == 0) {
+                       cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+
+                       if (adev->gfx.gfx_off_state &&
+                           !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+                               adev->gfx.gfx_off_state = false;
 
-                       if (adev->gfx.funcs->init_spm_golden) {
-                               dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
-                               amdgpu_gfx_init_spm_golden(adev);
+                               if (adev->gfx.funcs->init_spm_golden) {
+                                       dev_dbg(adev->dev,
+                                               "GFXOFF is disabled, re-init SPM golden settings\n");
+                                       amdgpu_gfx_init_spm_golden(adev);
+                               }
                        }
                }
+
+               adev->gfx.gfx_off_req_count++;
        }
 
+unlock:
        mutex_unlock(&adev->gfx.gfx_off_mutex);
 }
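
Together with the earlier amdgpu_device_delay_enable_gfx_off() change, the control path now owns all of the state transitions: the delayed work is only scheduled once the request count drops to zero, and a disable request cancels it synchronously before touching the hardware, which is why the worker can assert its preconditions with WARN_ON_ONCE() instead of re-checking under the mutex. A reduced sketch of this refcount-plus-delayed-work pattern, using hypothetical names rather than amdgpu's actual structures:

#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct my_dev {
	struct mutex lock;
	unsigned int off_req_count;	/* > 0 means "keep the block powered" */
	bool off_state;			/* true once the block is powered off */
	struct delayed_work off_work;
};

/* Hypothetical hardware hook; returns 0 on success. */
static int my_hw_set_off(struct my_dev *d, bool off);

static void my_off_work(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, off_work.work);

	/* Only queued when the count hit zero, and cancelled synchronously
	 * before any disable request proceeds, so the state is known here.
	 */
	if (!my_hw_set_off(d, true))
		d->off_state = true;
}

static void my_off_ctrl(struct my_dev *d, bool allow)
{
	mutex_lock(&d->lock);
	if (allow) {
		/* Count already zero means an unbalanced caller somewhere. */
		if (!WARN_ON_ONCE(d->off_req_count == 0) &&
		    --d->off_req_count == 0 && !d->off_state)
			schedule_delayed_work(&d->off_work, HZ / 10);
	} else {
		if (d->off_req_count++ == 0) {
			cancel_delayed_work_sync(&d->off_work);
			if (d->off_state && !my_hw_set_off(d, false))
				d->off_state = false;
		}
	}
	mutex_unlock(&d->lock);
}
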
 
index 83af307..cd2e18f 100644 (file)
@@ -502,7 +502,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 
        } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
                   adev->irq.virq[src_id]) {
-               generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
+               generic_handle_domain_irq(adev->irq.domain, src_id);
 
        } else if (!adev->irq.client[client_id].sources) {
                DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
index 795fa74..92c8e6e 100644 (file)
@@ -920,11 +920,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                        return -EINVAL;
        }
 
-       /* This assumes only APU display buffers are pinned with (VRAM|GTT).
-        * See function amdgpu_display_supported_domains()
-        */
-       domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
-
        if (bo->tbo.pin_count) {
                uint32_t mem_type = bo->tbo.resource->mem_type;
                uint32_t mem_flags = bo->tbo.resource->placement;
@@ -949,6 +944,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                return 0;
        }
 
+       /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+        * See function amdgpu_display_supported_domains()
+        */
+       domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
+
        if (bo->tbo.base.import_attach)
                dma_buf_pin(bo->tbo.base.import_attach);
 
index f40c871..38222de 100644 (file)
@@ -26,6 +26,7 @@
 #include "amdgpu_ras.h"
 #include <linux/bits.h>
 #include "atom.h"
+#include "amdgpu_atomfirmware.h"
 
 #define EEPROM_I2C_TARGET_ADDR_VEGA20          0xA0
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS                0xA8
@@ -96,6 +97,9 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
        if (!i2c_addr)
                return false;
 
+       if (amdgpu_atomfirmware_ras_rom_addr(adev, (uint8_t*)i2c_addr))
+               return true;
+
        switch (adev->asic_type) {
        case CHIP_VEGA20:
                *i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
index 59e0fef..acfa207 100644 (file)
@@ -54,11 +54,12 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 {
        struct drm_mm_node *node;
 
-       if (!res) {
+       if (!res || res->mem_type == TTM_PL_SYSTEM) {
                cur->start = start;
                cur->size = size;
                cur->remaining = size;
                cur->node = NULL;
+               WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
                return;
        }
 
index 044076e..6a23c68 100644 (file)
@@ -1295,6 +1295,16 @@ static bool is_raven_kicker(struct amdgpu_device *adev)
                return false;
 }
 
+static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
+{
+       if ((adev->asic_type == CHIP_RENOIR) &&
+           (adev->gfx.me_fw_version >= 0x000000a5) &&
+           (adev->gfx.me_feature_version >= 52))
+               return true;
+       else
+               return false;
+}
+
 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 {
        if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
@@ -3675,7 +3685,16 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
        if (ring->use_doorbell) {
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                                        (adev->doorbell_index.kiq * 2) << 2);
-               WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+               /* If GC has entered CGPG, ringing a doorbell beyond the first page
+                * doesn't wake up GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to
+                * work around this issue. This change has to stay aligned with the
+                * corresponding firmware update.
+                */
+               if (check_if_enlarge_doorbell_range(adev))
+                       WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+                                       (adev->doorbell.size - 4));
+               else
+                       WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
                                        (adev->doorbell_index.userqueue_end * 2) << 2);
        }
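The comment above describes the workaround: once GC has power-gated (CGPG), only doorbell writes that land inside the MEC doorbell range wake it up again, so on Renoir with new-enough CP firmware the upper bound is opened up to the end of the doorbell aperture instead of stopping at the last user-queue doorbell. A hedged sketch of just the bound selection, with demo_* names invented for illustration:

#include <linux/types.h>

/* Pick the CP_MEC_DOORBELL_RANGE_UPPER value, per the hunk above. */
static u32 demo_mec_doorbell_upper(bool enlarge_wa, u32 doorbell_bar_size,
				   u32 userqueue_end_index)
{
	if (enlarge_wa)
		return doorbell_bar_size - 4;		/* cover the whole aperture */
	return (userqueue_end_index * 2) << 2;		/* tight range */
}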
 
index c7b364e..e883731 100644 (file)
@@ -3026,6 +3026,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
        pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
                 start + size - 1, nattr);
 
+       /* Flush pending deferred work to avoid racing with deferred actions from
+        * previous memory map changes (e.g. munmap). Concurrent memory map changes
+        * can still race with get_attr because we don't hold the mmap lock. But that
+        * would be a race condition in the application anyway, and undefined
+        * behaviour is acceptable in that case.
+        */
+       flush_work(&p->svms.deferred_list_work);
+
        mmap_read_lock(mm);
        if (!svm_range_is_valid(mm, start, size)) {
                pr_debug("invalid range\n");
index b53f49a..afa96c8 100644 (file)
@@ -1548,6 +1548,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        }
 
        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+       adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
@@ -1561,7 +1562,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                         adev->dm.dmcub_fw_version);
        }
 
-       adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;
@@ -9605,7 +9605,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                } else if (amdgpu_freesync_vid_mode && aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
-                       set_freesync_fixed_config(dm_new_crtc_state);
+                       struct drm_display_mode *high_mode;
+
+                       high_mode = get_highest_refresh_rate_mode(aconnector, false);
+                       if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+                               set_freesync_fixed_config(dm_new_crtc_state);
+                       }
                }
 
                ret = dm_atomic_get_state(state, &dm_state);
index 40f617b..4aba0e8 100644 (file)
@@ -584,7 +584,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
                handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
 
                /*allocate a new amdgpu_dm_irq_handler_data*/
-               handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+               handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
                if (!handler_data_add) {
                        DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
                        return;
index c6f494f..6185f94 100644 (file)
@@ -66,9 +66,11 @@ int rn_get_active_display_cnt_wa(
        for (i = 0; i < context->stream_count; i++) {
                const struct dc_stream_state *stream = context->streams[i];
 
+               /* Extend the WA to DP for Linux */
                if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
                                stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
-                               stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+                               stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
+                               stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
                        tmds_present = true;
        }
 
index 605e297..a30283f 100644 (file)
@@ -1530,6 +1530,12 @@ void dc_z10_restore(struct dc *dc)
        if (dc->hwss.z10_restore)
                dc->hwss.z10_restore(dc);
 }
+
+void dc_z10_save_init(struct dc *dc)
+{
+       if (dc->hwss.z10_save_init)
+               dc->hwss.z10_save_init(dc);
+}
 #endif
 /*
  * Applies given context to HW and copy it into current context.
index 9fb8c46..a6d0fd2 100644 (file)
@@ -3602,29 +3602,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
 bool dp_retrieve_lttpr_cap(struct dc_link *link)
 {
        uint8_t lttpr_dpcd_data[6];
-       bool vbios_lttpr_enable = false;
-       bool vbios_lttpr_interop = false;
-       struct dc_bios *bios = link->dc->ctx->dc_bios;
+       bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
+       bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
        enum dc_status status = DC_ERROR_UNEXPECTED;
        bool is_lttpr_present = false;
 
        memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
-       /* Query BIOS to determine if LTTPR functionality is forced on by system */
-       if (bios->funcs->get_lttpr_caps) {
-               enum bp_result bp_query_result;
-               uint8_t is_vbios_lttpr_enable = 0;
-
-               bp_query_result = bios->funcs->get_lttpr_caps(bios, &is_vbios_lttpr_enable);
-               vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
-       }
-
-       if (bios->funcs->get_lttpr_interop) {
-               enum bp_result bp_query_result;
-               uint8_t is_vbios_interop_enabled = 0;
-
-               bp_query_result = bios->funcs->get_lttpr_interop(bios, &is_vbios_interop_enabled);
-               vbios_lttpr_interop = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
-       }
 
        /*
         * Logic to determine LTTPR mode
index f2b39ec..cde8ed2 100644 (file)
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
                 */
                memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
                dc->vm_pa_config.valid = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+               dc_z10_save_init(dc);
+#endif
        }
 
        return num_vmids;
index 8dcea8f..21d7828 100644 (file)
@@ -183,6 +183,8 @@ struct dc_caps {
        unsigned int cursor_cache_size;
        struct dc_plane_cap planes[MAX_PLANES];
        struct dc_color_caps color;
+       bool vbios_lttpr_aware;
+       bool vbios_lttpr_enable;
 };
 
 struct dc_bug_wa {
@@ -1336,6 +1338,7 @@ void dc_hardware_release(struct dc *dc);
 bool dc_set_psr_allow_active(struct dc *dc, bool enable);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 void dc_z10_restore(struct dc *dc);
+void dc_z10_save_init(struct dc *dc);
 #endif
 
 bool dc_enable_dmub_notifications(struct dc *dc);
index 7fa9fc6..f6e747f 100644 (file)
@@ -464,7 +464,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
 
        REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
                        MASTER_UPDATE_LOCK_DB_X,
-                       h_blank_start - 200 - 1,
+                       (h_blank_start - 200 - 1) / optc1->opp_count,
                        MASTER_UPDATE_LOCK_DB_Y,
                        v_blank_start - 1);
 }
index 596c97d..28e15eb 100644 (file)
@@ -1788,7 +1788,6 @@ static bool dcn30_split_stream_for_mpc_or_odm(
                }
                pri_pipe->next_odm_pipe = sec_pipe;
                sec_pipe->prev_odm_pipe = pri_pipe;
-               ASSERT(sec_pipe->top_pipe == NULL);
 
                if (!sec_pipe->top_pipe)
                        sec_pipe->stream_res.opp = pool->opps[pipe_idx];
@@ -2617,6 +2616,26 @@ static bool dcn30_resource_construct(
        dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
        dc->caps.color.mpc.ocsc = 1;
 
+       /* read VBIOS LTTPR caps */
+       {
+               if (ctx->dc_bios->funcs->get_lttpr_caps) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_lttpr_enable = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+                       dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+               }
+
+               if (ctx->dc_bios->funcs->get_lttpr_interop) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_interop_enabled = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios,
+                                       &is_vbios_interop_enabled);
+                       dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+               }
+       }
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
index 9776d17..912285f 100644 (file)
@@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
        dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
 }
 
-static void calculate_wm_set_for_vlevel(
-               int vlevel,
-               struct wm_range_table_entry *table_entry,
-               struct dcn_watermarks *wm_set,
-               struct display_mode_lib *dml,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt)
-{
-       double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
-
-       ASSERT(vlevel < dml->soc.num_states);
-       /* only pipe 0 is read for voltage and dcf/soc clocks */
-       pipes[0].clks_cfg.voltage = vlevel;
-       pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
-       pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
-
-       dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
-       dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
-       dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
-
-       wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
-       wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
-       wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
-       wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
-       wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
-       dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
-
-}
-
-static void dcn301_calculate_wm_and_dlg(
-               struct dc *dc, struct dc_state *context,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt,
-               int vlevel_req)
-{
-       int i, pipe_idx;
-       int vlevel, vlevel_max;
-       struct wm_range_table_entry *table_entry;
-       struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
-
-       ASSERT(bw_params);
-
-       vlevel_max = bw_params->clk_table.num_entries - 1;
-
-       /* WM Set D */
-       table_entry = &bw_params->wm_table.entries[WM_D];
-       if (table_entry->wm_type == WM_TYPE_RETRAINING)
-               vlevel = 0;
-       else
-               vlevel = vlevel_max;
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-       /* WM Set C */
-       table_entry = &bw_params->wm_table.entries[WM_C];
-       vlevel = min(max(vlevel_req, 2), vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-       /* WM Set B */
-       table_entry = &bw_params->wm_table.entries[WM_B];
-       vlevel = min(max(vlevel_req, 1), vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-
-       /* WM Set A */
-       table_entry = &bw_params->wm_table.entries[WM_A];
-       vlevel = min(vlevel_req, vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-
-       for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
-               if (!context->res_ctx.pipe_ctx[i].stream)
-                       continue;
-
-               pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
-               pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
-               if (dc->config.forced_clocks) {
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
-               }
-               if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
-               if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
-               pipe_idx++;
-       }
-
-       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-}
-
 static struct resource_funcs dcn301_res_pool_funcs = {
        .destroy = dcn301_destroy_resource_pool,
        .link_enc_create = dcn301_link_encoder_create,
        .panel_cntl_create = dcn301_panel_cntl_create,
        .validate_bandwidth = dcn30_validate_bandwidth,
-       .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
+       .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
        .update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
index 833ab13..dc7823d 100644 (file)
@@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = {
 
                .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
                .num_states = 1,
-               .sr_exit_time_us = 26.5,
-               .sr_enter_plus_exit_time_us = 31,
+               .sr_exit_time_us = 35.5,
+               .sr_enter_plus_exit_time_us = 40,
                .urgent_latency_us = 4.0,
                .urgent_latency_pixel_data_only_us = 4.0,
                .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
index 6ac6faf..8a2119d 100644 (file)
@@ -404,6 +404,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
                        &pipe_ctx->stream_res.encoder_info_frame);
        }
 }
+void dcn31_z10_save_init(struct dc *dc)
+{
+       union dmub_rb_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+       cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
+
+       dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+}
 
 void dcn31_z10_restore(struct dc *dc)
 {
index 40dfebe..140435e 100644 (file)
@@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane(
 void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);
 
 void dcn31_z10_restore(struct dc *dc);
+void dcn31_z10_save_init(struct dc *dc);
 
 void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
 int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config);
index aaf2dbd..b30d923 100644 (file)
@@ -97,6 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
        .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
        .set_pipe = dcn21_set_pipe,
        .z10_restore = dcn31_z10_restore,
+       .z10_save_init = dcn31_z10_save_init,
        .is_abm_supported = dcn31_is_abm_supported,
        .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
        .update_visual_confirm_color = dcn20_update_visual_confirm_color,
index 38c010a..cd3248d 100644 (file)
@@ -1968,6 +1968,22 @@ static bool dcn31_resource_construct(
        dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
        dc->caps.color.mpc.ocsc = 1;
 
+       /* read VBIOS LTTPR caps */
+       {
+               if (ctx->dc_bios->funcs->get_lttpr_caps) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_lttpr_enable = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+                       dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+               }
+
+               /* interop bit is implicit */
+               {
+                       dc->caps.vbios_lttpr_aware = true;
+               }
+       }
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
index 5ab008e..ad5f2ad 100644 (file)
@@ -237,6 +237,7 @@ struct hw_sequencer_funcs {
                        int width, int height, int offset);
 
        void (*z10_restore)(struct dc *dc);
+       void (*z10_save_init)(struct dc *dc);
 
        void (*update_visual_confirm_color)(struct dc *dc,
                        struct pipe_ctx *pipe_ctx,
index 7c4734f..7fafb8d 100644 (file)
@@ -856,6 +856,11 @@ enum dmub_cmd_idle_opt_type {
         * DCN hardware restore.
         */
        DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0,
+
+       /**
+        * DCN hardware save.
+        */
+       DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1
 };
 
 /**
index 973de34..27c7fa3 100644 (file)
@@ -267,11 +267,13 @@ void dmub_dcn31_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
 
 bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub)
 {
-       uint32_t is_hw_init;
+       union dmub_fw_boot_status status;
+       uint32_t is_enable;
 
-       REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init);
+       status.all = REG_READ(DMCUB_SCRATCH0);
+       REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable);
 
-       return is_hw_init != 0;
+       return is_enable != 0 && status.bits.dal_fw;
 }
 
 bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
index 3811e58..4495545 100644 (file)
@@ -590,7 +590,7 @@ struct atom_firmware_info_v3_4 {
        uint8_t  board_i2c_feature_id;            // enum of atom_board_i2c_feature_id_def
        uint8_t  board_i2c_feature_gpio_id;       // i2c id find in gpio_lut data table gpio_id
        uint8_t  board_i2c_feature_slave_addr;
-       uint8_t  reserved3;
+       uint8_t  ras_rom_i2c_slave_addr;
        uint16_t bootup_mvddq_mv;
        uint16_t bootup_mvpp_mv;
        uint32_t zfbstartaddrin16mb;
index 3fea243..dc91eb6 100644 (file)
@@ -26,7 +26,7 @@
 #include "amdgpu_smu.h"
 
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x03
+#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x07
 
 /* MP Apertures */
index 5627de7..c5e26d6 100644 (file)
@@ -111,7 +111,9 @@ typedef struct {
   uint32_t InWhisperMode        : 1;
   uint32_t spare0               : 1;
   uint32_t ZstateStatus         : 4;
-  uint32_t spare1               :12;
+  uint32_t spare1               : 4;
+  uint32_t DstateFun            : 4;
+  uint32_t DstateDev            : 4;
   // MP1_EXT_SCRATCH2
   uint32_t P2JobHandler         :24;
   uint32_t RsmuPmiP2FinishedCnt : 8;
index 2597910..02e8c6e 100644 (file)
@@ -5127,6 +5127,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
        return size;
 }
 
+static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+
+       return (adev->pdev->device == 0x6860);
+}
+
 static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
 {
        struct vega10_hwmgr *data = hwmgr->backend;
@@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
        }
 
 out:
-       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+       if (vega10_get_power_profile_mode_quirks(hwmgr))
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+                                               1 << power_profile_mode,
+                                               NULL);
+       else
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
                                                (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
                                                NULL);
+
        hwmgr->power_profile_mode = power_profile_mode;
 
        return 0;
index c751f71..d92dd2c 100644 (file)
@@ -353,8 +353,7 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        uint32_t val;
 
-       if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
-           powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) {
+       if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO) {
                val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
                smu_baco->platform_support =
                        (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
index 18681dc..bcaaa08 100644 (file)
@@ -256,7 +256,7 @@ static int vangogh_tables_init(struct smu_context *smu)
        return 0;
 
 err3_out:
-       kfree(smu_table->clocks_table);
+       kfree(smu_table->watermarks_table);
 err2_out:
        kfree(smu_table->gpu_metrics_table);
 err1_out:
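The vangogh hunk fixes a classic goto-unwind slip: each error label must free exactly the allocations that succeeded before the jump, in reverse order, so err3_out has to release the watermarks table rather than the clocks table. A minimal sketch of the pattern with made-up names:

#include <linux/slab.h>
#include <linux/errno.h>

static int demo_tables_init(void **a, void **b, void **c)
{
	*a = kzalloc(64, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;
	*b = kzalloc(64, GFP_KERNEL);
	if (!*b)
		goto err_free_a;
	*c = kzalloc(64, GFP_KERNEL);
	if (!*c)
		goto err_free_b;
	return 0;

err_free_b:
	kfree(*b);	/* undo the second allocation... */
err_free_a:
	kfree(*a);	/* ...then the first, in reverse order */
	return -ENOMEM;
}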
index d299079..5d82891 100644 (file)
@@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        req.request.sequence = req32.request.sequence;
        req.request.signal = req32.request.signal;
        err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
-       if (err)
-               return err;
 
        req32.reply.type = req.reply.type;
        req32.reply.sequence = req.reply.sequence;
@@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        if (copy_to_user(argp, &req32, sizeof(req32)))
                return -EFAULT;
 
-       return 0;
+       return err;
 }
 
 #if defined(CONFIG_X86)
index be716b5..00dade4 100644 (file)
@@ -2463,6 +2463,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
        }
 }
 
+/* Splitter enable for eDP MSO is limited to certain pipes. */
+static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+{
+       if (IS_ALDERLAKE_P(i915))
+               return BIT(PIPE_A) | BIT(PIPE_B);
+       else
+               return BIT(PIPE_A);
+}
+
 static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
 {
@@ -2480,8 +2489,7 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
        if (!pipe_config->splitter.enable)
                return;
 
-       /* Splitter enable is supported for pipe A only. */
-       if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) {
+       if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
                pipe_config->splitter.enable = false;
                return;
        }
@@ -2513,10 +2521,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
                return;
 
        if (crtc_state->splitter.enable) {
-               /* Splitter enable is supported for pipe A only. */
-               if (drm_WARN_ON(&i915->drm, pipe != PIPE_A))
-                       return;
-
                dss1 |= SPLITTER_ENABLE;
                dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
                if (crtc_state->splitter.link_count == 2)
@@ -4743,12 +4747,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 
                dig_port->hpd_pulse = intel_dp_hpd_pulse;
 
-               /* Splitter enable for eDP MSO is limited to certain pipes. */
-               if (dig_port->dp.mso_link_count) {
-                       encoder->pipe_mask = BIT(PIPE_A);
-                       if (IS_ALDERLAKE_P(dev_priv))
-                               encoder->pipe_mask |= BIT(PIPE_B);
-               }
+               if (dig_port->dp.mso_link_count)
+                       encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
        }
 
        /* In theory we don't need the encoder->type check, but leave it just in
index 2d5d217..0a8a239 100644 (file)
@@ -5746,16 +5746,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
 
        switch (crtc_state->pipe_bpp) {
        case 18:
-               val |= PIPEMISC_DITHER_6_BPC;
+               val |= PIPEMISC_6_BPC;
                break;
        case 24:
-               val |= PIPEMISC_DITHER_8_BPC;
+               val |= PIPEMISC_8_BPC;
                break;
        case 30:
-               val |= PIPEMISC_DITHER_10_BPC;
+               val |= PIPEMISC_10_BPC;
                break;
        case 36:
-               val |= PIPEMISC_DITHER_12_BPC;
+               /* Port output 12BPC defined for ADLP+ */
+               if (DISPLAY_VER(dev_priv) > 12)
+                       val |= PIPEMISC_12_BPC_ADLP;
                break;
        default:
                MISSING_CASE(crtc_state->pipe_bpp);
@@ -5808,15 +5810,27 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
 
        tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
 
-       switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
-       case PIPEMISC_DITHER_6_BPC:
+       switch (tmp & PIPEMISC_BPC_MASK) {
+       case PIPEMISC_6_BPC:
                return 18;
-       case PIPEMISC_DITHER_8_BPC:
+       case PIPEMISC_8_BPC:
                return 24;
-       case PIPEMISC_DITHER_10_BPC:
+       case PIPEMISC_10_BPC:
                return 30;
-       case PIPEMISC_DITHER_12_BPC:
-               return 36;
+       /*
+        * PORT OUTPUT 12 BPC defined for ADLP+.
+        *
+        * TODO:
+        * For previous platforms with DSI interface, bits 5:7
+        * are used for storing pipe_bpp irrespective of dithering.
+        * Since the value of 12 BPC is not defined for these bits
+        * on older platforms, we need to find a workaround for 12 BPC
+        * MIPI DSI HW readout.
+        */
+       case PIPEMISC_12_BPC_ADLP:
+               if (DISPLAY_VER(dev_priv) > 12)
+                       return 36;
+               fallthrough;
        default:
                MISSING_CASE(tmp);
                return 0;
index 4298ae6..86b7ac7 100644 (file)
@@ -6387,13 +6387,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
        if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
            IS_BROXTON(i915)) {
                bxt_enable_dc9(i915);
-               /* Tweaked Wa_14010685332:icp,jsp,mcc */
-               if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
-                       intel_de_rmw(i915, SOUTH_CHICKEN1,
-                                    SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_enable_pc8(i915);
        }
+
+       /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+       if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+               intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
 }
 
 void intel_display_power_resume_early(struct drm_i915_private *i915)
@@ -6402,13 +6402,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915)
            IS_BROXTON(i915)) {
                gen9_sanitize_dc_state(i915);
                bxt_disable_dc9(i915);
-               /* Tweaked Wa_14010685332:icp,jsp,mcc */
-               if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
-                       intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
-
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_disable_pc8(i915);
        }
+
+       /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+       if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+               intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
 }
 
 void intel_display_power_suspend(struct drm_i915_private *i915)
index 6cc03b9..862c1df 100644 (file)
@@ -3850,23 +3850,18 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
 
 static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 val;
 
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;
 
        if (drm_dp_dpcd_readb(&intel_dp->aux,
-                             DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
-               drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
+                             DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
                return;
-       }
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
-                              DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
-               drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
+                              DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
                return;
-       }
 
        if (val & HDMI_LINK_STATUS_CHANGED)
                intel_dp_handle_hdmi_link_status_change(intel_dp);
index 08bceae..053a3c2 100644 (file)
@@ -206,7 +206,6 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
 
        return lttpr_count;
 }
-EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
 
 static u8 dp_voltage_max(u8 preemph)
 {
index c4a126c..1257f4f 100644 (file)
@@ -127,6 +127,15 @@ static void intel_timeline_fini(struct rcu_head *rcu)
 
        i915_vma_put(timeline->hwsp_ggtt);
        i915_active_fini(&timeline->active);
+
+       /*
+        * A small race exists between intel_gt_retire_requests_timeout and
+        * intel_timeline_exit which could result in the syncmap not getting
+        * freed. Rather than work too hard to seal this race, simply clean up
+        * the syncmap on fini.
+        */
+       i915_syncmap_free(&timeline->sync);
+
        kfree(timeline);
 }
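The added comment describes a belt-and-braces cleanup: the syncmap is normally torn down on timeline exit, but a narrow race with request retirement can leave it allocated, so the final free path releases it again as a safety net. A generic sketch of that idempotent-cleanup-on-final-free pattern, with demo_* names standing in for the i915 internals:

#include <linux/slab.h>

struct demo_obj {
	void *lazy_cache;	/* allocated on demand, usually freed on exit */
};

static void demo_cache_free(struct demo_obj *o)
{
	kfree(o->lazy_cache);
	o->lazy_cache = NULL;	/* makes a second call harmless */
}

static void demo_obj_release(struct demo_obj *o)
{
	/* Safety net: also free here in case the exit path lost the race. */
	demo_cache_free(o);
	kfree(o);
}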
 
index 06024d3..cde0a47 100644 (file)
@@ -3149,6 +3149,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
        MMIO_D(_MMIO(0xb110), D_BDW);
+       MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);
 
        MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
                D_BDW_PLUS, NULL, force_nonpriv_write);
index b8ac807..f776c47 100644 (file)
@@ -105,6 +105,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
        {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
        {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
        {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+       {RCS0, GEN9_SCRATCH1, 0, false}, /* 0xb11c */
+       {RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */
        {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
        {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
        {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
index 77f1911..3acb0b6 100644 (file)
@@ -138,7 +138,7 @@ void i915_globals_unpark(void)
        atomic_inc(&active);
 }
 
-static void __exit __i915_globals_flush(void)
+static void  __i915_globals_flush(void)
 {
        atomic_inc(&active); /* skip shrinking */
 
@@ -148,7 +148,7 @@ static void __exit __i915_globals_flush(void)
        atomic_dec(&active);
 }
 
-void __exit i915_globals_exit(void)
+void i915_globals_exit(void)
 {
        GEM_BUG_ON(atomic_read(&active));
 
index 35c97c3..9666646 100644 (file)
@@ -727,9 +727,18 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
        if (GRAPHICS_VER(m->i915) >= 12) {
                int i;
 
-               for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+               for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+                       /*
+                        * SFC_DONE resides in the VD forcewake domain, so it
+                        * only exists if the corresponding VCS engine is
+                        * present.
+                        */
+                       if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+                               continue;
+
                        err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
                                   gt->sfc_done[i]);
+               }
 
                err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
        }
@@ -1581,6 +1590,14 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
 
        if (GRAPHICS_VER(i915) >= 12) {
                for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+                       /*
+                        * SFC_DONE resides in the VD forcewake domain, so it
+                        * only exists if the corresponding VCS engine is
+                        * present.
+                        */
+                       if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+                               continue;
+
                        gt->sfc_done[i] =
                                intel_uncore_read(uncore, GEN12_SFC_DONE(i));
                }
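Both error-capture hunks add the same guard: SFC_DONE lives in the video-decode forcewake domain, so instance i is only readable when the paired VCS engine (VCS(2*i)) actually exists on the part, and absent instances are skipped rather than read. A hedged sketch of that "skip absent instances" loop, with demo_* names invented for illustration:

#include <linux/types.h>
#include <linux/bits.h>

static void demo_record_sfc(unsigned long vcs_mask, u32 *out, int max)
{
	int i;

	for (i = 0; i < max; i++) {
		/* SFC instance i is paired with video engine VCS(2*i); skip
		 * instances whose engine is fused off instead of reading a
		 * register in a powered-down forcewake domain.
		 */
		if (!(vcs_mask & BIT(2 * i)))
			continue;

		out[i] = 0;	/* stand-in for intel_uncore_read(GEN12_SFC_DONE(i)) */
	}
}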
index c039431..c3816f5 100644 (file)
@@ -3064,24 +3064,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
        spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
-{
-       struct intel_uncore *uncore = &dev_priv->uncore;
-
-       /*
-        * Wa_14010685332:cnp/cmp,tgp,adp
-        * TODO: Clarify which platforms this applies to
-        * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
-        * on earlier platforms and whether the workaround is also needed for runtime suspend/resume
-        */
-       if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
-           (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
-               intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
-                                SBCLK_RUN_REFCLK_DIS);
-               intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
-       }
-}
-
 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3115,7 +3097,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
        if (HAS_PCH_SPLIT(dev_priv))
                ibx_irq_reset(dev_priv);
 
-       cnp_display_clock_wa(dev_priv);
 }
 
 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -3159,8 +3140,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
 
        if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                GEN3_IRQ_RESET(uncore, SDE);
-
-       cnp_display_clock_wa(dev_priv);
 }
 
 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
index 83b500b..2880ec5 100644 (file)
@@ -1195,6 +1195,7 @@ static int __init i915_init(void)
        err = pci_register_driver(&i915_pci_driver);
        if (err) {
                i915_pmu_exit();
+               i915_globals_exit();
                return err;
        }
 
index 94fde5c..476bb3b 100644 (file)
@@ -422,7 +422,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   GEN12_HCP_SFC_LOCK_ACK_BIT           REG_BIT(1)
 #define   GEN12_HCP_SFC_USAGE_BIT                      REG_BIT(0)
 
-#define GEN12_SFC_DONE(n)              _MMIO(0x1cc00 + (n) * 0x100)
+#define GEN12_SFC_DONE(n)              _MMIO(0x1cc000 + (n) * 0x1000)
 #define GEN12_SFC_DONE_MAX             4
 
 #define RING_PP_DIR_BASE(base)         _MMIO((base) + 0x228)
@@ -6163,11 +6163,17 @@ enum {
 #define   PIPEMISC_HDR_MODE_PRECISION  (1 << 23) /* icl+ */
 #define   PIPEMISC_OUTPUT_COLORSPACE_YUV  (1 << 11)
 #define   PIPEMISC_PIXEL_ROUNDING_TRUNC        REG_BIT(8) /* tgl+ */
-#define   PIPEMISC_DITHER_BPC_MASK     (7 << 5)
-#define   PIPEMISC_DITHER_8_BPC                (0 << 5)
-#define   PIPEMISC_DITHER_10_BPC       (1 << 5)
-#define   PIPEMISC_DITHER_6_BPC                (2 << 5)
-#define   PIPEMISC_DITHER_12_BPC       (3 << 5)
+/*
+ * For Display < 13, bits 5-7 of PIPE MISC represent DITHER BPC, with
+ * valid values of 6, 8 and 10 BPC.
+ * On ADLP+, bits 5-7 represent PORT OUTPUT BPC, with valid values of
+ * 6, 8, 10 and 12 BPC.
+ */
+#define   PIPEMISC_BPC_MASK            (7 << 5)
+#define   PIPEMISC_8_BPC               (0 << 5)
+#define   PIPEMISC_10_BPC              (1 << 5)
+#define   PIPEMISC_6_BPC               (2 << 5)
+#define   PIPEMISC_12_BPC_ADLP         (4 << 5) /* adlp+ */
 #define   PIPEMISC_DITHER_ENABLE       (1 << 4)
 #define   PIPEMISC_DITHER_TYPE_MASK    (3 << 2)
 #define   PIPEMISC_DITHER_TYPE_SP      (0 << 2)
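Two of the definitions above are worth a quick sanity check. The corrected GEN12_SFC_DONE(n) macro places the four instances at a 0x1000 stride: 0x1cc000 + n * 0x1000 gives 0x1cc000, 0x1cd000, 0x1ce000 and 0x1cf000 for n = 0..3, whereas the old 0x1cc00/0x100 encoding pointed at the wrong offsets. The PIPEMISC BPC field occupies bits 5-7, so a decode helper might look like the sketch below (demo_* names are illustrative, not i915 code):

#include <linux/types.h>

#define DEMO_PIPEMISC_BPC_MASK	(7u << 5)
#define DEMO_PIPEMISC_8_BPC	(0u << 5)
#define DEMO_PIPEMISC_10_BPC	(1u << 5)
#define DEMO_PIPEMISC_6_BPC	(2u << 5)
#define DEMO_PIPEMISC_12_BPC	(4u << 5)	/* ADLP+ encoding only */

static int demo_pipemisc_to_bpp(u32 pipemisc, bool is_adlp_or_newer)
{
	switch (pipemisc & DEMO_PIPEMISC_BPC_MASK) {
	case DEMO_PIPEMISC_6_BPC:  return 18;	/* 6 bpc * 3 channels */
	case DEMO_PIPEMISC_8_BPC:  return 24;
	case DEMO_PIPEMISC_10_BPC: return 30;
	case DEMO_PIPEMISC_12_BPC: return is_adlp_or_newer ? 36 : 0;
	default:                   return 0;	/* reserved/unknown encoding */
	}
}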
index 8710f55..bd1f9f0 100644 (file)
@@ -683,7 +683,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
                break;
        }
 
-       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
+       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
 
        width = ipu_src_rect_width(new_state);
        height = drm_rect_height(&new_state->src) >> 16;
index 96ea1a2..f54392e 100644 (file)
@@ -203,6 +203,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
        unsigned long status, val, val1;
        int plane_id, dma0_state, dma1_state;
        struct kmb_drm_private *kmb = to_kmb(dev);
+       u32 ctrl = 0;
 
        status = kmb_read_lcd(kmb, LCD_INT_STATUS);
 
@@ -227,6 +228,19 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
                                kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
                                                    kmb->plane_status[plane_id].ctrl);
 
+                               ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
+                               if (!(ctrl & (LCD_CTRL_VL1_ENABLE |
+                                   LCD_CTRL_VL2_ENABLE |
+                                   LCD_CTRL_GL1_ENABLE |
+                                   LCD_CTRL_GL2_ENABLE))) {
+                                       /* If no LCD layers are using DMA,
+                                        * then disable DMA pipelined AXI read
+                                        * transactions.
+                                        */
+                                       kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
+                                                           LCD_CTRL_PIPELINE_DMA);
+                               }
+
                                kmb->plane_status[plane_id].disable = false;
                        }
                }
@@ -411,10 +425,10 @@ static const struct drm_driver kmb_driver = {
        .fops = &fops,
        DRM_GEM_CMA_DRIVER_OPS_VMAP,
        .name = "kmb-drm",
-       .desc = "KEEMBAY DISPLAY DRIVER ",
-       .date = "20201008",
-       .major = 1,
-       .minor = 0,
+       .desc = "KEEMBAY DISPLAY DRIVER",
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
 };
 
 static int kmb_remove(struct platform_device *pdev)
index 02e8067..ebbaa5f 100644 (file)
 #define KMB_MAX_HEIGHT                 1080 /*Max height in pixels */
 #define KMB_MIN_WIDTH                   1920 /*Max width in pixels */
 #define KMB_MIN_HEIGHT                  1080 /*Max height in pixels */
+
+#define DRIVER_DATE                    "20210223"
+#define DRIVER_MAJOR                   1
+#define DRIVER_MINOR                   1
+
 #define KMB_LCD_DEFAULT_CLK            200000000
 #define KMB_SYS_CLK_MHZ                        500
 
index d5b6195..ecee678 100644 (file)
@@ -427,8 +427,14 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
 
        kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
 
-       /* FIXME no doc on how to set output format,these values are
-        * taken from the Myriadx tests
+       /* Enable pipeline AXI read transactions for the DMA
+        * after setting graphics layers. This must be done
+        * in a separate write cycle.
+        */
+       kmb_set_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
+
+       /* FIXME no doc on how to set output format, these values are taken
+        * from the Myriadx tests
         */
        out_format |= LCD_OUTF_FORMAT_RGB888;
 
@@ -526,6 +532,11 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
                plane->id = i;
        }
 
+       /* Disable pipeline AXI read transactions for the DMA
+        * prior to setting graphics layers
+        */
+       kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
+
        return primary;
 cleanup:
        drmm_kfree(drm, plane);
index 6f4c80b..473f5bb 100644 (file)
@@ -133,6 +133,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
 
 static int mtk_disp_color_remove(struct platform_device *pdev)
 {
+       component_del(&pdev->dev, &mtk_disp_color_component_ops);
+
        return 0;
 }
 
index fa9d799..5326989 100644 (file)
@@ -423,6 +423,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
 
 static int mtk_disp_ovl_remove(struct platform_device *pdev)
 {
+       component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
+
        return 0;
 }
 
index bced555..e94738f 100644 (file)
@@ -605,11 +605,15 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
                                       struct drm_crtc_state *crtc_state,
                                       struct drm_connector_state *conn_state)
 {
-       struct mtk_dpi *dpi = bridge->driver_private;
+       struct mtk_dpi *dpi = bridge_to_dpi(bridge);
        unsigned int out_bus_format;
 
        out_bus_format = bridge_state->output_bus_cfg.format;
 
+       if (out_bus_format == MEDIA_BUS_FMT_FIXED)
+               if (dpi->conf->num_output_fmts)
+                       out_bus_format = dpi->conf->output_fmts[0];
+
        dev_dbg(dpi->dev, "input format 0x%04x, output format 0x%04x\n",
                bridge_state->input_bus_cfg.format,
                bridge_state->output_bus_cfg.format);
index 474efb8..735efe7 100644 (file)
@@ -532,13 +532,10 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
                               struct drm_atomic_state *state)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       const struct drm_plane_helper_funcs *plane_helper_funcs =
-                       plane->helper_private;
 
        if (!mtk_crtc->enabled)
                return;
 
-       plane_helper_funcs->atomic_update(plane, state);
        mtk_drm_crtc_update_config(mtk_crtc, false);
 }
 
index 75bc00e..50d2056 100644 (file)
@@ -34,6 +34,7 @@
 
 #define DISP_AAL_EN                            0x0000
 #define DISP_AAL_SIZE                          0x0030
+#define DISP_AAL_OUTPUT_SIZE                   0x04d8
 
 #define DISP_DITHER_EN                         0x0000
 #define DITHER_EN                              BIT(0)
@@ -197,6 +198,7 @@ static void mtk_aal_config(struct device *dev, unsigned int w,
        struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
        mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE);
+       mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_OUTPUT_SIZE);
 }
 
 static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
index b5582dc..e6dcb34 100644 (file)
@@ -110,6 +110,35 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
                                                   true, true);
 }
 
+static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+                                      struct mtk_plane_state *mtk_plane_state)
+{
+       struct drm_framebuffer *fb = new_state->fb;
+       struct drm_gem_object *gem;
+       struct mtk_drm_gem_obj *mtk_gem;
+       unsigned int pitch, format;
+       dma_addr_t addr;
+
+       gem = fb->obj[0];
+       mtk_gem = to_mtk_gem_obj(gem);
+       addr = mtk_gem->dma_addr;
+       pitch = fb->pitches[0];
+       format = fb->format->format;
+
+       addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+       addr += (new_state->src.y1 >> 16) * pitch;
+
+       mtk_plane_state->pending.enable = true;
+       mtk_plane_state->pending.pitch = pitch;
+       mtk_plane_state->pending.format = format;
+       mtk_plane_state->pending.addr = addr;
+       mtk_plane_state->pending.x = new_state->dst.x1;
+       mtk_plane_state->pending.y = new_state->dst.y1;
+       mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
+       mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
+       mtk_plane_state->pending.rotation = new_state->rotation;
+}
+
 static void mtk_plane_atomic_async_update(struct drm_plane *plane,
                                          struct drm_atomic_state *state)
 {
@@ -126,8 +155,10 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
        plane->state->src_h = new_state->src_h;
        plane->state->src_w = new_state->src_w;
        swap(plane->state->fb, new_state->fb);
-       new_plane_state->pending.async_dirty = true;
 
+       mtk_plane_update_new_state(new_state, new_plane_state);
+       wmb(); /* Make sure the above parameters are set before update */
+       new_plane_state->pending.async_dirty = true;
        mtk_drm_crtc_async_update(new_state->crtc, plane, state);
 }
 
@@ -189,14 +220,8 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
-       struct drm_crtc *crtc = new_state->crtc;
-       struct drm_framebuffer *fb = new_state->fb;
-       struct drm_gem_object *gem;
-       struct mtk_drm_gem_obj *mtk_gem;
-       unsigned int pitch, format;
-       dma_addr_t addr;
 
-       if (!crtc || WARN_ON(!fb))
+       if (!new_state->crtc || WARN_ON(!new_state->fb))
                return;
 
        if (!new_state->visible) {
@@ -204,24 +229,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
                return;
        }
 
-       gem = fb->obj[0];
-       mtk_gem = to_mtk_gem_obj(gem);
-       addr = mtk_gem->dma_addr;
-       pitch = fb->pitches[0];
-       format = fb->format->format;
-
-       addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
-       addr += (new_state->src.y1 >> 16) * pitch;
-
-       mtk_plane_state->pending.enable = true;
-       mtk_plane_state->pending.pitch = pitch;
-       mtk_plane_state->pending.format = format;
-       mtk_plane_state->pending.addr = addr;
-       mtk_plane_state->pending.x = new_state->dst.x1;
-       mtk_plane_state->pending.y = new_state->dst.y1;
-       mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
-       mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
-       mtk_plane_state->pending.rotation = new_state->rotation;
+       mtk_plane_update_new_state(new_state, mtk_plane_state);
        wmb(); /* Make sure the above parameters are set before update */
        mtk_plane_state->pending.dirty = true;
 }
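The refactor keeps the existing publish protocol: all pending.* fields are filled in first, a write barrier orders those stores, and only then is the dirty/async_dirty flag raised, so the consumer never observes a half-written update. A generic sketch of the writer/reader pairing, with demo_* names invented for illustration (the real consumer is the mtk crtc update path):

#include <linux/types.h>
#include <asm/barrier.h>

struct demo_pending {
	u32 addr, pitch, format;
	bool dirty;
};

static void demo_publish(struct demo_pending *p, u32 addr, u32 pitch, u32 fmt)
{
	p->addr = addr;
	p->pitch = pitch;
	p->format = fmt;
	wmb();			/* order the field stores before the flag */
	p->dirty = true;
}

static bool demo_consume(struct demo_pending *p, u32 *addr)
{
	if (!p->dirty)
		return false;
	rmb();			/* pair with the writer's wmb() */
	*addr = p->addr;
	p->dirty = false;
	return true;
}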
index 446e796..0f3cafa 100644 (file)
 #define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc
 #define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd
 
+/* osd1 HDR */
+#define OSD1_HDR2_CTRL 0x38a0
+#define OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN       BIT(13)
+#define OSD1_HDR2_CTRL_REG_ONLY_MAT            BIT(16)
+
 /* osd2 scaler */
 #define OSD2_VSC_PHASE_STEP 0x3d00
 #define OSD2_VSC_INI_PHASE 0x3d01
index aede0c6..259f3e6 100644 (file)
@@ -425,9 +425,14 @@ void meson_viu_init(struct meson_drm *priv)
        if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
            meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
                meson_viu_load_matrix(priv);
-       else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+       else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
                meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
                                               true);
+               /* fix green/pink color distortion from vendor u-boot */
+               writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT |
+                               OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN, 0,
+                               priv->io_base + _REG(OSD1_HDR2_CTRL));
+       }
 
        /* Initialize OSD1 fifo control register */
        reg = VIU_OSD_DDR_PRIORITY_URGENT |
index 6b0a7bc..b466784 100644 (file)
@@ -45,20 +45,13 @@ static void dpu_mdss_irq(struct irq_desc *desc)
 
        while (interrupts) {
                irq_hw_number_t hwirq = fls(interrupts) - 1;
-               unsigned int mapping;
                int rc;
 
-               mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
-                                          hwirq);
-               if (mapping == 0) {
-                       DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
-                       break;
-               }
-
-               rc = generic_handle_irq(mapping);
+               rc = generic_handle_domain_irq(dpu_mdss->irq_controller.domain,
+                                              hwirq);
                if (rc < 0) {
-                       DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
-                                 hwirq, mapping, rc);
+                       DRM_ERROR("handle irq fail: irq=%lu rc=%d\n",
+                                 hwirq, rc);
                        break;
                }
 
index 09bd46a..2f4895b 100644 (file)
@@ -50,8 +50,7 @@ static irqreturn_t mdss_irq(int irq, void *arg)
        while (intr) {
                irq_hw_number_t hwirq = fls(intr) - 1;
 
-               generic_handle_irq(irq_find_mapping(
-                               mdp5_mdss->irqcontroller.domain, hwirq));
+               generic_handle_domain_irq(mdp5_mdss->irqcontroller.domain, hwirq);
                intr &= ~(1 << hwirq);
        }
 
index f949767..bcb0310 100644 (file)
@@ -2237,6 +2237,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                interlock[NV50_DISP_INTERLOCK_CORE] = 0;
        }
 
+       /* Finish updating head(s)...
+        *
+        * NVD is rather picky about both where window assignments can change,
+        * *and* about certain core and window channel states matching.
+        *
+        * The EFI GOP driver on newer GPUs configures window channels with a
+        * different output format to what we do, and the core channel update
+        * in the assign_windows case above would result in a state mismatch.
+        *
+        * Delay some of the head update until after that point to work around
+        * the issue.  This only affects the initial modeset.
+        *
+        * TODO: handle this better when adding flexible window mapping
+        */
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+               struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+               struct nv50_head *head = nv50_head(crtc);
+
+               NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+                         asyh->set.mask, asyh->clr.mask);
+
+               if (asyh->set.mask) {
+                       nv50_head_flush_set_wndw(head, asyh);
+                       interlock[NV50_DISP_INTERLOCK_CORE] = 1;
+               }
+       }
+
        /* Update plane(s). */
        for_each_new_plane_in_state(state, plane, new_plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
index ec361d1..d66f972 100644 (file)
@@ -50,11 +50,8 @@ nv50_head_flush_clr(struct nv50_head *head,
 }
 
 void
-nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       if (asyh->set.view   ) head->func->view    (head, asyh);
-       if (asyh->set.mode   ) head->func->mode    (head, asyh);
-       if (asyh->set.core   ) head->func->core_set(head, asyh);
        if (asyh->set.olut   ) {
                asyh->olut.offset = nv50_lut_load(&head->olut,
                                                  asyh->olut.buffer,
@@ -62,6 +59,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
                                                  asyh->olut.load);
                head->func->olut_set(head, asyh);
        }
+}
+
+void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+       if (asyh->set.view   ) head->func->view    (head, asyh);
+       if (asyh->set.mode   ) head->func->mode    (head, asyh);
+       if (asyh->set.core   ) head->func->core_set(head, asyh);
        if (asyh->set.curs   ) head->func->curs_set(head, asyh);
        if (asyh->set.base   ) head->func->base    (head, asyh);
        if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
index dae841d..0bac6be 100644 (file)
@@ -21,6 +21,7 @@ struct nv50_head {
 
 struct nv50_head *nv50_head_create(struct drm_device *, int index);
 void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh);
+void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh);
 void nv50_head_flush_clr(struct nv50_head *head,
                         struct nv50_head_atom *asyh, bool flush);
 
index 0b86c44..59759c4 100644 (file)
@@ -4,7 +4,8 @@
 
 struct nv_device_v0 {
        __u8  version;
-       __u8  pad01[7];
+       __u8  priv;
+       __u8  pad02[6];
        __u64 device;   /* device identifier, ~0 for client default */
 };
 
index ba2c28e..c68cc95 100644 (file)
@@ -61,8 +61,6 @@
 #define NV10_CHANNEL_DMA                              /* cl506b.h */ 0x0000006e
 #define NV17_CHANNEL_DMA                              /* cl506b.h */ 0x0000176e
 #define NV40_CHANNEL_DMA                              /* cl506b.h */ 0x0000406e
-#define NV50_CHANNEL_DMA                              /* cl506e.h */ 0x0000506e
-#define G82_CHANNEL_DMA                               /* cl826e.h */ 0x0000826e
 
 #define NV50_CHANNEL_GPFIFO                           /* cl506f.h */ 0x0000506f
 #define G82_CHANNEL_GPFIFO                            /* cl826f.h */ 0x0000826f
index 347d2c0..5d9395e 100644 (file)
@@ -9,7 +9,6 @@ struct nvif_client {
        const struct nvif_driver *driver;
        u64 version;
        u8 route;
-       bool super;
 };
 
 int  nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
index 8e85b93..7a3af05 100644 (file)
@@ -11,7 +11,7 @@ struct nvif_driver {
        void (*fini)(void *priv);
        int (*suspend)(void *priv);
        int (*resume)(void *priv);
-       int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
+       int (*ioctl)(void *priv, void *data, u32 size, void **hack);
        void __iomem *(*map)(void *priv, u64 handle, u32 size);
        void (*unmap)(void *priv, void __iomem *ptr, u32 size);
        bool keep;
index 5d7017f..2f86606 100644 (file)
@@ -13,7 +13,6 @@ struct nvkm_client {
        struct nvkm_client_notify *notify[32];
        struct rb_root objroot;
 
-       bool super;
        void *data;
        int (*ntfy)(const void *, u32, const void *, u32);
 
index 71ed147..f52918a 100644 (file)
@@ -4,5 +4,5 @@
 #include <core/os.h>
 struct nvkm_client;
 
-int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **);
+int nvkm_ioctl(struct nvkm_client *, void *, u32, void **);
 #endif
index 0911e73..70e7887 100644 (file)
@@ -15,7 +15,6 @@ struct nvkm_vma {
        u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
        bool used:1; /* Region allocated. */
        bool part:1; /* Region was split from an allocated region by map(). */
-       bool user:1; /* Region user-allocated. */
        bool busy:1; /* Region busy (for temporarily preventing user access). */
        bool mapped:1; /* Region contains valid pages. */
        struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
index b45ec30..4107b70 100644 (file)
@@ -570,11 +570,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
        }
 
        client->route = NVDRM_OBJECT_ABI16;
-       client->super = true;
        ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
                               NV_DMA_IN_MEMORY, &args, sizeof(args),
                               &ntfy->object);
-       client->super = false;
        client->route = NVDRM_OBJECT_NVIF;
        if (ret)
                goto done;
index 4036260..80099ef 100644 (file)
@@ -86,12 +86,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
        struct nouveau_channel *chan = *pchan;
        if (chan) {
                struct nouveau_cli *cli = (void *)chan->user.client;
-               bool super;
-
-               if (cli) {
-                       super = cli->base.super;
-                       cli->base.super = true;
-               }
 
                if (chan->fence)
                        nouveau_fence(chan->drm)->context_del(chan);
@@ -111,9 +105,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
                        nouveau_bo_unpin(chan->push.buffer);
                nouveau_bo_ref(NULL, &chan->push.buffer);
                kfree(chan);
-
-               if (cli)
-                       cli->base.super = super;
        }
        *pchan = NULL;
 }
@@ -512,20 +503,16 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
                    struct nouveau_channel **pchan)
 {
        struct nouveau_cli *cli = (void *)device->object.client;
-       bool super;
        int ret;
 
        /* hack until fencenv50 is fixed, and agp access relaxed */
-       super = cli->base.super;
-       cli->base.super = true;
-
        ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
        if (ret) {
                NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
                ret = nouveau_channel_dma(drm, device, pchan);
                if (ret) {
                        NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
-                       goto done;
+                       return ret;
                }
        }
 
@@ -533,15 +520,13 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
        if (ret) {
                NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
                nouveau_channel_del(pchan);
-               goto done;
+               return ret;
        }
 
        ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
        if (ret)
                nouveau_channel_del(pchan);
 
-done:
-       cli->base.super = super;
        return ret;
 }
 
index a616cf4..ba4cd5f 100644 (file)
@@ -244,6 +244,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
        ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
                               &(struct nv_device_v0) {
                                        .device = ~0,
+                                       .priv = true,
                               }, sizeof(struct nv_device_v0),
                               &cli->device);
        if (ret) {
@@ -1086,8 +1087,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
        if (ret)
                goto done;
 
-       cli->base.super = false;
-
        fpriv->driver_priv = cli;
 
        mutex_lock(&drm->client.mutex);
index 0de6549..2ca3207 100644 (file)
@@ -41,8 +41,6 @@ nouveau_mem_map(struct nouveau_mem *mem,
                struct gf100_vmm_map_v0 gf100;
        } args;
        u32 argc = 0;
-       bool super;
-       int ret;
 
        switch (vmm->object.oclass) {
        case NVIF_CLASS_VMM_NV04:
@@ -73,12 +71,7 @@ nouveau_mem_map(struct nouveau_mem *mem,
                return -ENOSYS;
        }
 
-       super = vmm->object.client->super;
-       vmm->object.client->super = true;
-       ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
-                          &mem->mem, 0);
-       vmm->object.client->super = super;
-       return ret;
+       return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0);
 }
 
 void
@@ -99,7 +92,6 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
        struct nouveau_drm *drm = cli->drm;
        struct nvif_mmu *mmu = &cli->mmu;
        struct nvif_mem_ram_v0 args = {};
-       bool super = cli->base.super;
        u8 type;
        int ret;
 
@@ -122,11 +114,9 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
                args.dma = tt->dma_address;
 
        mutex_lock(&drm->master.lock);
-       cli->base.super = true;
        ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
                                 reg->num_pages << PAGE_SHIFT,
                                 &args, sizeof(args), &mem->mem);
-       cli->base.super = super;
        mutex_unlock(&drm->master.lock);
        return ret;
 }
@@ -138,12 +128,10 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
        struct nouveau_cli *cli = mem->cli;
        struct nouveau_drm *drm = cli->drm;
        struct nvif_mmu *mmu = &cli->mmu;
-       bool super = cli->base.super;
        u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
        int ret;
 
        mutex_lock(&drm->master.lock);
-       cli->base.super = true;
        switch (cli->mem->oclass) {
        case NVIF_CLASS_MEM_GF100:
                ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
@@ -167,7 +155,6 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
                WARN_ON(1);
                break;
        }
-       cli->base.super = super;
        mutex_unlock(&drm->master.lock);
 
        reg->start = mem->mem.addr >> PAGE_SHIFT;
index b3f29b1..52f5793 100644 (file)
@@ -52,9 +52,9 @@ nvkm_client_map(void *priv, u64 handle, u32 size)
 }
 
 static int
-nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
+nvkm_client_ioctl(void *priv, void *data, u32 size, void **hack)
 {
-       return nvkm_ioctl(priv, super, data, size, hack);
+       return nvkm_ioctl(priv, data, size, hack);
 }
 
 static int
index 82b583f..b0c3422 100644 (file)
@@ -237,14 +237,11 @@ void
 nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
 {
        if (limit > start) {
-               bool super = svmm->vmm->vmm.object.client->super;
-               svmm->vmm->vmm.object.client->super = true;
                nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
                                 &(struct nvif_vmm_pfnclr_v0) {
                                        .addr = start,
                                        .size = limit - start,
                                 }, sizeof(struct nvif_vmm_pfnclr_v0));
-               svmm->vmm->vmm.object.client->super = super;
        }
 }
 
@@ -634,9 +631,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
                NVIF_VMM_PFNMAP_V0_A |
                NVIF_VMM_PFNMAP_V0_HOST;
 
-       svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
-       svmm->vmm->vmm.object.client->super = false;
        mutex_unlock(&svmm->mutex);
 
        unlock_page(page);
@@ -702,9 +697,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
 
        nouveau_hmm_convert_pfn(drm, &range, args);
 
-       svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
-       svmm->vmm->vmm.object.client->super = false;
        mutex_unlock(&svmm->mutex);
 
 out:
@@ -928,10 +921,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
 
        mutex_lock(&svmm->mutex);
 
-       svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
                                npages * sizeof(args->p.phys[0]), NULL);
-       svmm->vmm->vmm.object.client->super = false;
 
        mutex_unlock(&svmm->mutex);
 }
index 9dc10b1..5da1f4d 100644 (file)
@@ -32,6 +32,9 @@
 #include <nvif/event.h>
 #include <nvif/ioctl.h>
 
+#include <nvif/class.h>
+#include <nvif/cl0080.h>
+
 struct usif_notify_p {
        struct drm_pending_event base;
        struct {
@@ -261,7 +264,7 @@ usif_object_dtor(struct usif_object *object)
 }
 
 static int
-usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16)
 {
        struct nouveau_cli *cli = nouveau_cli(f);
        struct nvif_client *client = &cli->base;
@@ -271,23 +274,48 @@ usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
        struct usif_object *object;
        int ret = -ENOSYS;
 
+       if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true)))
+               return ret;
+
+       switch (args->v0.oclass) {
+       case NV_DMA_FROM_MEMORY:
+       case NV_DMA_TO_MEMORY:
+       case NV_DMA_IN_MEMORY:
+               return -EINVAL;
+       case NV_DEVICE: {
+               union {
+                       struct nv_device_v0 v0;
+               } *args = data;
+
+               if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false)))
+                       return ret;
+
+               args->v0.priv = false;
+               break;
+       }
+       default:
+               if (!parent_abi16)
+                       return -EINVAL;
+               break;
+       }
+
        if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
                return -ENOMEM;
        list_add(&object->head, &cli->objects);
 
-       if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
-               object->route = args->v0.route;
-               object->token = args->v0.token;
-               args->v0.route = NVDRM_OBJECT_USIF;
-               args->v0.token = (unsigned long)(void *)object;
-               ret = nvif_client_ioctl(client, argv, argc);
-               args->v0.token = object->token;
-               args->v0.route = object->route;
+       object->route = args->v0.route;
+       object->token = args->v0.token;
+       args->v0.route = NVDRM_OBJECT_USIF;
+       args->v0.token = (unsigned long)(void *)object;
+       ret = nvif_client_ioctl(client, argv, argc);
+       if (ret) {
+               usif_object_dtor(object);
+               return ret;
        }
 
-       if (ret)
-               usif_object_dtor(object);
-       return ret;
+       args->v0.token = object->token;
+       args->v0.route = object->route;
+       return 0;
 }
 
 int
@@ -301,6 +329,7 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
                struct nvif_ioctl_v0 v0;
        } *argv = data;
        struct usif_object *object;
+       bool abi16 = false;
        u8 owner;
        int ret;
 
@@ -331,11 +360,13 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
                        mutex_unlock(&cli->mutex);
                        goto done;
                }
+
+               abi16 = true;
        }
 
        switch (argv->v0.type) {
        case NVIF_IOCTL_V0_NEW:
-               ret = usif_object_new(filp, data, size, argv, argc);
+               ret = usif_object_new(filp, data, size, argv, argc, abi16);
                break;
        case NVIF_IOCTL_V0_NTFY_NEW:
                ret = usif_notify_new(filp, data, size, argv, argc);
index 12644f8..a3264a0 100644 (file)
@@ -32,7 +32,7 @@
 int
 nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
 {
-       return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
+       return client->driver->ioctl(client->object.priv, data, size, NULL);
 }
 
 int
@@ -80,7 +80,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
        client->object.client = client;
        client->object.handle = ~0;
        client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
-       client->super = true;
        client->driver = parent->driver;
 
        if (ret == 0) {
index 671a5c0..dce1ece 100644 (file)
@@ -44,8 +44,7 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
        } else
                return -ENOSYS;
 
-       return client->driver->ioctl(client->object.priv, client->super,
-                                    data, size, hack);
+       return client->driver->ioctl(client->object.priv, data, size, hack);
 }
 
 void
index d777df5..735cb68 100644 (file)
@@ -426,8 +426,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
 }
 
 int
-nvkm_ioctl(struct nvkm_client *client, bool supervisor,
-          void *data, u32 size, void **hack)
+nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack)
 {
        struct nvkm_object *object = &client->object;
        union {
@@ -435,7 +434,6 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor,
        } *args = data;
        int ret = -ENOSYS;
 
-       client->super = supervisor;
        nvif_ioctl(object, "size %d\n", size);
 
        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
index b930f53..93ddf63 100644 (file)
@@ -2624,6 +2624,26 @@ nv174_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
 };
 
+static const struct nvkm_device_chip
+nv177_chipset = {
+       .name = "GA107",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gpio     = { 0x00000001, ga102_gpio_new },
+       .i2c      = { 0x00000001, gm200_i2c_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mc       = { 0x00000001, ga100_mc_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .privring = { 0x00000001, gm200_privring_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .top      = { 0x00000001, ga100_top_new },
+       .disp     = { 0x00000001, ga102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                       struct nvkm_notify *notify)
@@ -3049,6 +3069,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x168: device->chip = &nv168_chipset; break;
                case 0x172: device->chip = &nv172_chipset; break;
                case 0x174: device->chip = &nv174_chipset; break;
+               case 0x177: device->chip = &nv177_chipset; break;
                default:
                        if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
                                switch (device->chipset) {
index fea9d8f..f28894f 100644 (file)
@@ -397,7 +397,7 @@ nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
                return ret;
 
        /* give privileged clients register access */
-       if (client->super)
+       if (args->v0.priv)
                func = &nvkm_udevice_super;
        else
                func = &nvkm_udevice;
index 55fbfe2..9669472 100644 (file)
@@ -440,7 +440,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
        return ret;
 }
 
-static void
+void
 nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
 {
        struct nvkm_dp *dp = nvkm_dp(outp);
index 428b3f4..e484d0c 100644 (file)
@@ -32,6 +32,7 @@ struct nvkm_dp {
 
 int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
                struct nvkm_outp **);
+void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
 
 /* DPCD Receiver Capabilities */
 #define DPCD_RC00_DPCD_REV                                              0x00000
index dffcac2..129982f 100644 (file)
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "outp.h"
+#include "dp.h"
 #include "ior.h"
 
 #include <subdev/bios.h>
@@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
        if (!ior->arm.head || ior->arm.proto != proto) {
                OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
                         ior->arm.proto, proto);
+
+               /* The EFI GOP driver on Ampere can leave unused DP links routed,
+                * which we don't expect.  The DisableLT IED script *should* get
+                * us back to where we need to be.
+                */
+               if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
+                       nvkm_dp_disable(outp, ior);
+
                return;
        }
 
index d20cc06..797131e 100644 (file)
@@ -26,7 +26,6 @@
 #include <core/client.h>
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
-#include <subdev/instmem.h>
 
 #include <nvif/cl0002.h>
 #include <nvif/unpack.h>
@@ -72,11 +71,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
        union {
                struct nv_dma_v0 v0;
        } *args = *pdata;
-       struct nvkm_device *device = dma->engine.subdev.device;
-       struct nvkm_client *client = oclass->client;
        struct nvkm_object *parent = oclass->parent;
-       struct nvkm_instmem *instmem = device->imem;
-       struct nvkm_fb *fb = device->fb;
        void *data = *pdata;
        u32 size = *psize;
        int ret = -ENOSYS;
@@ -109,23 +104,13 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
                dmaobj->target = NV_MEM_TARGET_VM;
                break;
        case NV_DMA_V0_TARGET_VRAM:
-               if (!client->super) {
-                       if (dmaobj->limit >= fb->ram->size - instmem->reserved)
-                               return -EACCES;
-                       if (device->card_type >= NV_50)
-                               return -EACCES;
-               }
                dmaobj->target = NV_MEM_TARGET_VRAM;
                break;
        case NV_DMA_V0_TARGET_PCI:
-               if (!client->super)
-                       return -EACCES;
                dmaobj->target = NV_MEM_TARGET_PCI;
                break;
        case NV_DMA_V0_TARGET_PCI_US:
        case NV_DMA_V0_TARGET_AGP:
-               if (!client->super)
-                       return -EACCES;
                dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
                break;
        default:
index 90e9a09..3209eb7 100644 (file)
@@ -27,8 +27,6 @@ nvkm-y += nvkm/engine/fifo/dmanv04.o
 nvkm-y += nvkm/engine/fifo/dmanv10.o
 nvkm-y += nvkm/engine/fifo/dmanv17.o
 nvkm-y += nvkm/engine/fifo/dmanv40.o
-nvkm-y += nvkm/engine/fifo/dmanv50.o
-nvkm-y += nvkm/engine/fifo/dmag84.o
 
 nvkm-y += nvkm/engine/fifo/gpfifonv50.o
 nvkm-y += nvkm/engine/fifo/gpfifog84.o
index af8bdf2..3a95730 100644 (file)
@@ -48,8 +48,6 @@ void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
 int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
                       const struct nvkm_oclass *, struct nv50_fifo_chan *);
 
-extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
 extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
 extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
deleted file mode 100644 (file)
index fc34cdd..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl826e.h>
-#include <nvif/unpack.h>
-
-static int
-g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
-                void *data, u32 size, struct nvkm_object **pobject)
-{
-       struct nvkm_object *parent = oclass->parent;
-       union {
-               struct g82_channel_dma_v0 v0;
-       } *args = data;
-       struct nv50_fifo *fifo = nv50_fifo(base);
-       struct nv50_fifo_chan *chan;
-       int ret = -ENOSYS;
-
-       nvif_ioctl(parent, "create channel dma size %d\n", size);
-       if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-               nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
-                                  "pushbuf %llx offset %016llx\n",
-                          args->v0.version, args->v0.vmm, args->v0.pushbuf,
-                          args->v0.offset);
-               if (!args->v0.pushbuf)
-                       return -EINVAL;
-       } else
-               return ret;
-
-       if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
-               return -ENOMEM;
-       *pobject = &chan->base.object;
-
-       ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
-                                oclass, chan);
-       if (ret)
-               return ret;
-
-       args->v0.chid = chan->base.chid;
-
-       nvkm_kmap(chan->ramfc);
-       nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
-       nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
-       nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
-       nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
-       nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
-       nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
-       nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
-       nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-                                    (4 << 24) /* SEARCH_FULL */ |
-                                    (chan->ramht->gpuobj->node->offset >> 4));
-       nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
-       nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
-       nvkm_done(chan->ramfc);
-       return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-g84_fifo_dma_oclass = {
-       .base.oclass = G82_CHANNEL_DMA,
-       .base.minver = 0,
-       .base.maxver = 0,
-       .ctor = g84_fifo_dma_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
deleted file mode 100644 (file)
index 8043718..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl506e.h>
-#include <nvif/unpack.h>
-
-static int
-nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
-                 void *data, u32 size, struct nvkm_object **pobject)
-{
-       struct nvkm_object *parent = oclass->parent;
-       union {
-               struct nv50_channel_dma_v0 v0;
-       } *args = data;
-       struct nv50_fifo *fifo = nv50_fifo(base);
-       struct nv50_fifo_chan *chan;
-       int ret = -ENOSYS;
-
-       nvif_ioctl(parent, "create channel dma size %d\n", size);
-       if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-               nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
-                                  "pushbuf %llx offset %016llx\n",
-                          args->v0.version, args->v0.vmm, args->v0.pushbuf,
-                          args->v0.offset);
-               if (!args->v0.pushbuf)
-                       return -EINVAL;
-       } else
-               return ret;
-
-       if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
-               return -ENOMEM;
-       *pobject = &chan->base.object;
-
-       ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
-                                 oclass, chan);
-       if (ret)
-               return ret;
-
-       args->v0.chid = chan->base.chid;
-
-       nvkm_kmap(chan->ramfc);
-       nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
-       nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
-       nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
-       nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
-       nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
-       nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
-       nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
-       nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-                                    (4 << 24) /* SEARCH_FULL */ |
-                                    (chan->ramht->gpuobj->node->offset >> 4));
-       nvkm_done(chan->ramfc);
-       return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-nv50_fifo_dma_oclass = {
-       .base.oclass = NV50_CHANNEL_DMA,
-       .base.minver = 0,
-       .base.maxver = 0,
-       .ctor = nv50_fifo_dma_new,
-};
index c0a7d0f..3885c38 100644 (file)
@@ -119,7 +119,6 @@ g84_fifo = {
        .uevent_init = g84_fifo_uevent_init,
        .uevent_fini = g84_fifo_uevent_fini,
        .chan = {
-               &g84_fifo_dma_oclass,
                &g84_fifo_gpfifo_oclass,
                NULL
        },
index b6900a5..ae6c4d8 100644 (file)
@@ -341,8 +341,6 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
                                   "runlist %016llx priv %d\n",
                           args->v0.version, args->v0.vmm, args->v0.ioffset,
                           args->v0.ilength, args->v0.runlist, args->v0.priv);
-               if (args->v0.priv && !oclass->client->super)
-                       return -EINVAL;
                return gk104_fifo_gpfifo_new_(fifo,
                                              &args->v0.runlist,
                                              &args->v0.chid,
index ee4967b..743791c 100644 (file)
@@ -226,8 +226,6 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
                                   "runlist %016llx priv %d\n",
                           args->v0.version, args->v0.vmm, args->v0.ioffset,
                           args->v0.ilength, args->v0.runlist, args->v0.priv);
-               if (args->v0.priv && !oclass->client->super)
-                       return -EINVAL;
                return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
                                              &args->v0.runlist,
                                              &args->v0.chid,
index abef7fb..99aafa1 100644 (file)
@@ -65,8 +65,6 @@ tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
                                   "runlist %016llx priv %d\n",
                           args->v0.version, args->v0.vmm, args->v0.ioffset,
                           args->v0.ilength, args->v0.runlist, args->v0.priv);
-               if (args->v0.priv && !oclass->client->super)
-                       return -EINVAL;
                return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo,
                                              &args->v0.runlist,
                                              &args->v0.chid,
index be94156..a08742c 100644 (file)
@@ -136,7 +136,6 @@ nv50_fifo = {
        .pause = nv04_fifo_pause,
        .start = nv04_fifo_start,
        .chan = {
-               &nv50_fifo_dma_oclass,
                &nv50_fifo_gpfifo_oclass,
                NULL
        },
index fac2f9a..e530bb8 100644 (file)
@@ -41,7 +41,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
 
        object = nvkm_object_search(client, handle, &nvkm_umem);
        if (IS_ERR(object)) {
-               if (client->super && client != master) {
+               if (client != master) {
                        spin_lock(&master->lock);
                        list_for_each_entry(umem, &master->umem, head) {
                                if (umem->object.object == handle) {
@@ -53,8 +53,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
                }
        } else {
                umem = nvkm_umem(object);
-               if (!umem->priv || client->super)
-                       memory = nvkm_memory_ref(umem->memory);
+               memory = nvkm_memory_ref(umem->memory);
        }
 
        return memory ? memory : ERR_PTR(-ENOENT);
@@ -167,7 +166,6 @@ nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
        nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
        umem->mmu = mmu;
        umem->type = mmu->type[type].type;
-       umem->priv = oclass->client->super;
        INIT_LIST_HEAD(&umem->head);
        *pobject = &umem->object;
 
index 85cf692..d56a594 100644 (file)
@@ -8,7 +8,6 @@ struct nvkm_umem {
        struct nvkm_object object;
        struct nvkm_mmu *mmu;
        u8 type:8;
-       bool priv:1;
        bool mappable:1;
        bool io:1;
 
index 0e4b894..6870fda 100644 (file)
@@ -34,7 +34,7 @@ nvkm_ummu_sclass(struct nvkm_object *object, int index,
 {
        struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
 
-       if (mmu->func->mem.user.oclass && oclass->client->super) {
+       if (mmu->func->mem.user.oclass) {
                if (index-- == 0) {
                        oclass->base = mmu->func->mem.user;
                        oclass->ctor = nvkm_umem_new;
index c43b824..d6a1f8d 100644 (file)
@@ -45,7 +45,6 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
 static int
 nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_pfnclr_v0 v0;
        } *args = argv;
@@ -59,9 +58,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
-       if (!client->super)
-               return -ENOENT;
-
        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
@@ -74,7 +70,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 static int
 nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_pfnmap_v0 v0;
        } *args = argv;
@@ -93,9 +88,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
-       if (!client->super)
-               return -ENOENT;
-
        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
@@ -108,7 +100,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 static int
 nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_unmap_v0 v0;
        } *args = argv;
@@ -130,9 +121,8 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                goto done;
        }
 
-       if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-               VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-                         vma->user, !client->super, vma->busy);
+       if (ret = -ENOENT, vma->busy) {
+               VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto done;
        }
 
@@ -181,9 +171,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                goto fail;
        }
 
-       if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-               VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-                         vma->user, !client->super, vma->busy);
+       if (ret = -ENOENT, vma->busy) {
+               VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto fail;
        }
 
@@ -230,7 +219,6 @@ fail:
 static int
 nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_put_v0 v0;
        } *args = argv;
@@ -252,9 +240,8 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                goto done;
        }
 
-       if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-               VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-                         vma->user, !client->super, vma->busy);
+       if (ret = -ENOENT, vma->busy) {
+               VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto done;
        }
 
@@ -268,7 +255,6 @@ done:
 static int
 nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_get_v0 v0;
        } *args = argv;
@@ -297,7 +283,6 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                return ret;
 
        args->v0.addr = vma->addr;
-       vma->user = !client->super;
        return ret;
 }
 
index 710f3f8..8bf00b3 100644 (file)
@@ -774,7 +774,6 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
        new->refd = vma->refd;
        new->used = vma->used;
        new->part = vma->part;
-       new->user = vma->user;
        new->busy = vma->busy;
        new->mapped = vma->mapped;
        list_add(&new->head, &vma->head);
@@ -951,7 +950,7 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm,
 static void
 nvkm_vma_dump(struct nvkm_vma *vma)
 {
-       printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
+       printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
               vma->addr, (u64)vma->size,
               vma->used ? '-' : 'F',
               vma->mapref ? 'R' : '-',
@@ -959,7 +958,6 @@ nvkm_vma_dump(struct nvkm_vma *vma)
               vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
               vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
               vma->part ? 'P' : '-',
-              vma->user ? 'U' : '-',
               vma->busy ? 'B' : '-',
               vma->mapped ? 'M' : '-',
               vma->memory);
@@ -1024,7 +1022,6 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
        vma->mapref = true;
        vma->sparse = false;
        vma->used = true;
-       vma->user = true;
        nvkm_vmm_node_insert(vmm, vma);
        list_add_tail(&vma->head, &vmm->list);
        return 0;
@@ -1615,7 +1612,6 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        vma->page = NVKM_VMA_PAGE_NONE;
        vma->refd = NVKM_VMA_PAGE_NONE;
        vma->used = false;
-       vma->user = false;
        nvkm_vmm_put_region(vmm, vma);
 }
 
index f02abd9..b5e7337 100644 (file)
@@ -534,15 +534,13 @@ int
 gp100_vmm_mthd(struct nvkm_vmm *vmm,
               struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
 {
-       if (client->super) {
-               switch (mthd) {
-               case GP100_VMM_VN_FAULT_REPLAY:
-                       return gp100_vmm_fault_replay(vmm, argv, argc);
-               case GP100_VMM_VN_FAULT_CANCEL:
-                       return gp100_vmm_fault_cancel(vmm, argv, argc);
-               default:
-                       break;
-               }
+       switch (mthd) {
+       case GP100_VMM_VN_FAULT_REPLAY:
+               return gp100_vmm_fault_replay(vmm, argv, argc);
+       case GP100_VMM_VN_FAULT_CANCEL:
+               return gp100_vmm_fault_cancel(vmm, argv, argc);
+       default:
+               break;
        }
        return -EINVAL;
 }
index 74e3b46..2df59b3 100644 (file)
@@ -78,9 +78,7 @@ static int ttm_global_init(void)
 
        ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
        if (IS_ERR(ttm_debugfs_root)) {
-               ret = PTR_ERR(ttm_debugfs_root);
                ttm_debugfs_root = NULL;
-               goto out;
        }
 
        /* Limit the number of pages in the pool to about 50% of the total
index d1cef3b..5652d98 100644 (file)
@@ -492,7 +492,7 @@ struct vmw_private {
        resource_size_t vram_start;
        resource_size_t vram_size;
        resource_size_t prim_bb_mem;
-       void __iomem *rmmio;
+       u32 __iomem *rmmio;
        u32 *fifo_mem;
        resource_size_t fifo_mem_size;
        uint32_t fb_max_width;
index d166ee2..1183185 100644 (file)
@@ -1003,19 +1003,16 @@ err_cpmem:
 static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
 {
        unsigned long status;
-       int i, bit, irq;
+       int i, bit;
 
        for (i = 0; i < num_regs; i++) {
 
                status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
                status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
 
-               for_each_set_bit(bit, &status, 32) {
-                       irq = irq_linear_revmap(ipu->domain,
-                                               regs[i] * 32 + bit);
-                       if (irq)
-                               generic_handle_irq(irq);
-               }
+               for_each_set_bit(bit, &status, 32)
+                       generic_handle_domain_irq(ipu->domain,
+                                                 regs[i] * 32 + bit);
        }
 }
 
index a1c85d1..82b244c 100644 (file)
@@ -585,21 +585,21 @@ static const struct ipu_rgb def_bgra_16 = {
        .bits_per_pixel = 16,
 };
 
-#define Y_OFFSET(pix, x, y)    ((x) + pix->width * (y))
-#define U_OFFSET(pix, x, y)    ((pix->width * pix->height) +           \
-                                (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define V_OFFSET(pix, x, y)    ((pix->width * pix->height) +           \
-                                (pix->width * pix->height / 4) +       \
-                                (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define U2_OFFSET(pix, x, y)   ((pix->width * pix->height) +           \
-                                (pix->width * (y) / 2) + (x) / 2)
-#define V2_OFFSET(pix, x, y)   ((pix->width * pix->height) +           \
-                                (pix->width * pix->height / 2) +       \
-                                (pix->width * (y) / 2) + (x) / 2)
-#define UV_OFFSET(pix, x, y)   ((pix->width * pix->height) +   \
-                                (pix->width * ((y) / 2)) + (x))
-#define UV2_OFFSET(pix, x, y)  ((pix->width * pix->height) +   \
-                                (pix->width * y) + (x))
+#define Y_OFFSET(pix, x, y)    ((x) + pix->bytesperline * (y))
+#define U_OFFSET(pix, x, y)    ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define V_OFFSET(pix, x, y)    ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * pix->height / 4) + \
+                                (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define U2_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * (y) / 2) + (x) / 2)
+#define V2_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * pix->height / 2) + \
+                                (pix->bytesperline * (y) / 2) + (x) / 2)
+#define UV_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * ((y) / 2)) + (x))
+#define UV2_OFFSET(pix, x, y)  ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * y) + (x))
 
 #define NUM_ALPHA_CHANNELS     7
 
index 10acece..e17790f 100644 (file)
@@ -866,15 +866,6 @@ config I2C_PCA_PLATFORM
          This driver can also be built as a module.  If so, the module
          will be called i2c-pca-platform.
 
-config I2C_PMCMSP
-       tristate "PMC MSP I2C TWI Controller"
-       depends on PMC_MSP || COMPILE_TEST
-       help
-         This driver supports the PMC TWI controller on MSP devices.
-
-         This driver can also be built as module. If so, the module
-         will be called i2c-pmcmsp.
-
 config I2C_PNX
        tristate "I2C bus support for Philips PNX and NXP LPC targets"
        depends on ARCH_LPC32XX || COMPILE_TEST
@@ -1402,4 +1393,15 @@ config I2C_FSI
          This driver can also be built as a module. If so, the module will be
          called as i2c-fsi.
 
+config I2C_VIRTIO
+        tristate "Virtio I2C Adapter"
+        select VIRTIO
+        help
+          If you say yes to this option, support will be included for the virtio
+          I2C adapter driver. The hardware can be emulated by any device model
+          software according to the virtio protocol.
+
+          This driver can also be built as a module. If so, the module
+          will be called i2c-virtio.
+
 endmenu
index 69e9963..1336b04 100644 (file)
@@ -86,7 +86,6 @@ obj-$(CONFIG_I2C_OMAP)                += i2c-omap.o
 obj-$(CONFIG_I2C_OWL)          += i2c-owl.o
 obj-$(CONFIG_I2C_PASEMI)       += i2c-pasemi.o
 obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
-obj-$(CONFIG_I2C_PMCMSP)       += i2c-pmcmsp.o
 obj-$(CONFIG_I2C_PNX)          += i2c-pnx.o
 obj-$(CONFIG_I2C_PXA)          += i2c-pxa.o
 obj-$(CONFIG_I2C_PXA_PCI)      += i2c-pxa-pci.o
@@ -146,5 +145,6 @@ obj-$(CONFIG_I2C_SIBYTE)    += i2c-sibyte.o
 obj-$(CONFIG_I2C_XGENE_SLIMPRO) += i2c-xgene-slimpro.o
 obj-$(CONFIG_SCx200_ACB)       += scx200_acb.o
 obj-$(CONFIG_I2C_FSI)          += i2c-fsi.o
+obj-$(CONFIG_I2C_VIRTIO)       += i2c-virtio.o
 
 ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
index e14edd2..2df9df5 100644 (file)
@@ -286,9 +286,7 @@ static int at91_twi_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-
-static int at91_twi_runtime_suspend(struct device *dev)
+static int __maybe_unused at91_twi_runtime_suspend(struct device *dev)
 {
        struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
 
@@ -299,7 +297,7 @@ static int at91_twi_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int at91_twi_runtime_resume(struct device *dev)
+static int __maybe_unused at91_twi_runtime_resume(struct device *dev)
 {
        struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
 
@@ -308,7 +306,7 @@ static int at91_twi_runtime_resume(struct device *dev)
        return clk_prepare_enable(twi_dev->clk);
 }
 
-static int at91_twi_suspend_noirq(struct device *dev)
+static int __maybe_unused at91_twi_suspend_noirq(struct device *dev)
 {
        if (!pm_runtime_status_suspended(dev))
                at91_twi_runtime_suspend(dev);
@@ -316,7 +314,7 @@ static int at91_twi_suspend_noirq(struct device *dev)
        return 0;
 }
 
-static int at91_twi_resume_noirq(struct device *dev)
+static int __maybe_unused at91_twi_resume_noirq(struct device *dev)
 {
        struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
        int ret;
@@ -335,18 +333,13 @@ static int at91_twi_resume_noirq(struct device *dev)
        return 0;
 }
 
-static const struct dev_pm_ops at91_twi_pm = {
+static const struct dev_pm_ops __maybe_unused at91_twi_pm = {
        .suspend_noirq  = at91_twi_suspend_noirq,
        .resume_noirq   = at91_twi_resume_noirq,
        .runtime_suspend        = at91_twi_runtime_suspend,
        .runtime_resume         = at91_twi_runtime_resume,
 };
 
-#define at91_twi_pm_ops (&at91_twi_pm)
-#else
-#define at91_twi_pm_ops NULL
-#endif
-
 static struct platform_driver at91_twi_driver = {
        .probe          = at91_twi_probe,
        .remove         = at91_twi_remove,
@@ -354,7 +347,7 @@ static struct platform_driver at91_twi_driver = {
        .driver         = {
                .name   = "at91_i2c",
                .of_match_table = of_match_ptr(atmel_twi_dt_ids),
-               .pm     = at91_twi_pm_ops,
+               .pm     = pm_ptr(&at91_twi_pm),
        },
 };
 
index 1cceb68..b0eae94 100644 (file)
@@ -138,9 +138,9 @@ static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
 
        if (dma->xfer_in_progress) {
                if (dma->direction == DMA_FROM_DEVICE)
-                       dmaengine_terminate_all(dma->chan_rx);
+                       dmaengine_terminate_sync(dma->chan_rx);
                else
-                       dmaengine_terminate_all(dma->chan_tx);
+                       dmaengine_terminate_sync(dma->chan_tx);
                dma->xfer_in_progress = false;
        }
        if (dma->buf_mapped) {
index cceaf69..6304d1d 100644 (file)
@@ -1224,14 +1224,14 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
 
        disable_irq(iproc_i2c->irq);
 
+       tasklet_kill(&iproc_i2c->slave_rx_tasklet);
+
        /* disable all slave interrupts */
        tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
        tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
                        IE_S_ALL_INTERRUPT_SHIFT);
        iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
 
-       tasklet_kill(&iproc_i2c->slave_rx_tasklet);
-
        /* Erase the slave address programmed */
        tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
        tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
index 20aa339..805c771 100644 (file)
@@ -178,6 +178,7 @@ enum cdns_i2c_slave_state {
  * @clk:               Pointer to struct clk
  * @clk_rate_change_nb:        Notifier block for clock rate changes
  * @quirks:            flag for broken hold bit usage in r1p10
+ * @ctrl_reg:          Cached value of the control register.
  * @ctrl_reg_diva_divb: value of fields DIV_A and DIV_B from CR register
  * @slave:             Registered slave instance.
  * @dev_mode:          I2C operating role(master/slave).
@@ -202,6 +203,7 @@ struct cdns_i2c {
        struct clk *clk;
        struct notifier_block clk_rate_change_nb;
        u32 quirks;
+       u32 ctrl_reg;
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
        u16 ctrl_reg_diva_divb;
        struct i2c_client *slave;
@@ -1071,10 +1073,11 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
        if (ret)
                return ret;
 
-       ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+       ctrl_reg = id->ctrl_reg;
        ctrl_reg &= ~(CDNS_I2C_CR_DIVA_MASK | CDNS_I2C_CR_DIVB_MASK);
        ctrl_reg |= ((div_a << CDNS_I2C_CR_DIVA_SHIFT) |
                        (div_b << CDNS_I2C_CR_DIVB_SHIFT));
+       id->ctrl_reg = ctrl_reg;
        cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
        id->ctrl_reg_diva_divb = ctrl_reg & (CDNS_I2C_CR_DIVA_MASK |
@@ -1162,6 +1165,26 @@ static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev)
        return 0;
 }
 
+/**
+ * cdns_i2c_init -  Controller initialisation
+ * @id:                Device private data structure
+ *
+ * Initialise the i2c controller.
+ *
+ */
+static void cdns_i2c_init(struct cdns_i2c *id)
+{
+       cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET);
+       /*
+        * Cadence I2C controller has a bug wherein it generates
+        * invalid read transaction after HW timeout in master receiver mode.
+        * HW timeout is not used by this driver and the interrupt is disabled.
+        * But the feature itself cannot be disabled. Hence maximum value
+        * is written to this register to reduce the chances of error.
+        */
+       cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+}
+
 /**
  * cdns_i2c_runtime_resume - Runtime resume
  * @dev:       Address of the platform_device structure
@@ -1180,6 +1203,7 @@ static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev)
                dev_err(dev, "Cannot enable clock.\n");
                return ret;
        }
+       cdns_i2c_init(xi2c);
 
        return 0;
 }
@@ -1279,7 +1303,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
        id->dev_mode = CDNS_I2C_MODE_MASTER;
        id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
 #endif
-       cdns_i2c_writereg(CDNS_I2C_CR_MASTER_EN_MASK, CDNS_I2C_CR_OFFSET);
+       id->ctrl_reg = CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS;
 
        ret = cdns_i2c_setclk(id->input_clk, id);
        if (ret) {
@@ -1294,15 +1318,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "cannot get irq %d\n", id->irq);
                goto err_clk_dis;
        }
-
-       /*
-        * Cadence I2C controller has a bug wherein it generates
-        * invalid read transaction after HW timeout in master receiver mode.
-        * HW timeout is not used by this driver and the interrupt is disabled.
-        * But the feature itself cannot be disabled. Hence maximum value
-        * is written to this register to reduce the chances of error.
-        */
-       cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+       cdns_i2c_init(id);
 
        ret = i2c_add_adapter(&id->adap);
        if (ret < 0)
index fdc34d9..bf2a492 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/regmap.h>
 #include <linux/swab.h>
 #include <linux/types.h>
+#include <linux/units.h>
 
 #include "i2c-designware-core.h"
 
@@ -350,7 +351,7 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
                 *
                 * If your hardware is free from tHD;STA issue, try this one.
                 */
-               return (ic_clk * tSYMBOL + 500000) / 1000000 - 8 + offset;
+               return DIV_ROUND_CLOSEST(ic_clk * tSYMBOL, MICRO) - 8 + offset;
        else
                /*
                 * Conditional expression:
@@ -366,8 +367,7 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
                 * The reason why we need to take into account "tf" here,
                 * is the same as described in i2c_dw_scl_lcnt().
                 */
-               return (ic_clk * (tSYMBOL + tf) + 500000) / 1000000
-                       - 3 + offset;
+               return DIV_ROUND_CLOSEST(ic_clk * (tSYMBOL + tf), MICRO) - 3 + offset;
 }
 
 u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
@@ -383,7 +383,7 @@ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
         * account the fall time of SCL signal (tf).  Default tf value
         * should be 0.3 us, for safety.
         */
-       return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset;
+       return DIV_ROUND_CLOSEST(ic_clk * (tLOW + tf), MICRO) - 1 + offset;
 }
 
 int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
index 6a53f75..60a2e75 100644 (file)
 
 #define DW_IC_ERR_TX_ABRT      0x1
 
-#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
+#define DW_IC_TAR_10BITADDR_MASTER     BIT(12)
 
 #define DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH     (BIT(2) | BIT(3))
 #define DW_IC_COMP_PARAM_1_SPEED_MODE_MASK     GENMASK(3, 2)
@@ -245,7 +245,7 @@ struct dw_i2c_dev {
        struct clk              *clk;
        struct clk              *pclk;
        struct reset_control    *rst;
-       struct i2c_client               *slave;
+       struct i2c_client       *slave;
        u32                     (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
        int                     cmd_err;
        struct i2c_msg          *msgs;
index 4b37f28..2111366 100644 (file)
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
+#include <linux/units.h>
 
 #include "i2c-designware-core.h"
 
 static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
 {
-       return clk_get_rate(dev->clk)/1000;
+       return clk_get_rate(dev->clk) / KILO;
 }
 
 #ifdef CONFIG_ACPI
@@ -270,7 +271,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 
                if (!dev->sda_hold_time && t->sda_hold_ns)
                        dev->sda_hold_time =
-                               div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000);
+                               DIV_S64_ROUND_CLOSEST(clk_khz * t->sda_hold_ns, MICRO);
        }
 
        adap = &dev->adapter;
index 803dad7..a2add12 100644 (file)
@@ -379,7 +379,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, dev);
 
        dev->irq = platform_get_irq(pdev, 0);
-       if (iic_force_poll)
+       if (dev->irq < 0 || iic_force_poll)
                dev->irq = 0;
 
        if (dev->irq) {
index aa00ba8..61ae58f 100644 (file)
@@ -413,7 +413,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
                return PTR_ERR(priv->regs);
 
        irq = platform_get_irq(pdev, 0);
-       if (irq <= 0)
+       if (irq < 0)
                return irq;
 
        priv->clk = devm_clk_get(&pdev->dev, NULL);
index aa3f60e..89ae78e 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/platform_data/itco_wdt.h>
 #include <linux/pm_runtime.h>
+#include <linux/mutex.h>
 
 #if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI
 #include <linux/gpio/machine.h>
@@ -503,19 +504,16 @@ static int i801_transaction(struct i801_priv *priv, int xact)
 
 static int i801_block_transaction_by_block(struct i801_priv *priv,
                                           union i2c_smbus_data *data,
-                                          char read_write, int command,
-                                          int hwpec)
+                                          char read_write, int command)
 {
-       int i, len;
-       int status;
-       int xact = hwpec ? SMBHSTCNT_PEC_EN : 0;
+       int i, len, status, xact;
 
        switch (command) {
        case I2C_SMBUS_BLOCK_PROC_CALL:
-               xact |= I801_BLOCK_PROC_CALL;
+               xact = I801_BLOCK_PROC_CALL;
                break;
        case I2C_SMBUS_BLOCK_DATA:
-               xact |= I801_BLOCK_DATA;
+               xact = I801_BLOCK_DATA;
                break;
        default:
                return -EOPNOTSUPP;
@@ -561,10 +559,6 @@ static void i801_isr_byte_done(struct i801_priv *priv)
                                        priv->len);
                                /* FIXME: Recover */
                                priv->len = I2C_SMBUS_BLOCK_MAX;
-                       } else {
-                               dev_dbg(&priv->pci_dev->dev,
-                                       "SMBus block read size is %d\n",
-                                       priv->len);
                        }
                        priv->data[-1] = priv->len;
                }
@@ -665,8 +659,7 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
  */
 static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
                                               union i2c_smbus_data *data,
-                                              char read_write, int command,
-                                              int hwpec)
+                                              char read_write, int command)
 {
        int i, len;
        int smbcmd;
@@ -764,9 +757,8 @@ static int i801_set_block_buffer_mode(struct i801_priv *priv)
 }
 
 /* Block transaction function */
-static int i801_block_transaction(struct i801_priv *priv,
-                                 union i2c_smbus_data *data, char read_write,
-                                 int command, int hwpec)
+static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *data,
+                                 char read_write, int command)
 {
        int result = 0;
        unsigned char hostc;
@@ -802,11 +794,11 @@ static int i801_block_transaction(struct i801_priv *priv,
         && i801_set_block_buffer_mode(priv) == 0)
                result = i801_block_transaction_by_block(priv, data,
                                                         read_write,
-                                                        command, hwpec);
+                                                        command);
        else
                result = i801_block_transaction_byte_by_byte(priv, data,
                                                             read_write,
-                                                            command, hwpec);
+                                                            command);
 
        if (command == I2C_SMBUS_I2C_BLOCK_DATA
         && read_write == I2C_SMBUS_WRITE) {
@@ -917,8 +909,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
                       SMBAUXCTL(priv));
 
        if (block)
-               ret = i801_block_transaction(priv, data, read_write, size,
-                                            hwpec);
+               ret = i801_block_transaction(priv, data, read_write, size);
        else
                ret = i801_transaction(priv, xact);
 
@@ -1498,12 +1489,11 @@ static const struct itco_wdt_platform_data spt_tco_platform_data = {
        .version = 4,
 };
 
-static DEFINE_SPINLOCK(p2sb_spinlock);
-
 static struct platform_device *
 i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
                 struct resource *tco_res)
 {
+       static DEFINE_MUTEX(p2sb_mutex);
        struct resource *res;
        unsigned int devfn;
        u64 base64_addr;
@@ -1516,7 +1506,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
         * enumerated by the PCI subsystem, so we need to unhide/hide it
         * to lookup the P2SB BAR.
         */
-       spin_lock(&p2sb_spinlock);
+       mutex_lock(&p2sb_mutex);
 
        devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 1);
 
@@ -1534,7 +1524,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
        /* Hide the P2SB device, if it was hidden before */
        if (hidden)
                pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden);
-       spin_unlock(&p2sb_spinlock);
+       mutex_unlock(&p2sb_mutex);
 
        res = &tco_res[1];
        if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
@@ -1634,7 +1624,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
                 * BIOS is accessing the host controller so prevent it from
                 * suspending automatically from now on.
                 */
-               pm_runtime_get_sync(&pdev->dev);
+               pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
        }
 
        if ((function & ACPI_IO_MASK) == ACPI_READ)
@@ -1674,11 +1664,6 @@ static void i801_acpi_remove(struct i801_priv *priv)
 
        acpi_remove_address_space_handler(adev->handle,
                ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
-
-       mutex_lock(&priv->acpi_lock);
-       if (priv->acpi_reserved)
-               pm_runtime_put(&priv->pci_dev->dev);
-       mutex_unlock(&priv->acpi_lock);
 }
 #else
 static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
@@ -1690,6 +1675,7 @@ static void i801_setup_hstcfg(struct i801_priv *priv)
        unsigned char hstcfg = priv->original_hstcfg;
 
        hstcfg &= ~SMBHSTCFG_I2C_EN;    /* SMBus timing */
+       hstcfg &= ~SMBHSTCNT_PEC_EN;    /* Disable software PEC */
        hstcfg |= SMBHSTCFG_HST_EN;
        pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg);
 }
index d5b5f08..3576b63 100644 (file)
@@ -423,7 +423,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
        return 0;
 
 err_submit:
-       dmaengine_terminate_all(dma->chan_using);
+       dmaengine_terminate_sync(dma->chan_using);
 err_desc:
        dma_unmap_single(chan_dev, dma->dma_buf,
                        dma->dma_len, dma->dma_data_dir);
@@ -894,7 +894,7 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
        if (time_left == 0) {
-               dmaengine_terminate_all(dma->chan_using);
+               dmaengine_terminate_sync(dma->chan_using);
                return -ETIMEDOUT;
        }
 
@@ -949,7 +949,7 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
        if (time_left == 0) {
-               dmaengine_terminate_all(dma->chan_using);
+               dmaengine_terminate_sync(dma->chan_using);
                return -ETIMEDOUT;
        }
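The dmaengine_terminate_all() helper is deprecated; the dmaengine_terminate_sync() conversions in this series (imx, mxs, qup, sh_mobile) additionally guarantee that no DMA completion callback is still running when the call returns. A hedged sketch of what the new helper amounts to (not taken from the patch):

#include <linux/dmaengine.h>

static void example_cancel_dma(struct dma_chan *chan)
{
	dmaengine_terminate_async(chan);	/* stop the channel */
	dmaengine_synchronize(chan);		/* wait for in-flight callbacks */
	/*
	 * The two calls above are what dmaengine_terminate_sync(chan) does;
	 * it may sleep, so it must be called from process context.
	 */
}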
 
index cfecaf1..4a6ff54 100644 (file)
@@ -469,16 +469,14 @@ iop3xx_i2c_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               ret = -ENXIO;
+               ret = irq;
                goto unmap;
        }
        ret = request_irq(irq, iop3xx_i2c_irq_handler, 0,
                                pdev->name, adapter_data);
 
-       if (ret) {
-               ret = -EIO;
+       if (ret)
                goto unmap;
-       }
 
        memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
        new_adapter->owner = THIS_MODULE;
index 4ca716e..477480d 100644 (file)
@@ -1211,7 +1211,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
                return PTR_ERR(i2c->pdmabase);
 
        irq = platform_get_irq(pdev, 0);
-       if (irq <= 0)
+       if (irq < 0)
                return irq;
 
        init_completion(&i2c->msg_complete);
index f97243f..864a3f1 100644 (file)
@@ -290,14 +290,14 @@ read_init_dma_fail:
 select_init_dma_fail:
        dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
 select_init_pio_fail:
-       dmaengine_terminate_all(i2c->dmach);
+       dmaengine_terminate_sync(i2c->dmach);
        return -EINVAL;
 
 /* Write failpath. */
 write_init_dma_fail:
        dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
 write_init_pio_fail:
-       dmaengine_terminate_all(i2c->dmach);
+       dmaengine_terminate_sync(i2c->dmach);
        return -EINVAL;
 }
 
index a535889..231145c 100644 (file)
@@ -267,6 +267,16 @@ static void i2c_parport_attach(struct parport *port)
        int i;
        struct pardev_cb i2c_parport_cb;
 
+       if (type < 0) {
+               pr_warn("adapter type unspecified\n");
+               return;
+       }
+
+       if (type >= ARRAY_SIZE(adapter_parm)) {
+               pr_warn("invalid type (%d)\n", type);
+               return;
+       }
+
        for (i = 0; i < MAX_DEVICE; i++) {
                if (parport[i] == -1)
                        continue;
@@ -392,32 +402,8 @@ static struct parport_driver i2c_parport_driver = {
        .detach = i2c_parport_detach,
        .devmodel = true,
 };
-
-/* ----- Module loading, unloading and information ------------------------ */
-
-static int __init i2c_parport_init(void)
-{
-       if (type < 0) {
-               pr_warn("adapter type unspecified\n");
-               return -ENODEV;
-       }
-
-       if (type >= ARRAY_SIZE(adapter_parm)) {
-               pr_warn("invalid type (%d)\n", type);
-               return -ENODEV;
-       }
-
-       return parport_register_driver(&i2c_parport_driver);
-}
-
-static void __exit i2c_parport_exit(void)
-{
-       parport_unregister_driver(&i2c_parport_driver);
-}
+module_parport_driver(i2c_parport_driver);
 
 MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("I2C bus over parallel port");
 MODULE_LICENSE("GPL");
-
-module_init(i2c_parport_init);
-module_exit(i2c_parport_exit);
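The module_parport_driver() macro generates roughly the init/exit boilerplate deleted above, registering and unregistering the driver and nothing else; that is why the module-parameter sanity checks had to move into the ->attach() callback, since there is no longer a driver-specific module_init() to fail from. A hedged sketch of the equivalent expansion (an approximation, not copied from <linux/parport.h>):

static int __init i2c_parport_init(void)
{
	return parport_register_driver(&i2c_parport_driver);
}
module_init(i2c_parport_init);

static void __exit i2c_parport_exit(void)
{
	parport_unregister_driver(&i2c_parport_driver);
}
module_exit(i2c_parport_exit);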
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
deleted file mode 100644 (file)
index 5d89c7c..0000000
+++ /dev/null
@@ -1,600 +0,0 @@
-/*
- * Specific bus support for PMC-TWI compliant implementation on MSP71xx.
- *
- * Copyright 2005-2007 PMC-Sierra, Inc.
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-
-#define DRV_NAME       "pmcmsptwi"
-
-#define MSP_TWI_SF_CLK_REG_OFFSET      0x00
-#define MSP_TWI_HS_CLK_REG_OFFSET      0x04
-#define MSP_TWI_CFG_REG_OFFSET         0x08
-#define MSP_TWI_CMD_REG_OFFSET         0x0c
-#define MSP_TWI_ADD_REG_OFFSET         0x10
-#define MSP_TWI_DAT_0_REG_OFFSET       0x14
-#define MSP_TWI_DAT_1_REG_OFFSET       0x18
-#define MSP_TWI_INT_STS_REG_OFFSET     0x1c
-#define MSP_TWI_INT_MSK_REG_OFFSET     0x20
-#define MSP_TWI_BUSY_REG_OFFSET                0x24
-
-#define MSP_TWI_INT_STS_DONE                   (1 << 0)
-#define MSP_TWI_INT_STS_LOST_ARBITRATION       (1 << 1)
-#define MSP_TWI_INT_STS_NO_RESPONSE            (1 << 2)
-#define MSP_TWI_INT_STS_DATA_COLLISION         (1 << 3)
-#define MSP_TWI_INT_STS_BUSY                   (1 << 4)
-#define MSP_TWI_INT_STS_ALL                    0x1f
-
-#define MSP_MAX_BYTES_PER_RW           8
-#define MSP_MAX_POLL                   5
-#define MSP_POLL_DELAY                 10
-#define MSP_IRQ_TIMEOUT                        (MSP_MAX_POLL * MSP_POLL_DELAY)
-
-/* IO Operation macros */
-#define pmcmsptwi_readl                __raw_readl
-#define pmcmsptwi_writel       __raw_writel
-
-/* TWI command type */
-enum pmcmsptwi_cmd_type {
-       MSP_TWI_CMD_WRITE       = 0,    /* Write only */
-       MSP_TWI_CMD_READ        = 1,    /* Read only */
-       MSP_TWI_CMD_WRITE_READ  = 2,    /* Write then Read */
-};
-
-/* The possible results of the xferCmd */
-enum pmcmsptwi_xfer_result {
-       MSP_TWI_XFER_OK = 0,
-       MSP_TWI_XFER_TIMEOUT,
-       MSP_TWI_XFER_BUSY,
-       MSP_TWI_XFER_DATA_COLLISION,
-       MSP_TWI_XFER_NO_RESPONSE,
-       MSP_TWI_XFER_LOST_ARBITRATION,
-};
-
-/* Corresponds to a PMCTWI clock configuration register */
-struct pmcmsptwi_clock {
-       u8 filter;      /* Bits 15:12,  default = 0x03 */
-       u16 clock;      /* Bits 9:0,    default = 0x001f */
-};
-
-struct pmcmsptwi_clockcfg {
-       struct pmcmsptwi_clock standard;  /* The standard/fast clock config */
-       struct pmcmsptwi_clock highspeed; /* The highspeed clock config */
-};
-
-/* Corresponds to the main TWI configuration register */
-struct pmcmsptwi_cfg {
-       u8 arbf;        /* Bits 15:12,  default=0x03 */
-       u8 nak;         /* Bits 11:8,   default=0x03 */
-       u8 add10;       /* Bit 7,       default=0x00 */
-       u8 mst_code;    /* Bits 6:4,    default=0x00 */
-       u8 arb;         /* Bit 1,       default=0x01 */
-       u8 highspeed;   /* Bit 0,       default=0x00 */
-};
-
-/* A single pmctwi command to issue */
-struct pmcmsptwi_cmd {
-       u16 addr;       /* The slave address (7 or 10 bits) */
-       enum pmcmsptwi_cmd_type type;   /* The command type */
-       u8 write_len;   /* Number of bytes in the write buffer */
-       u8 read_len;    /* Number of bytes in the read buffer */
-       u8 *write_data; /* Buffer of characters to send */
-       u8 *read_data;  /* Buffer to fill with incoming data */
-};
-
-/* The private data */
-struct pmcmsptwi_data {
-       void __iomem *iobase;                   /* iomapped base for IO */
-       int irq;                                /* IRQ to use (0 disables) */
-       struct completion wait;                 /* Completion for xfer */
-       struct mutex lock;                      /* Used for threadsafeness */
-       enum pmcmsptwi_xfer_result last_result; /* result of last xfer */
-};
-
-/* The default settings */
-static const struct pmcmsptwi_clockcfg pmcmsptwi_defclockcfg = {
-       .standard = {
-               .filter = 0x3,
-               .clock  = 0x1f,
-       },
-       .highspeed = {
-               .filter = 0x3,
-               .clock  = 0x1f,
-       },
-};
-
-static const struct pmcmsptwi_cfg pmcmsptwi_defcfg = {
-       .arbf           = 0x03,
-       .nak            = 0x03,
-       .add10          = 0x00,
-       .mst_code       = 0x00,
-       .arb            = 0x01,
-       .highspeed      = 0x00,
-};
-
-static struct pmcmsptwi_data pmcmsptwi_data;
-
-static struct i2c_adapter pmcmsptwi_adapter;
-
-/* inline helper functions */
-static inline u32 pmcmsptwi_clock_to_reg(
-                       const struct pmcmsptwi_clock *clock)
-{
-       return ((clock->filter & 0xf) << 12) | (clock->clock & 0x03ff);
-}
-
-static inline u32 pmcmsptwi_cfg_to_reg(const struct pmcmsptwi_cfg *cfg)
-{
-       return ((cfg->arbf & 0xf) << 12) |
-               ((cfg->nak & 0xf) << 8) |
-               ((cfg->add10 & 0x1) << 7) |
-               ((cfg->mst_code & 0x7) << 4) |
-               ((cfg->arb & 0x1) << 1) |
-               (cfg->highspeed & 0x1);
-}
-
-static inline void pmcmsptwi_reg_to_cfg(u32 reg, struct pmcmsptwi_cfg *cfg)
-{
-       cfg->arbf = (reg >> 12) & 0xf;
-       cfg->nak = (reg >> 8) & 0xf;
-       cfg->add10 = (reg >> 7) & 0x1;
-       cfg->mst_code = (reg >> 4) & 0x7;
-       cfg->arb = (reg >> 1) & 0x1;
-       cfg->highspeed = reg & 0x1;
-}
-
-/*
- * Sets the current clock configuration
- */
-static void pmcmsptwi_set_clock_config(const struct pmcmsptwi_clockcfg *cfg,
-                                       struct pmcmsptwi_data *data)
-{
-       mutex_lock(&data->lock);
-       pmcmsptwi_writel(pmcmsptwi_clock_to_reg(&cfg->standard),
-                               data->iobase + MSP_TWI_SF_CLK_REG_OFFSET);
-       pmcmsptwi_writel(pmcmsptwi_clock_to_reg(&cfg->highspeed),
-                               data->iobase + MSP_TWI_HS_CLK_REG_OFFSET);
-       mutex_unlock(&data->lock);
-}
-
-/*
- * Gets the current TWI bus configuration
- */
-static void pmcmsptwi_get_twi_config(struct pmcmsptwi_cfg *cfg,
-                                       struct pmcmsptwi_data *data)
-{
-       mutex_lock(&data->lock);
-       pmcmsptwi_reg_to_cfg(pmcmsptwi_readl(
-                               data->iobase + MSP_TWI_CFG_REG_OFFSET), cfg);
-       mutex_unlock(&data->lock);
-}
-
-/*
- * Sets the current TWI bus configuration
- */
-static void pmcmsptwi_set_twi_config(const struct pmcmsptwi_cfg *cfg,
-                                       struct pmcmsptwi_data *data)
-{
-       mutex_lock(&data->lock);
-       pmcmsptwi_writel(pmcmsptwi_cfg_to_reg(cfg),
-                               data->iobase + MSP_TWI_CFG_REG_OFFSET);
-       mutex_unlock(&data->lock);
-}
-
-/*
- * Parses the 'int_sts' register and returns a well-defined error code
- */
-static enum pmcmsptwi_xfer_result pmcmsptwi_get_result(u32 reg)
-{
-       if (reg & MSP_TWI_INT_STS_LOST_ARBITRATION) {
-               dev_dbg(&pmcmsptwi_adapter.dev,
-                       "Result: Lost arbitration\n");
-               return MSP_TWI_XFER_LOST_ARBITRATION;
-       } else if (reg & MSP_TWI_INT_STS_NO_RESPONSE) {
-               dev_dbg(&pmcmsptwi_adapter.dev,
-                       "Result: No response\n");
-               return MSP_TWI_XFER_NO_RESPONSE;
-       } else if (reg & MSP_TWI_INT_STS_DATA_COLLISION) {
-               dev_dbg(&pmcmsptwi_adapter.dev,
-                       "Result: Data collision\n");
-               return MSP_TWI_XFER_DATA_COLLISION;
-       } else if (reg & MSP_TWI_INT_STS_BUSY) {
-               dev_dbg(&pmcmsptwi_adapter.dev,
-                       "Result: Bus busy\n");
-               return MSP_TWI_XFER_BUSY;
-       }
-
-       dev_dbg(&pmcmsptwi_adapter.dev, "Result: Operation succeeded\n");
-       return MSP_TWI_XFER_OK;
-}
-
-/*
- * In interrupt mode, handle the interrupt.
- * NOTE: Assumes data->lock is held.
- */
-static irqreturn_t pmcmsptwi_interrupt(int irq, void *ptr)
-{
-       struct pmcmsptwi_data *data = ptr;
-
-       u32 reason = pmcmsptwi_readl(data->iobase +
-                                       MSP_TWI_INT_STS_REG_OFFSET);
-       pmcmsptwi_writel(reason, data->iobase + MSP_TWI_INT_STS_REG_OFFSET);
-
-       dev_dbg(&pmcmsptwi_adapter.dev, "Got interrupt 0x%08x\n", reason);
-       if (!(reason & MSP_TWI_INT_STS_DONE))
-               return IRQ_NONE;
-
-       data->last_result = pmcmsptwi_get_result(reason);
-       complete(&data->wait);
-
-       return IRQ_HANDLED;
-}
-
-/*
- * Probe for and register the device and return 0 if there is one.
- */
-static int pmcmsptwi_probe(struct platform_device *pldev)
-{
-       struct resource *res;
-       int rc = -ENODEV;
-
-       /* get the static platform resources */
-       res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pldev->dev, "IOMEM resource not found\n");
-               goto ret_err;
-       }
-
-       /* reserve the memory region */
-       if (!request_mem_region(res->start, resource_size(res),
-                               pldev->name)) {
-               dev_err(&pldev->dev,
-                       "Unable to get memory/io address region %pap\n",
-                       &res->start);
-               rc = -EBUSY;
-               goto ret_err;
-       }
-
-       /* remap the memory */
-       pmcmsptwi_data.iobase = ioremap(res->start,
-                                               resource_size(res));
-       if (!pmcmsptwi_data.iobase) {
-               dev_err(&pldev->dev,
-                       "Unable to ioremap address %pap\n", &res->start);
-               rc = -EIO;
-               goto ret_unreserve;
-       }
-
-       /* request the irq */
-       pmcmsptwi_data.irq = platform_get_irq(pldev, 0);
-       if (pmcmsptwi_data.irq) {
-               rc = request_irq(pmcmsptwi_data.irq, &pmcmsptwi_interrupt,
-                                IRQF_SHARED, pldev->name, &pmcmsptwi_data);
-               if (rc == 0) {
-                       /*
-                        * Enable 'DONE' interrupt only.
-                        *
-                        * If you enable all interrupts, you will get one on
-                        * error and another when the operation completes.
-                        * This way you only have to handle one interrupt,
-                        * but you can still check all result flags.
-                        */
-                       pmcmsptwi_writel(MSP_TWI_INT_STS_DONE,
-                                       pmcmsptwi_data.iobase +
-                                       MSP_TWI_INT_MSK_REG_OFFSET);
-               } else {
-                       dev_warn(&pldev->dev,
-                               "Could not assign TWI IRQ handler "
-                               "to irq %d (continuing with poll)\n",
-                               pmcmsptwi_data.irq);
-                       pmcmsptwi_data.irq = 0;
-               }
-       }
-
-       init_completion(&pmcmsptwi_data.wait);
-       mutex_init(&pmcmsptwi_data.lock);
-
-       pmcmsptwi_set_clock_config(&pmcmsptwi_defclockcfg, &pmcmsptwi_data);
-       pmcmsptwi_set_twi_config(&pmcmsptwi_defcfg, &pmcmsptwi_data);
-
-       printk(KERN_INFO DRV_NAME ": Registering MSP71xx I2C adapter\n");
-
-       pmcmsptwi_adapter.dev.parent = &pldev->dev;
-       platform_set_drvdata(pldev, &pmcmsptwi_adapter);
-       i2c_set_adapdata(&pmcmsptwi_adapter, &pmcmsptwi_data);
-
-       rc = i2c_add_adapter(&pmcmsptwi_adapter);
-       if (rc)
-               goto ret_unmap;
-
-       return 0;
-
-ret_unmap:
-       if (pmcmsptwi_data.irq) {
-               pmcmsptwi_writel(0,
-                       pmcmsptwi_data.iobase + MSP_TWI_INT_MSK_REG_OFFSET);
-               free_irq(pmcmsptwi_data.irq, &pmcmsptwi_data);
-       }
-
-       iounmap(pmcmsptwi_data.iobase);
-
-ret_unreserve:
-       release_mem_region(res->start, resource_size(res));
-
-ret_err:
-       return rc;
-}
-
-/*
- * Release the device and return 0 if there is one.
- */
-static int pmcmsptwi_remove(struct platform_device *pldev)
-{
-       struct resource *res;
-
-       i2c_del_adapter(&pmcmsptwi_adapter);
-
-       if (pmcmsptwi_data.irq) {
-               pmcmsptwi_writel(0,
-                       pmcmsptwi_data.iobase + MSP_TWI_INT_MSK_REG_OFFSET);
-               free_irq(pmcmsptwi_data.irq, &pmcmsptwi_data);
-       }
-
-       iounmap(pmcmsptwi_data.iobase);
-
-       res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-
-       return 0;
-}
-
-/*
- * Polls the 'busy' register until the command is complete.
- * NOTE: Assumes data->lock is held.
- */
-static void pmcmsptwi_poll_complete(struct pmcmsptwi_data *data)
-{
-       int i;
-
-       for (i = 0; i < MSP_MAX_POLL; i++) {
-               u32 val = pmcmsptwi_readl(data->iobase +
-                                               MSP_TWI_BUSY_REG_OFFSET);
-               if (val == 0) {
-                       u32 reason = pmcmsptwi_readl(data->iobase +
-                                               MSP_TWI_INT_STS_REG_OFFSET);
-                       pmcmsptwi_writel(reason, data->iobase +
-                                               MSP_TWI_INT_STS_REG_OFFSET);
-                       data->last_result = pmcmsptwi_get_result(reason);
-                       return;
-               }
-               udelay(MSP_POLL_DELAY);
-       }
-
-       dev_dbg(&pmcmsptwi_adapter.dev, "Result: Poll timeout\n");
-       data->last_result = MSP_TWI_XFER_TIMEOUT;
-}
-
-/*
- * Do the transfer (low level):
- *   May use interrupt-driven or polling, depending on if an IRQ is
- *   presently registered.
- * NOTE: Assumes data->lock is held.
- */
-static enum pmcmsptwi_xfer_result pmcmsptwi_do_xfer(
-                       u32 reg, struct pmcmsptwi_data *data)
-{
-       dev_dbg(&pmcmsptwi_adapter.dev, "Writing cmd reg 0x%08x\n", reg);
-       pmcmsptwi_writel(reg, data->iobase + MSP_TWI_CMD_REG_OFFSET);
-       if (data->irq) {
-               unsigned long timeleft = wait_for_completion_timeout(
-                                               &data->wait, MSP_IRQ_TIMEOUT);
-               if (timeleft == 0) {
-                       dev_dbg(&pmcmsptwi_adapter.dev,
-                               "Result: IRQ timeout\n");
-                       complete(&data->wait);
-                       data->last_result = MSP_TWI_XFER_TIMEOUT;
-               }
-       } else
-               pmcmsptwi_poll_complete(data);
-
-       return data->last_result;
-}
-
-/*
- * Helper routine, converts 'pmctwi_cmd' struct to register format
- */
-static inline u32 pmcmsptwi_cmd_to_reg(const struct pmcmsptwi_cmd *cmd)
-{
-       return ((cmd->type & 0x3) << 8) |
-               (((cmd->write_len - 1) & 0x7) << 4) |
-               ((cmd->read_len - 1) & 0x7);
-}
-
-/*
- * Do the transfer (high level)
- */
-static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
-                       struct pmcmsptwi_cmd *cmd,
-                       struct pmcmsptwi_data *data)
-{
-       enum pmcmsptwi_xfer_result retval;
-
-       mutex_lock(&data->lock);
-       dev_dbg(&pmcmsptwi_adapter.dev,
-               "Setting address to 0x%04x\n", cmd->addr);
-       pmcmsptwi_writel(cmd->addr, data->iobase + MSP_TWI_ADD_REG_OFFSET);
-
-       if (cmd->type == MSP_TWI_CMD_WRITE ||
-           cmd->type == MSP_TWI_CMD_WRITE_READ) {
-               u64 tmp = be64_to_cpup((__be64 *)cmd->write_data);
-               tmp >>= (MSP_MAX_BYTES_PER_RW - cmd->write_len) * 8;
-               dev_dbg(&pmcmsptwi_adapter.dev, "Writing 0x%016llx\n", tmp);
-               pmcmsptwi_writel(tmp & 0x00000000ffffffffLL,
-                               data->iobase + MSP_TWI_DAT_0_REG_OFFSET);
-               if (cmd->write_len > 4)
-                       pmcmsptwi_writel(tmp >> 32,
-                               data->iobase + MSP_TWI_DAT_1_REG_OFFSET);
-       }
-
-       retval = pmcmsptwi_do_xfer(pmcmsptwi_cmd_to_reg(cmd), data);
-       if (retval != MSP_TWI_XFER_OK)
-               goto xfer_err;
-
-       if (cmd->type == MSP_TWI_CMD_READ ||
-           cmd->type == MSP_TWI_CMD_WRITE_READ) {
-               int i;
-               u64 rmsk = ~(0xffffffffffffffffLL << (cmd->read_len * 8));
-               u64 tmp = (u64)pmcmsptwi_readl(data->iobase +
-                                       MSP_TWI_DAT_0_REG_OFFSET);
-               if (cmd->read_len > 4)
-                       tmp |= (u64)pmcmsptwi_readl(data->iobase +
-                                       MSP_TWI_DAT_1_REG_OFFSET) << 32;
-               tmp &= rmsk;
-               dev_dbg(&pmcmsptwi_adapter.dev, "Read 0x%016llx\n", tmp);
-
-               for (i = 0; i < cmd->read_len; i++)
-                       cmd->read_data[i] = tmp >> i;
-       }
-
-xfer_err:
-       mutex_unlock(&data->lock);
-
-       return retval;
-}
-
-/* -- Algorithm functions -- */
-
-/*
- * Sends an i2c command out on the adapter
- */
-static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
-                               struct i2c_msg *msg, int num)
-{
-       struct pmcmsptwi_data *data = i2c_get_adapdata(adap);
-       struct pmcmsptwi_cmd cmd;
-       struct pmcmsptwi_cfg oldcfg, newcfg;
-       int ret;
-
-       if (num == 2) {
-               struct i2c_msg *nextmsg = msg + 1;
-
-               cmd.type = MSP_TWI_CMD_WRITE_READ;
-               cmd.write_len = msg->len;
-               cmd.write_data = msg->buf;
-               cmd.read_len = nextmsg->len;
-               cmd.read_data = nextmsg->buf;
-       } else if (msg->flags & I2C_M_RD) {
-               cmd.type = MSP_TWI_CMD_READ;
-               cmd.read_len = msg->len;
-               cmd.read_data = msg->buf;
-               cmd.write_len = 0;
-               cmd.write_data = NULL;
-       } else {
-               cmd.type = MSP_TWI_CMD_WRITE;
-               cmd.read_len = 0;
-               cmd.read_data = NULL;
-               cmd.write_len = msg->len;
-               cmd.write_data = msg->buf;
-       }
-
-       cmd.addr = msg->addr;
-
-       if (msg->flags & I2C_M_TEN) {
-               pmcmsptwi_get_twi_config(&newcfg, data);
-               memcpy(&oldcfg, &newcfg, sizeof(oldcfg));
-
-               /* Set the special 10-bit address flag */
-               newcfg.add10 = 1;
-
-               pmcmsptwi_set_twi_config(&newcfg, data);
-       }
-
-       /* Execute the command */
-       ret = pmcmsptwi_xfer_cmd(&cmd, data);
-
-       if (msg->flags & I2C_M_TEN)
-               pmcmsptwi_set_twi_config(&oldcfg, data);
-
-       dev_dbg(&adap->dev, "I2C %s of %d bytes %s\n",
-               (msg->flags & I2C_M_RD) ? "read" : "write", msg->len,
-               (ret == MSP_TWI_XFER_OK) ? "succeeded" : "failed");
-
-       if (ret != MSP_TWI_XFER_OK) {
-               /*
-                * TODO: We could potentially loop and retry in the case
-                * of MSP_TWI_XFER_TIMEOUT.
-                */
-               return -EIO;
-       }
-
-       return num;
-}
-
-static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
-{
-       return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR |
-               I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |
-               I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL;
-}
-
-static const struct i2c_adapter_quirks pmcmsptwi_i2c_quirks = {
-       .flags = I2C_AQ_COMB_WRITE_THEN_READ | I2C_AQ_NO_ZERO_LEN,
-       .max_write_len = MSP_MAX_BYTES_PER_RW,
-       .max_read_len = MSP_MAX_BYTES_PER_RW,
-       .max_comb_1st_msg_len = MSP_MAX_BYTES_PER_RW,
-       .max_comb_2nd_msg_len = MSP_MAX_BYTES_PER_RW,
-};
-
-/* -- Initialization -- */
-
-static const struct i2c_algorithm pmcmsptwi_algo = {
-       .master_xfer    = pmcmsptwi_master_xfer,
-       .functionality  = pmcmsptwi_i2c_func,
-};
-
-static struct i2c_adapter pmcmsptwi_adapter = {
-       .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
-       .algo           = &pmcmsptwi_algo,
-       .quirks         = &pmcmsptwi_i2c_quirks,
-       .name           = DRV_NAME,
-};
-
-static struct platform_driver pmcmsptwi_driver = {
-       .probe  = pmcmsptwi_probe,
-       .remove = pmcmsptwi_remove,
-       .driver = {
-               .name   = DRV_NAME,
-       },
-};
-
-module_platform_driver(pmcmsptwi_driver);
-
-MODULE_DESCRIPTION("PMC MSP TWI/SMBus/I2C driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
index 61dc20f..fcd35e8 100644 (file)
@@ -778,7 +778,7 @@ static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
                        ret = -EINVAL;
 
                        /* abort TX descriptors */
-                       dmaengine_terminate_all(qup->btx.dma);
+                       dmaengine_terminate_sync(qup->btx.dma);
                        goto desc_err;
                }
 
index 4d82761..b49a1b1 100644 (file)
@@ -1137,7 +1137,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
         */
        if (!(i2c->quirks & QUIRK_POLL)) {
                i2c->irq = ret = platform_get_irq(pdev, 0);
-               if (ret <= 0) {
+               if (ret < 0) {
                        dev_err(&pdev->dev, "cannot find IRQ\n");
                        clk_unprepare(i2c->clk);
                        return ret;
index 2d2e630..db8fa41 100644 (file)
@@ -458,9 +458,9 @@ static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd)
        if (pd->dma_direction == DMA_NONE)
                return;
        else if (pd->dma_direction == DMA_FROM_DEVICE)
-               dmaengine_terminate_all(pd->dma_rx);
+               dmaengine_terminate_sync(pd->dma_rx);
        else if (pd->dma_direction == DMA_TO_DEVICE)
-               dmaengine_terminate_all(pd->dma_tx);
+               dmaengine_terminate_sync(pd->dma_tx);
 
        sh_mobile_i2c_dma_unmap(pd);
 }
index 2f6f646..9e3483f 100644 (file)
@@ -234,7 +234,7 @@ static int p2wi_probe(struct platform_device *pdev)
        if (IS_ERR(p2wi->regs))
                return PTR_ERR(p2wi->regs);
 
-       strlcpy(p2wi->adapter.name, pdev->name, sizeof(p2wi->adapter.name));
+       strscpy(p2wi->adapter.name, pdev->name, sizeof(p2wi->adapter.name));
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
index 31be181..e4026c5 100644 (file)
@@ -578,7 +578,7 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
 
        i2c->irq = platform_get_irq(pdev, 0);
        if (i2c->irq < 0)
-               return -ENODEV;
+               return i2c->irq;
 
        ret = devm_request_irq(&pdev->dev, i2c->irq, synquacer_i2c_isr,
                               0, dev_name(&pdev->dev), i2c);
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
new file mode 100644 (file)
index 0000000..f10a603
--- /dev/null
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtio I2C Bus Driver
+ *
+ * The Virtio I2C Specification:
+ * https://raw.githubusercontent.com/oasis-tcs/virtio-spec/master/virtio-i2c.tex
+ *
+ * Copyright (c) 2021 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_i2c.h>
+
+/**
+ * struct virtio_i2c - virtio I2C data
+ * @vdev: virtio device for this controller
+ * @completion: completion of virtio I2C message
+ * @adap: I2C adapter for this controller
+ * @vq: the virtio virtqueue for communication
+ */
+struct virtio_i2c {
+       struct virtio_device *vdev;
+       struct completion completion;
+       struct i2c_adapter adap;
+       struct virtqueue *vq;
+};
+
+/**
+ * struct virtio_i2c_req - the virtio I2C request structure
+ * @out_hdr: the OUT header of the virtio I2C message
+ * @buf: the buffer into which data is read, or from which it's written
+ * @in_hdr: the IN header of the virtio I2C message
+ */
+struct virtio_i2c_req {
+       struct virtio_i2c_out_hdr out_hdr       ____cacheline_aligned;
+       uint8_t *buf                            ____cacheline_aligned;
+       struct virtio_i2c_in_hdr in_hdr         ____cacheline_aligned;
+};
+
+static void virtio_i2c_msg_done(struct virtqueue *vq)
+{
+       struct virtio_i2c *vi = vq->vdev->priv;
+
+       complete(&vi->completion);
+}
+
+static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
+                                  struct virtio_i2c_req *reqs,
+                                  struct i2c_msg *msgs, int num)
+{
+       struct scatterlist *sgs[3], out_hdr, msg_buf, in_hdr;
+       int i;
+
+       for (i = 0; i < num; i++) {
+               int outcnt = 0, incnt = 0;
+
+               /*
+                * We don't support 0 length messages and so filter out
+                * 0 length transfers by using i2c_adapter_quirks.
+                */
+               if (!msgs[i].len)
+                       break;
+
+               /*
+                * Only 7-bit addressing is supported at the moment. For the
+                * address format, please check the Virtio I2C Specification.

+                */
+               reqs[i].out_hdr.addr = cpu_to_le16(msgs[i].addr << 1);
+
+               if (i != num - 1)
+                       reqs[i].out_hdr.flags = cpu_to_le32(VIRTIO_I2C_FLAGS_FAIL_NEXT);
+
+               sg_init_one(&out_hdr, &reqs[i].out_hdr, sizeof(reqs[i].out_hdr));
+               sgs[outcnt++] = &out_hdr;
+
+               reqs[i].buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
+               if (!reqs[i].buf)
+                       break;
+
+               sg_init_one(&msg_buf, reqs[i].buf, msgs[i].len);
+
+               if (msgs[i].flags & I2C_M_RD)
+                       sgs[outcnt + incnt++] = &msg_buf;
+               else
+                       sgs[outcnt++] = &msg_buf;
+
+               sg_init_one(&in_hdr, &reqs[i].in_hdr, sizeof(reqs[i].in_hdr));
+               sgs[outcnt + incnt++] = &in_hdr;
+
+               if (virtqueue_add_sgs(vq, sgs, outcnt, incnt, &reqs[i], GFP_KERNEL)) {
+                       i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], false);
+                       break;
+               }
+       }
+
+       return i;
+}
+
+static int virtio_i2c_complete_reqs(struct virtqueue *vq,
+                                   struct virtio_i2c_req *reqs,
+                                   struct i2c_msg *msgs, int num,
+                                   bool timedout)
+{
+       struct virtio_i2c_req *req;
+       bool failed = timedout;
+       unsigned int len;
+       int i, j = 0;
+
+       for (i = 0; i < num; i++) {
+               /* Detach the ith request from the vq */
+               req = virtqueue_get_buf(vq, &len);
+
+               /*
+                * The condition req == &reqs[i] should always hold, since
+                * exactly num requests were queued on the vq; req can never
+                * be NULL here.
+                */
+               if (!failed && (WARN_ON(req != &reqs[i]) ||
+                               req->in_hdr.status != VIRTIO_I2C_MSG_OK))
+                       failed = true;
+
+               i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed);
+
+               if (!failed)
+                       j++;
+       }
+
+       return timedout ? -ETIMEDOUT : j;
+}
+
+static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+                          int num)
+{
+       struct virtio_i2c *vi = i2c_get_adapdata(adap);
+       struct virtqueue *vq = vi->vq;
+       struct virtio_i2c_req *reqs;
+       unsigned long time_left;
+       int count;
+
+       reqs = kcalloc(num, sizeof(*reqs), GFP_KERNEL);
+       if (!reqs)
+               return -ENOMEM;
+
+       count = virtio_i2c_prepare_reqs(vq, reqs, msgs, num);
+       if (!count)
+               goto err_free;
+
+       /*
+        * For the case where count < num, i.e. we weren't able to queue all the
+        * msgs, ideally we should abort right away and return early, but some
+        * of the messages may already have been sent to the remote I2C
+        * controller, leaving the virtqueue in an undefined state. We kick
+        * the remote here to clear the virtqueue, so we can try another set of
+        * messages later on.
+        */
+
+       reinit_completion(&vi->completion);
+       virtqueue_kick(vq);
+
+       time_left = wait_for_completion_timeout(&vi->completion, adap->timeout);
+       if (!time_left)
+               dev_err(&adap->dev, "virtio i2c backend timeout.\n");
+
+       count = virtio_i2c_complete_reqs(vq, reqs, msgs, count, !time_left);
+
+err_free:
+       kfree(reqs);
+       return count;
+}
+
+static void virtio_i2c_del_vqs(struct virtio_device *vdev)
+{
+       vdev->config->reset(vdev);
+       vdev->config->del_vqs(vdev);
+}
+
+static int virtio_i2c_setup_vqs(struct virtio_i2c *vi)
+{
+       struct virtio_device *vdev = vi->vdev;
+
+       vi->vq = virtio_find_single_vq(vdev, virtio_i2c_msg_done, "msg");
+       return PTR_ERR_OR_ZERO(vi->vq);
+}
+
+static u32 virtio_i2c_func(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static struct i2c_algorithm virtio_algorithm = {
+       .master_xfer = virtio_i2c_xfer,
+       .functionality = virtio_i2c_func,
+};
+
+static const struct i2c_adapter_quirks virtio_i2c_quirks = {
+       .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
+static int virtio_i2c_probe(struct virtio_device *vdev)
+{
+       struct virtio_i2c *vi;
+       int ret;
+
+       vi = devm_kzalloc(&vdev->dev, sizeof(*vi), GFP_KERNEL);
+       if (!vi)
+               return -ENOMEM;
+
+       vdev->priv = vi;
+       vi->vdev = vdev;
+
+       init_completion(&vi->completion);
+
+       ret = virtio_i2c_setup_vqs(vi);
+       if (ret)
+               return ret;
+
+       vi->adap.owner = THIS_MODULE;
+       snprintf(vi->adap.name, sizeof(vi->adap.name),
+                "i2c_virtio at virtio bus %d", vdev->index);
+       vi->adap.algo = &virtio_algorithm;
+       vi->adap.quirks = &virtio_i2c_quirks;
+       vi->adap.dev.parent = &vdev->dev;
+       vi->adap.dev.of_node = vdev->dev.of_node;
+       i2c_set_adapdata(&vi->adap, vi);
+
+       /*
+        * Setup ACPI node for controlled devices which will be probed through
+        * ACPI.
+        */
+       ACPI_COMPANION_SET(&vi->adap.dev, ACPI_COMPANION(vdev->dev.parent));
+
+       ret = i2c_add_adapter(&vi->adap);
+       if (ret)
+               virtio_i2c_del_vqs(vdev);
+
+       return ret;
+}
+
+static void virtio_i2c_remove(struct virtio_device *vdev)
+{
+       struct virtio_i2c *vi = vdev->priv;
+
+       i2c_del_adapter(&vi->adap);
+       virtio_i2c_del_vqs(vdev);
+}
+
+static struct virtio_device_id id_table[] = {
+       { VIRTIO_ID_I2C_ADAPTER, VIRTIO_DEV_ANY_ID },
+       {}
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+#ifdef CONFIG_PM_SLEEP
+static int virtio_i2c_freeze(struct virtio_device *vdev)
+{
+       virtio_i2c_del_vqs(vdev);
+       return 0;
+}
+
+static int virtio_i2c_restore(struct virtio_device *vdev)
+{
+       return virtio_i2c_setup_vqs(vdev->priv);
+}
+#endif
+
+static struct virtio_driver virtio_i2c_driver = {
+       .id_table       = id_table,
+       .probe          = virtio_i2c_probe,
+       .remove         = virtio_i2c_remove,
+       .driver = {
+               .name   = "i2c_virtio",
+       },
+#ifdef CONFIG_PM_SLEEP
+       .freeze = virtio_i2c_freeze,
+       .restore = virtio_i2c_restore,
+#endif
+};
+module_virtio_driver(virtio_i2c_driver);
+
+MODULE_AUTHOR("Jie Deng <jie.deng@intel.com>");
+MODULE_AUTHOR("Conghui Chen <conghui.chen@intel.com>");
+MODULE_DESCRIPTION("Virtio i2c bus driver");
+MODULE_LICENSE("GPL");
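Once registered, the virtio adapter behaves like any other Linux I2C bus; a combined write-then-read from guest userspace maps onto two chained requests, with VIRTIO_I2C_FLAGS_FAIL_NEXT set on the first. A hedged usage sketch (the bus number and the 0x50 device address are assumptions, not from the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x00, val = 0;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-0", O_RDWR);	/* bus number is an assumption */

	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("i2c");
		return 1;
	}
	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}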
index f2241ce..6d24dc3 100644 (file)
@@ -517,7 +517,7 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
                return PTR_ERR(priv->base);
 
        priv->irq = platform_get_irq(pdev, 0);
-       if (priv->irq <= 0)
+       if (priv->irq < 0)
                return priv->irq;
        /* SMBAlert irq */
        priv->alert_data.irq = platform_get_irq(pdev, 1);
index cb64fe6..bce0e8b 100644 (file)
@@ -14,6 +14,8 @@
 
 /* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */
 
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
 #include <linux/cdev.h>
 #include <linux/compat.h>
 #include <linux/device.h>
@@ -68,8 +70,7 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
        struct i2c_dev *i2c_dev;
 
        if (adap->nr >= I2C_MINORS) {
-               printk(KERN_ERR "i2c-dev: Out of device minors (%d)\n",
-                      adap->nr);
+               pr_err("Out of device minors (%d)\n", adap->nr);
                return ERR_PTR(-ENODEV);
        }
 
@@ -101,7 +102,7 @@ static ssize_t name_show(struct device *dev,
 
        if (!i2c_dev)
                return -ENODEV;
-       return sprintf(buf, "%s\n", i2c_dev->adap->name);
+       return sysfs_emit(buf, "%s\n", i2c_dev->adap->name);
 }
 static DEVICE_ATTR_RO(name);
 
@@ -141,16 +142,16 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
        if (count > 8192)
                count = 8192;
 
-       tmp = kmalloc(count, GFP_KERNEL);
+       tmp = kzalloc(count, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
 
-       pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
-               iminor(file_inode(file)), count);
+       pr_debug("i2c-%d reading %zu bytes.\n", iminor(file_inode(file)), count);
 
        ret = i2c_master_recv(client, tmp, count);
        if (ret >= 0)
-               ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
+               if (copy_to_user(buf, tmp, ret))
+                       ret = -EFAULT;
        kfree(tmp);
        return ret;
 }
@@ -169,8 +170,7 @@ static ssize_t i2cdev_write(struct file *file, const char __user *buf,
        if (IS_ERR(tmp))
                return PTR_ERR(tmp);
 
-       pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
-               iminor(file_inode(file)), count);
+       pr_debug("i2c-%d writing %zu bytes.\n", iminor(file_inode(file)), count);
 
        ret = i2c_master_send(client, tmp, count);
        kfree(tmp);
@@ -673,8 +673,7 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
                return res;
        }
 
-       pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
-                adap->name, adap->nr);
+       pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr);
        return 0;
 }
 
@@ -693,7 +692,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
 
        put_i2c_dev(i2c_dev, true);
 
-       pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
+       pr_debug("adapter [%s] unregistered\n", adap->name);
        return 0;
 }
 
@@ -726,7 +725,7 @@ static int __init i2c_dev_init(void)
 {
        int res;
 
-       printk(KERN_INFO "i2c /dev entries driver\n");
+       pr_info("i2c /dev entries driver\n");
 
        res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c");
        if (res)
@@ -754,7 +753,7 @@ out_unreg_class:
 out_unreg_chrdev:
        unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
 out:
-       printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__);
+       pr_err("Driver Initialisation failed\n");
        return res;
 }
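The i2c-dev conversion above relies on pr_fmt(): defining it before the first include makes every pr_*() call prefix its message with the module name at compile time, which is why the hand-written "i2c-dev: " prefixes could be dropped from the individual messages. A hedged sketch of the mechanism (not from the patch):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init example_init(void)
{
	pr_info("loaded\n");	/* logged as "<module name>: loaded" */
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");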
 
index 0e56ace..8d8b1ba 100644 (file)
@@ -231,6 +231,7 @@ config DMARD10
 
 config FXLS8962AF
        tristate
+       depends on I2C || !I2C # cannot be built-in for modular I2C
 
 config FXLS8962AF_I2C
        tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
@@ -247,6 +248,7 @@ config FXLS8962AF_I2C
 config FXLS8962AF_SPI
        tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer SPI Driver"
        depends on SPI
+       depends on I2C || !I2C
        select FXLS8962AF
        select REGMAP_SPI
        help
index 078d878..0019f1e 100644 (file)
@@ -637,7 +637,7 @@ static int fxls8962af_i2c_raw_read_errata3(struct fxls8962af_data *data,
                        return ret;
        }
 
-       return ret;
+       return 0;
 }
 
 static int fxls8962af_fifo_transfer(struct fxls8962af_data *data,
index 6ef0960..f9c8385 100644 (file)
@@ -664,8 +664,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc)
 
        adc_period = adc->auto_conversion_period;
        for (i = 0; i < 16; ++i) {
-               if (((1000 * (1 << i)) / 32) < adc_period)
-                       continue;
+               if (((1000 * (1 << i)) / 32) >= adc_period)
+                       break;
        }
        if (i > 0)
                i--;
index 7010c42..c56fccb 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/completion.h>
 #include <linux/regmap.h>
 #include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/machine.h>
 #include <linux/slab.h>
 
 #define RN5T618_ADC_CONVERSION_TIMEOUT   (msecs_to_jiffies(500))
@@ -189,6 +191,19 @@ static const struct iio_chan_spec rn5t618_adc_iio_channels[] = {
        RN5T618_ADC_CHANNEL(AIN0, IIO_VOLTAGE, "AIN0")
 };
 
+static struct iio_map rn5t618_maps[] = {
+       IIO_MAP("VADP", "rn5t618-power", "vadp"),
+       IIO_MAP("VUSB", "rn5t618-power", "vusb"),
+       { /* sentinel */ }
+};
+
+static void unregister_map(void *data)
+{
+       struct iio_dev *iio_dev = (struct iio_dev *) data;
+
+       iio_map_array_unregister(iio_dev);
+}
+
 static int rn5t618_adc_probe(struct platform_device *pdev)
 {
        int ret;
@@ -239,6 +254,14 @@ static int rn5t618_adc_probe(struct platform_device *pdev)
                return ret;
        }
 
+       ret = iio_map_array_register(iio_dev, rn5t618_maps);
+       if (ret < 0)
+               return ret;
+
+       ret = devm_add_action_or_reset(adc->dev, unregister_map, iio_dev);
+       if (ret < 0)
+               return ret;
+
        return devm_iio_device_register(adc->dev, iio_dev);
 }
 
index 2383eac..a2b83f0 100644 (file)
@@ -568,7 +568,6 @@ static int ti_ads7950_probe(struct spi_device *spi)
        st->ring_xfer.tx_buf = &st->tx_buf[0];
        st->ring_xfer.rx_buf = &st->rx_buf[0];
        /* len will be set later */
-       st->ring_xfer.cs_change = true;
 
        spi_message_add_tail(&st->ring_xfer, &st->ring_msg);
 
index 2a957f1..9e0fce9 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 
+#include <linux/time.h>
+
 #define HDC100X_REG_TEMP                       0x00
 #define HDC100X_REG_HUMIDITY                   0x01
 
@@ -166,7 +168,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
                                   struct iio_chan_spec const *chan)
 {
        struct i2c_client *client = data->client;
-       int delay = data->adc_int_us[chan->address];
+       int delay = data->adc_int_us[chan->address] + 1*USEC_PER_MSEC;
        int ret;
        __be16 val;
 
@@ -316,7 +318,7 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
        struct iio_dev *indio_dev = pf->indio_dev;
        struct hdc100x_data *data = iio_priv(indio_dev);
        struct i2c_client *client = data->client;
-       int delay = data->adc_int_us[0] + data->adc_int_us[1];
+       int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC;
        int ret;
 
        /* dual read starts at temp register */
index a5b421f..b9a06ca 100644 (file)
@@ -411,12 +411,11 @@ int __adis_initial_startup(struct adis *adis)
        int ret;
 
        /* check if the device has rst pin low */
-       gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_ASIS);
+       gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(gpio))
                return PTR_ERR(gpio);
 
        if (gpio) {
-               gpiod_set_value_cansleep(gpio, 1);
                msleep(10);
                /* bring device out of reset */
                gpiod_set_value_cansleep(gpio, 0);
index 515a7e9..5d3b8b8 100644 (file)
@@ -926,12 +926,25 @@ static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
        return ret;
 }
 
+static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
+{
+       struct ib_qp_attr qp_attr;
+       int qp_attr_mask, ret;
+
+       qp_attr.qp_state = IB_QPS_INIT;
+       ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
+       if (ret)
+               return ret;
+
+       return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+}
+
 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                   struct ib_qp_init_attr *qp_init_attr)
 {
        struct rdma_id_private *id_priv;
        struct ib_qp *qp;
-       int ret = 0;
+       int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
        if (id->device != pd->device) {
@@ -948,6 +961,8 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 
        if (id->qp_type == IB_QPT_UD)
                ret = cma_init_ud_qp(id_priv, qp);
+       else
+               ret = cma_init_conn_qp(id_priv, qp);
        if (ret)
                goto out_destroy;
 
index f782d5e..03e1db5 100644 (file)
@@ -249,6 +249,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);
 
+       rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+       rdma_restrack_set_name(&mr->res, NULL);
+       rdma_restrack_add(&mr->res);
        uobj->object = mr;
 
        uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
index 283b6b8..ea0054c 100644 (file)
@@ -1681,6 +1681,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
        if (nq)
                nq->budget++;
        atomic_inc(&rdev->srq_count);
+       spin_lock_init(&srq->lock);
 
        return 0;
 
index a8688a9..4678bd6 100644 (file)
@@ -1397,7 +1397,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
        memset(&rattr, 0, sizeof(rattr));
        rc = bnxt_re_register_netdev(rdev);
        if (rc) {
-               rtnl_unlock();
                ibdev_err(&rdev->ibdev,
                          "Failed to register with netedev: %#x\n", rc);
                return -EINVAL;
index 6c8c910..c7e8d7b 100644 (file)
@@ -967,6 +967,12 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        return !err || err == -ENODATA ? npolled : err;
 }
 
+void c4iw_cq_rem_ref(struct c4iw_cq *chp)
+{
+       if (refcount_dec_and_test(&chp->refcnt))
+               complete(&chp->cq_rel_comp);
+}
+
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
        struct c4iw_cq *chp;
@@ -976,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
        chp = to_c4iw_cq(ib_cq);
 
        xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
-       refcount_dec(&chp->refcnt);
-       wait_event(chp->wait, !refcount_read(&chp->refcnt));
+       c4iw_cq_rem_ref(chp);
+       wait_for_completion(&chp->cq_rel_comp);
 
        ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
                                             ibucontext);
@@ -1081,7 +1087,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        refcount_set(&chp->refcnt, 1);
-       init_waitqueue_head(&chp->wait);
+       init_completion(&chp->cq_rel_comp);
        ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
        if (ret)
                goto err_destroy_cq;
index 7798d09..34211a5 100644 (file)
@@ -213,8 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
                break;
        }
 done:
-       if (refcount_dec_and_test(&chp->refcnt))
-               wake_up(&chp->wait);
+       c4iw_cq_rem_ref(chp);
        c4iw_qp_rem_ref(&qhp->ibqp);
 out:
        return;
@@ -234,8 +233,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
                spin_lock_irqsave(&chp->comp_handler_lock, flag);
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-               if (refcount_dec_and_test(&chp->refcnt))
-                       wake_up(&chp->wait);
+               c4iw_cq_rem_ref(chp);
        } else {
                pr_debug("unknown cqid 0x%x\n", qid);
                xa_unlock_irqrestore(&dev->cqs, flag);
index 3883af3..ac5f581 100644 (file)
@@ -428,7 +428,7 @@ struct c4iw_cq {
        spinlock_t lock;
        spinlock_t comp_handler_lock;
        refcount_t refcnt;
-       wait_queue_head_t wait;
+       struct completion cq_rel_comp;
        struct c4iw_wr_wait *wr_waitp;
 };
 
@@ -979,6 +979,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
 int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_cq_rem_ref(struct c4iw_cq *chp);
 int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                   struct ib_udata *udata);
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
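The cxgb4 hunks above replace an open-coded wait_event() on the refcount with the refcount-plus-completion teardown pattern: the last reference holder fires the completion and the destroy path sleeps on it. A hedged generic sketch of the pattern (the names are placeholders, not taken from the driver):

#include <linux/completion.h>
#include <linux/refcount.h>

struct example_obj {
	refcount_t refcnt;
	struct completion rel_comp;
};

static void example_obj_put(struct example_obj *o)
{
	if (refcount_dec_and_test(&o->refcnt))
		complete(&o->rel_comp);
}

static void example_obj_destroy(struct example_obj *o)
{
	example_obj_put(o);			/* drop the initial reference */
	wait_for_completion(&o->rel_comp);	/* wait for the remaining users */
	/* safe to free the object's resources here */
}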
index 203e6dd..be4a07b 100644 (file)
@@ -357,6 +357,7 @@ static int efa_enable_msix(struct efa_dev *dev)
        }
 
        if (irq_num != msix_vecs) {
+               efa_disable_msix(dev);
                dev_err(&dev->pdev->dev,
                        "Allocated %d MSI-X (out of %d requested)\n",
                        irq_num, msix_vecs);
index eb15c31..e83dc56 100644 (file)
@@ -3055,6 +3055,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
 static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 {
        int i;
+       struct sdma_desc *descp;
 
        /* Handle last descriptor */
        if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
@@ -3075,12 +3076,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
        if (unlikely(tx->num_desc == MAX_DESC))
                goto enomem;
 
-       tx->descp = kmalloc_array(
-                       MAX_DESC,
-                       sizeof(struct sdma_desc),
-                       GFP_ATOMIC);
-       if (!tx->descp)
+       descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
+       if (!descp)
                goto enomem;
+       tx->descp = descp;
 
        /* reserve last descriptor for coalescing */
        tx->desc_limit = MAX_DESC - 1;
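
Staging the kmalloc_array() result in the new descp local means tx->descp is only overwritten once the allocation has succeeded; on failure the request still points at its original descriptor array instead of NULL, which matters if the cleanup behind the enomem label walks the existing descriptors. The idiom in isolation, with a hypothetical helper name:

        /* hypothetical sketch of the "commit only on success" idiom */
        static int grow_descs(struct sdma_txreq *tx, unsigned int nelem)
        {
                struct sdma_desc *descp;

                descp = kmalloc_array(nelem, sizeof(*descp), GFP_ATOMIC);
                if (!descp)
                        return -ENOMEM;        /* tx->descp left untouched */
                tx->descp = descp;
                return 0;
        }
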
index 8f68cc3..84f3f2b 100644 (file)
@@ -213,8 +213,10 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
 
        hr_cmd->context =
                kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
-       if (!hr_cmd->context)
+       if (!hr_cmd->context) {
+               hr_dev->cmd_mod = 0;
                return -ENOMEM;
+       }
 
        for (i = 0; i < hr_cmd->max_cmds; ++i) {
                hr_cmd->context[i].token = i;
@@ -228,7 +230,6 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
        spin_lock_init(&hr_cmd->context_lock);
 
        hr_cmd->use_events = 1;
-       down(&hr_cmd->poll_sem);
 
        return 0;
 }
@@ -239,8 +240,6 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
 
        kfree(hr_cmd->context);
        hr_cmd->use_events = 0;
-
-       up(&hr_cmd->poll_sem);
 }
 
 struct hns_roce_cmd_mailbox *
index 078a971..cc6eab1 100644 (file)
@@ -873,11 +873,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 
        if (hr_dev->cmd_mod) {
                ret = hns_roce_cmd_use_events(hr_dev);
-               if (ret) {
+               if (ret)
                        dev_warn(dev,
                                 "Cmd event  mode failed, set back to poll!\n");
-                       hns_roce_cmd_use_polling(hr_dev);
-               }
        }
 
        ret = hns_roce_init_hem(hr_dev);
index dab8828..b6f9c41 100644 (file)
@@ -6,7 +6,7 @@ config INFINIBAND_IRDMA
        depends on PCI
        depends on ICE && I40E
        select GENERIC_ALLOCATOR
-       select CONFIG_AUXILIARY_BUS
+       select AUXILIARY_BUS
        help
          This is an Intel(R) Ethernet Protocol Driver for RDMA driver
          that support E810 (iWARP/RoCE) and X722 (iWARP) network devices.
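
Kconfig's select takes the bare symbol name, so the old "select CONFIG_AUXILIARY_BUS" referenced a symbol that does not exist and effectively selected nothing; "select AUXILIARY_BUS" is the spelling that actually pulls in the auxiliary bus support this driver needs.
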
index 7abeb57..b8e5e37 100644 (file)
@@ -945,7 +945,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        u32 *cqb = NULL;
        void *cqc;
        int cqe_size;
-       unsigned int irqn;
        int eqn;
        int err;
 
@@ -984,7 +983,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
        }
 
-       err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+       err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
        if (err)
                goto err_cqb;
 
@@ -1007,7 +1006,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                goto err_cqb;
 
        mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
-       cq->mcq.irqn = irqn;
        if (udata)
                cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
        else
index eb9b0a2..c869b2a 100644 (file)
@@ -975,7 +975,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
        struct mlx5_ib_dev *dev;
        int user_vector;
        int dev_eqn;
-       unsigned int irqn;
        int err;
 
        if (uverbs_copy_from(&user_vector, attrs,
@@ -987,7 +986,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
                return PTR_ERR(c);
        dev = to_mdev(c->ibucontext.device);
 
-       err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+       err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
        if (err < 0)
                return err;
 
index 094c976..2507051 100644 (file)
@@ -4454,7 +4454,8 @@ static void mlx5r_mp_remove(struct auxiliary_device *adev)
        mutex_lock(&mlx5_ib_multiport_mutex);
        if (mpi->ibdev)
                mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
-       list_del(&mpi->list);
+       else
+               list_del(&mpi->list);
        mutex_unlock(&mlx5_ib_multiport_mutex);
        kfree(mpi);
 }
index 3263851..3f1c5a4 100644 (file)
@@ -531,8 +531,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
                 */
                spin_unlock_irq(&ent->lock);
                need_delay = need_resched() || someone_adding(cache) ||
-                            time_after(jiffies,
-                                       READ_ONCE(cache->last_add) + 300 * HZ);
+                            !time_after(jiffies,
+                                        READ_ONCE(cache->last_add) + 300 * HZ);
                spin_lock_irq(&ent->lock);
                if (ent->disabled)
                        goto out;
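
time_after(a, b) becomes true once a has advanced past b, so the original condition only set need_delay after the entry had already been idle for 300 seconds; the negated form is true while an MR has been added to the entry within the last 300 seconds, which is presumably when emptying it is worth postponing. For a last_add timestamp T:

        time_after(jiffies, T + 300 * HZ)     /* true once 300 s have passed since the last add */
        !time_after(jiffies, T + 300 * HZ)    /* true while the entry is still "warm"           */
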
index 0ea9a5a..1c1d1b5 100644 (file)
@@ -85,7 +85,7 @@ int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
                goto out;
        }
 
-       elem = rxe_alloc(&rxe->mc_elem_pool);
+       elem = rxe_alloc_locked(&rxe->mc_elem_pool);
        if (!elem) {
                err = -ENOMEM;
                goto out;
index dec9292..5ac27f2 100644 (file)
@@ -259,6 +259,7 @@ static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
 
        iph->version    =       IPVERSION;
        iph->ihl        =       sizeof(struct iphdr) >> 2;
+       iph->tot_len    =       htons(skb->len);
        iph->frag_off   =       df;
        iph->protocol   =       proto;
        iph->tos        =       tos;
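
The hand-built IPv4 header now carries the datagram length explicitly instead of relying on it being filled in later on the transmit path; tot_len is a big-endian 16-bit field, hence the htons(): for skb->len == 1500 the stored value is htons(1500), i.e. the on-wire bytes 0x05 0xdc.
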
index 85b8125..72d9539 100644 (file)
@@ -63,7 +63,7 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
        if (*num_elem < 0)
                goto err1;
 
-       q = kmalloc(sizeof(*q), GFP_KERNEL);
+       q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                goto err1;
 
index 3743dc3..360ec67 100644 (file)
@@ -318,7 +318,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
                pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
                return RESPST_ERR_MALFORMED_WQE;
        }
-       size = sizeof(wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
+       size = sizeof(*wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
        memcpy(&qp->resp.srq_wqe, wqe, size);
 
        qp->resp.wqe = &qp->resp.srq_wqe.wqe;
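
sizeof(wqe) is the size of the pointer (typically 8 bytes), not of the work-queue entry it points to, so the old expression badly undersized the memcpy(); sizeof(*wqe) measures the entry structure itself. The distinction in isolation, using a type that already appears in this hunk:

        struct rxe_sge *sge;                    /* illustrative only */

        sizeof(sge);    /* size of a pointer, e.g. 8 on 64-bit */
        sizeof(*sge);   /* size of struct rxe_sge itself       */
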
index 8a1e70e..7887941 100644 (file)
@@ -403,7 +403,7 @@ struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
 {
        struct icc_path **ptr, *path;
 
-       ptr = devres_alloc(devm_icc_release, sizeof(**ptr), GFP_KERNEL);
+       ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);
 
@@ -973,9 +973,14 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
        }
        node->avg_bw = node->init_avg;
        node->peak_bw = node->init_peak;
+
+       if (provider->pre_aggregate)
+               provider->pre_aggregate(node);
+
        if (provider->aggregate)
                provider->aggregate(node, 0, node->init_avg, node->init_peak,
                                    &node->avg_bw, &node->peak_bw);
+
        provider->set(node, node);
        node->avg_bw = 0;
        node->peak_bw = 0;
@@ -1106,6 +1111,8 @@ void icc_sync_state(struct device *dev)
                dev_dbg(p->dev, "interconnect provider is in synced state\n");
                list_for_each_entry(n, &p->nodes, node_list) {
                        if (n->init_avg || n->init_peak) {
+                               n->init_avg = 0;
+                               n->init_peak = 0;
                                aggregate_requests(n);
                                p->set(n, n);
                        }
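
Two behavioural tweaks in the core here: icc_node_add() now invokes the provider's pre_aggregate() callback (when it has one) before the first aggregate(), as the normal request path does, and icc_sync_state() zeroes init_avg/init_peak before re-aggregating, so the boot-time bandwidth floor a node was registered with is genuinely dropped once every consumer has probed. The qcom icc-rpmh hunk just below compensates by folding the initial values into its per-bucket sums for as long as they remain non-zero.
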
index bf01d09..f6fae64 100644 (file)
@@ -57,6 +57,11 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
                        qn->sum_avg[i] += avg_bw;
                        qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
                }
+
+               if (node->init_avg || node->init_peak) {
+                       qn->sum_avg[i] = max_t(u64, qn->sum_avg[i], node->init_avg);
+                       qn->max_peak[i] = max_t(u64, qn->max_peak[i], node->init_peak);
+               }
        }
 
        *agg_avg += avg_bw;
@@ -79,7 +84,6 @@ EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
 int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
 {
        struct qcom_icc_provider *qp;
-       struct qcom_icc_node *qn;
        struct icc_node *node;
 
        if (!src)
@@ -88,12 +92,6 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
                node = src;
 
        qp = to_qcom_provider(node->provider);
-       qn = node->data;
-
-       qn->sum_avg[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->sum_avg[QCOM_ICC_BUCKET_AMC],
-                                                node->avg_bw);
-       qn->max_peak[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->max_peak[QCOM_ICC_BUCKET_AMC],
-                                                 node->peak_bw);
 
        qcom_icc_bcm_voter_commit(qp->voter);
 
index 98ba927..6f0df62 100644 (file)
@@ -768,6 +768,7 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
        __iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
        __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
        sg_free_table(&sh->sgt);
+       kfree(sh);
 }
 #endif /* CONFIG_DMA_REMAP */
 
index c6cf44a..9ec374e 100644 (file)
@@ -511,7 +511,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
                                 u32 pasid, bool fault_ignore)
 {
        struct pasid_entry *pte;
-       u16 did;
+       u16 did, pgtt;
 
        pte = intel_pasid_get_entry(dev, pasid);
        if (WARN_ON(!pte))
@@ -521,13 +521,19 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
                return;
 
        did = pasid_get_domain_id(pte);
+       pgtt = pasid_pte_get_pgtt(pte);
+
        intel_pasid_clear_entry(dev, pasid, fault_ignore);
 
        if (!ecap_coherent(iommu->ecap))
                clflush_cache_range(pte, sizeof(*pte));
 
        pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-       qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+
+       if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
+               qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+       else
+               iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 
        /* Device IOTLB doesn't need to be flushed in caching mode. */
        if (!cap_caching_mode(iommu->cap))
index 5ff61c3..c11bc8b 100644 (file)
@@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
        return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
 }
 
+/* Get PGTT field of a PASID table entry */
+static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
+{
+       return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
+}
+
 extern unsigned int intel_pasid_max_id;
 int intel_pasid_alloc_table(struct device *dev);
 void intel_pasid_free_table(struct device *dev);
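
pasid_pte_get_pgtt() extracts the page-table-type field from bits 8:6 of the first quadword of a PASID table entry; the teardown hunk above then keeps the PASID-selective P-IOTLB flush only for entries that were using first-level translation or pass-through, and issues a domain-selective IOTLB flush (DMA_TLB_DSI_FLUSH) for the other types. Purely as bit arithmetic, with a made-up value:

        /* e.g. val[0] == 0x101: bit 0 (present) is set, and (0x101 >> 6) & 0x7 == 4 */
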
index 9b0f22b..4b9b3f3 100644 (file)
@@ -675,7 +675,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
                        kfree_rcu(sdev, rcu);
 
                        if (list_empty(&svm->devs)) {
-                               intel_svm_free_pasid(mm);
                                if (svm->notifier.ops) {
                                        mmu_notifier_unregister(&svm->notifier, mm);
                                        /* Clear mm's pasid. */
@@ -690,6 +689,8 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
                                kfree(svm);
                        }
                }
+               /* Drop a PASID reference and free it if no reference. */
+               intel_svm_free_pasid(mm);
        }
 out:
        return ret;
index 5419c4b..63f0af1 100644 (file)
@@ -924,6 +924,9 @@ void iommu_group_remove_device(struct device *dev)
        struct iommu_group *group = dev->iommu_group;
        struct group_device *tmp_device, *device = NULL;
 
+       if (!group)
+               return;
+
        dev_info(dev, "Removing from iommu group %d\n", group->id);
 
        /* Pre-notify listeners that a device is being removed. */
index 6019e58..83df387 100644 (file)
@@ -90,7 +90,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
        struct zpci_dev *zdev = to_zpci_dev(dev);
        struct s390_domain_device *domain_device;
        unsigned long flags;
-       int rc;
+       int cc, rc;
 
        if (!zdev)
                return -ENODEV;
@@ -99,14 +99,21 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
        if (!domain_device)
                return -ENOMEM;
 
-       if (zdev->dma_table)
-               zpci_dma_exit_device(zdev);
+       if (zdev->dma_table) {
+               cc = zpci_dma_exit_device(zdev);
+               if (cc) {
+                       rc = -EIO;
+                       goto out_free;
+               }
+       }
 
        zdev->dma_table = s390_domain->dma_table;
-       rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+       cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                (u64) zdev->dma_table);
-       if (rc)
+       if (cc) {
+               rc = -EIO;
                goto out_restore;
+       }
 
        spin_lock_irqsave(&s390_domain->list_lock, flags);
        /* First device defines the DMA range limits */
@@ -130,6 +137,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
 
 out_restore:
        zpci_dma_init_device(zdev);
+out_free:
        kfree(domain_device);
 
        return rc;
index 3461b0a..cbfdade 100644 (file)
@@ -89,16 +89,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200)
        free_irq(tpci200->info->pdev->irq, (void *) tpci200);
 
        pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
-       pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
 
        pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
        pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
        pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
        pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
-       pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
 
        pci_disable_device(tpci200->info->pdev);
-       pci_dev_put(tpci200->info->pdev);
 }
 
 static void tpci200_enable_irq(struct tpci200_board *tpci200,
@@ -257,7 +254,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_disable_pci;
+               goto err_disable_device;
        }
 
        /* Request IO ID INT space (Bar 3) */
@@ -269,7 +266,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_ip_space;
+               goto err_ip_interface_bar;
        }
 
        /* Request MEM8 space (Bar 5) */
@@ -280,7 +277,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_ioid_int_space;
+               goto err_io_id_int_spaces_bar;
        }
 
        /* Request MEM16 space (Bar 4) */
@@ -291,7 +288,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_mem8_space;
+               goto err_mem8_space_bar;
        }
 
        /* Map internal tpci200 driver user space */
@@ -305,7 +302,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
                res = -ENOMEM;
-               goto out_release_mem8_space;
+               goto err_mem16_space_bar;
        }
 
        /* Initialize lock that protects interface_regs */
@@ -344,18 +341,22 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) unable to register IRQ !",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_ioid_int_space;
+               goto err_interface_regs;
        }
 
        return 0;
 
-out_release_mem8_space:
+err_interface_regs:
+       pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
+err_mem16_space_bar:
+       pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
+err_mem8_space_bar:
        pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
-out_release_ioid_int_space:
+err_io_id_int_spaces_bar:
        pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
-out_release_ip_space:
+err_ip_interface_bar:
        pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
-out_disable_pci:
+err_disable_device:
        pci_disable_device(tpci200->info->pdev);
        return res;
 }
@@ -527,7 +528,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL);
        if (!tpci200->info) {
                ret = -ENOMEM;
-               goto out_err_info;
+               goto err_tpci200;
        }
 
        pci_dev_get(pdev);
@@ -538,7 +539,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        if (ret) {
                dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory");
                ret = -EBUSY;
-               goto out_err_pci_request;
+               goto err_tpci200_info;
        }
        tpci200->info->cfg_regs = ioremap(
                        pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
@@ -546,7 +547,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        if (!tpci200->info->cfg_regs) {
                dev_err(&pdev->dev, "Failed to map PCI Configuration Memory");
                ret = -EFAULT;
-               goto out_err_ioremap;
+               goto err_request_region;
        }
 
        /* Disable byte swapping for 16 bit IP module access. This will ensure
@@ -569,7 +570,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        if (ret) {
                dev_err(&pdev->dev, "error during tpci200 install\n");
                ret = -ENODEV;
-               goto out_err_install;
+               goto err_cfg_regs;
        }
 
        /* Register the carrier in the industry pack bus driver */
@@ -581,7 +582,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
                dev_err(&pdev->dev,
                        "error registering the carrier on ipack driver\n");
                ret = -EFAULT;
-               goto out_err_bus_register;
+               goto err_tpci200_install;
        }
 
        /* save the bus number given by ipack to logging purpose */
@@ -592,19 +593,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
                tpci200_create_device(tpci200, i);
        return 0;
 
-out_err_bus_register:
+err_tpci200_install:
        tpci200_uninstall(tpci200);
-       /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
-       tpci200->info->cfg_regs = NULL;
-out_err_install:
-       if (tpci200->info->cfg_regs)
-               iounmap(tpci200->info->cfg_regs);
-out_err_ioremap:
+err_cfg_regs:
+       pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+err_request_region:
        pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
-out_err_pci_request:
-       pci_dev_put(pdev);
+err_tpci200_info:
        kfree(tpci200->info);
-out_err_info:
+       pci_dev_put(pdev);
+err_tpci200:
        kfree(tpci200);
        return ret;
 }
@@ -614,6 +612,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200)
        ipack_bus_unregister(tpci200->info->ipack_bus);
        tpci200_uninstall(tpci200);
 
+       pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+
+       pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
+
+       pci_dev_put(tpci200->info->pdev);
+
        kfree(tpci200->info);
        kfree(tpci200);
 }
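
The net effect of the relabelled error paths in tpci200_register()/tpci200_pci_probe(): each label undoes only what was successfully set up before the failure, in reverse order, and the labels follow the common kernel convention of being named after the last step that succeeded. The configuration-space mapping, its BAR and the pci_dev reference taken in probe are now torn down in __tpci200_pci_remove() rather than in tpci200_unregister(), keeping that teardown in a single place.
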
index ede02dc..5ddb8e5 100644 (file)
@@ -267,9 +267,7 @@ static int alpine_msix_init(struct device_node *node,
                goto err_priv;
        }
 
-       priv->msi_map = kcalloc(BITS_TO_LONGS(priv->num_spis),
-                               sizeof(*priv->msi_map),
-                               GFP_KERNEL);
+       priv->msi_map = bitmap_zalloc(priv->num_spis, GFP_KERNEL);
        if (!priv->msi_map) {
                ret = -ENOMEM;
                goto err_priv;
@@ -285,7 +283,7 @@ static int alpine_msix_init(struct device_node *node,
        return 0;
 
 err_map:
-       kfree(priv->msi_map);
+       bitmap_free(priv->msi_map);
 err_priv:
        kfree(priv);
        return ret;
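
This is the first of several hunks below (gic-v2m, gic-v3-its, gic-v3-mbi, ls-scfg-msi, mvebu-gicp, mvebu-odmi and the partition irqchip) making the same conversion: the open-coded bitmap allocation is replaced by the helpers from <linux/bitmap.h> (or their devm_ variants). Both spellings allocate the same zeroed storage; the helper just hides the BITS_TO_LONGS() arithmetic and pairs naturally with bitmap_free():

        /* equivalent allocations for an nbits-wide bitmap */
        map = kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long), GFP_KERNEL);
        map = bitmap_zalloc(nbits, GFP_KERNEL);
        /* ... */
        bitmap_free(map);       /* counterpart of bitmap_zalloc() */
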
index b8c06bd..6fc145a 100644 (file)
@@ -226,7 +226,7 @@ static void aic_irq_eoi(struct irq_data *d)
         * Reading the interrupt reason automatically acknowledges and masks
         * the IRQ, so we just unmask it here if needed.
         */
-       if (!irqd_irq_disabled(d) && !irqd_irq_masked(d))
+       if (!irqd_irq_masked(d))
                aic_irq_unmask(d);
 }
 
index be9ea6f..9349fc6 100644 (file)
@@ -269,7 +269,7 @@ static void gicv2m_teardown(void)
 
        list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
                list_del(&v2m->entry);
-               kfree(v2m->bm);
+               bitmap_free(v2m->bm);
                iounmap(v2m->base);
                of_node_put(to_of_node(v2m->fwnode));
                if (is_fwnode_irqchip(v2m->fwnode))
@@ -386,8 +386,7 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
                        break;
                }
        }
-       v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
-                         GFP_KERNEL);
+       v2m->bm = bitmap_zalloc(v2m->nr_spis, GFP_KERNEL);
        if (!v2m->bm) {
                ret = -ENOMEM;
                goto err_iounmap;
index ba39668..7f40dca 100644 (file)
@@ -2140,7 +2140,7 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
        if (err)
                goto out;
 
-       bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
+       bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
        if (!bitmap)
                goto out;
 
@@ -2156,7 +2156,7 @@ out:
 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
 {
        WARN_ON(free_lpi_range(base, nr_ids));
-       kfree(bitmap);
+       bitmap_free(bitmap);
 }
 
 static void gic_reset_prop_table(void *va)
@@ -3387,7 +3387,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        if (!dev || !itt ||  !col_map || (!lpi_map && alloc_lpis)) {
                kfree(dev);
                kfree(itt);
-               kfree(lpi_map);
+               bitmap_free(lpi_map);
                kfree(col_map);
                return NULL;
        }
index e81e89a..b84c9c2 100644 (file)
@@ -290,8 +290,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
                if (ret)
                        goto err_free_mbi;
 
-               mbi_ranges[n].bm = kcalloc(BITS_TO_LONGS(mbi_ranges[n].nr_spis),
-                                          sizeof(long), GFP_KERNEL);
+               mbi_ranges[n].bm = bitmap_zalloc(mbi_ranges[n].nr_spis, GFP_KERNEL);
                if (!mbi_ranges[n].bm) {
                        ret = -ENOMEM;
                        goto err_free_mbi;
@@ -329,7 +328,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
 err_free_mbi:
        if (mbi_ranges) {
                for (n = 0; n < mbi_range_nr; n++)
-                       kfree(mbi_ranges[n].bm);
+                       bitmap_free(mbi_ranges[n].bm);
                kfree(mbi_ranges);
        }
 
index e0f4deb..fd4e9a3 100644 (file)
@@ -100,6 +100,27 @@ EXPORT_SYMBOL(gic_pmr_sync);
 DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
 EXPORT_SYMBOL(gic_nonsecure_priorities);
 
+/*
+ * When the Non-secure world has access to group 0 interrupts (as a
+ * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
+ * return the Distributor's view of the interrupt priority.
+ *
+ * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
+ * written by software is moved to the Non-secure range by the Distributor.
+ *
+ * If both are true (which is when gic_nonsecure_priorities gets enabled),
+ * we need to shift down the priority programmed by software to match it
+ * against the value returned by ICC_RPR_EL1.
+ */
+#define GICD_INT_RPR_PRI(priority)                                     \
+       ({                                                              \
+               u32 __priority = (priority);                            \
+               if (static_branch_unlikely(&gic_nonsecure_priorities))  \
+                       __priority = 0x80 | (__priority >> 1);          \
+                                                                       \
+               __priority;                                             \
+       })
+
 /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
 static refcount_t *ppi_nmi_refs;
 
@@ -446,18 +467,23 @@ static void gic_irq_set_prio(struct irq_data *d, u8 prio)
        writeb_relaxed(prio, base + offset + index);
 }
 
-static u32 gic_get_ppi_index(struct irq_data *d)
+static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
 {
-       switch (get_intid_range(d)) {
+       switch (__get_intid_range(hwirq)) {
        case PPI_RANGE:
-               return d->hwirq - 16;
+               return hwirq - 16;
        case EPPI_RANGE:
-               return d->hwirq - EPPI_BASE_INTID + 16;
+               return hwirq - EPPI_BASE_INTID + 16;
        default:
                unreachable();
        }
 }
 
+static u32 gic_get_ppi_index(struct irq_data *d)
+{
+       return __gic_get_ppi_index(d->hwirq);
+}
+
 static int gic_irq_nmi_setup(struct irq_data *d)
 {
        struct irq_desc *desc = irq_to_desc(d->irq);
@@ -687,7 +713,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
                return;
 
        if (gic_supports_nmi() &&
-           unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
+           unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI))) {
                gic_handle_nmi(irqnr, regs);
                return;
        }
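
Worked through with an illustrative value: if software programs an NMI priority of 0x20 and gic_nonsecure_priorities is enabled, the Distributor presents that priority in the Non-secure range, so ICC_RPR_EL1 reads back 0x80 | (0x20 >> 1) == 0x90. Comparing the running priority against the raw 0x20, as the old check did, would therefore never match; GICD_INT_RPR_PRI() applies the same transformation to the software value before the comparison in gic_handle_irq().
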
@@ -1467,10 +1493,34 @@ static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
        }
 }
 
+static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
+                                     irq_hw_number_t hwirq)
+{
+       enum gic_intid_range range;
+
+       if (!gic_data.ppi_descs)
+               return false;
+
+       if (!is_of_node(fwspec->fwnode))
+               return false;
+
+       if (fwspec->param_count < 4 || !fwspec->param[3])
+               return false;
+
+       range = __get_intid_range(hwirq);
+       if (range != PPI_RANGE && range != EPPI_RANGE)
+               return false;
+
+       return true;
+}
+
 static int gic_irq_domain_select(struct irq_domain *d,
                                 struct irq_fwspec *fwspec,
                                 enum irq_domain_bus_token bus_token)
 {
+       unsigned int type, ret, ppi_idx;
+       irq_hw_number_t hwirq;
+
        /* Not for us */
         if (fwspec->fwnode != d->fwnode)
                return 0;
@@ -1479,16 +1529,19 @@ static int gic_irq_domain_select(struct irq_domain *d,
        if (!is_of_node(fwspec->fwnode))
                return 1;
 
+       ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
+       if (WARN_ON_ONCE(ret))
+               return 0;
+
+       if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
+               return d == gic_data.domain;
+
        /*
         * If this is a PPI and we have a 4th (non-null) parameter,
         * then we need to match the partition domain.
         */
-       if (fwspec->param_count >= 4 &&
-           fwspec->param[0] == 1 && fwspec->param[3] != 0 &&
-           gic_data.ppi_descs)
-               return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
-
-       return d == gic_data.domain;
+       ppi_idx = __gic_get_ppi_index(hwirq);
+       return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
 }
 
 static const struct irq_domain_ops gic_irq_domain_ops = {
@@ -1503,7 +1556,9 @@ static int partition_domain_translate(struct irq_domain *d,
                                      unsigned long *hwirq,
                                      unsigned int *type)
 {
+       unsigned long ppi_intid;
        struct device_node *np;
+       unsigned int ppi_idx;
        int ret;
 
        if (!gic_data.ppi_descs)
@@ -1513,7 +1568,12 @@ static int partition_domain_translate(struct irq_domain *d,
        if (WARN_ON(!np))
                return -EINVAL;
 
-       ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
+       ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
+       if (WARN_ON_ONCE(ret))
+               return 0;
+
+       ppi_idx = __gic_get_ppi_index(ppi_intid);
+       ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
                                     of_node_to_fwnode(np));
        if (ret < 0)
                return ret;
index f790ca6..a4eb8a2 100644 (file)
@@ -92,18 +92,22 @@ static int pch_pic_set_type(struct irq_data *d, unsigned int type)
        case IRQ_TYPE_EDGE_RISING:
                pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
                pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+               irq_set_handler_locked(d, handle_edge_irq);
                break;
        case IRQ_TYPE_EDGE_FALLING:
                pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
                pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+               irq_set_handler_locked(d, handle_edge_irq);
                break;
        case IRQ_TYPE_LEVEL_HIGH:
                pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
                pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+               irq_set_handler_locked(d, handle_level_irq);
                break;
        case IRQ_TYPE_LEVEL_LOW:
                pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
                pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+               irq_set_handler_locked(d, handle_level_irq);
                break;
        default:
                ret = -EINVAL;
@@ -113,11 +117,24 @@ static int pch_pic_set_type(struct irq_data *d, unsigned int type)
        return ret;
 }
 
+static void pch_pic_ack_irq(struct irq_data *d)
+{
+       unsigned int reg;
+       struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+
+       reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4);
+       if (reg & BIT(PIC_REG_BIT(d->hwirq))) {
+               writel(BIT(PIC_REG_BIT(d->hwirq)),
+                       priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+       }
+       irq_chip_ack_parent(d);
+}
+
 static struct irq_chip pch_pic_irq_chip = {
        .name                   = "PCH PIC",
        .irq_mask               = pch_pic_mask_irq,
        .irq_unmask             = pch_pic_unmask_irq,
-       .irq_ack                = irq_chip_ack_parent,
+       .irq_ack                = pch_pic_ack_irq,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
        .irq_set_type           = pch_pic_set_type,
 };
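
Two coupled changes: pch_pic_set_type() now installs handle_edge_irq or handle_level_irq to match the configured trigger, and the new ack callback clears the latched status of edge-triggered lines by writing the corresponding bit to PCH_PIC_CLR before acknowledging the parent. Assuming the usual 32-bit register banking behind PIC_REG_IDX()/PIC_REG_BIT() (hwirq / 32 and hwirq % 32 respectively), hwirq 37 would land in bit 5 of the second CLR register:

        /* illustrative decode for hwirq 37, under the banking assumption above */
        writel(BIT(5), priv->base + PCH_PIC_CLR + 1 * 4);
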
index 55322da..b4927e4 100644 (file)
@@ -362,10 +362,7 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
 
        msi_data->irqs_num = MSI_IRQS_PER_MSIR *
                             (1 << msi_data->cfg->ibs_shift);
-       msi_data->used = devm_kcalloc(&pdev->dev,
-                                   BITS_TO_LONGS(msi_data->irqs_num),
-                                   sizeof(*msi_data->used),
-                                   GFP_KERNEL);
+       msi_data->used = devm_bitmap_zalloc(&pdev->dev, msi_data->irqs_num, GFP_KERNEL);
        if (!msi_data->used)
                return -ENOMEM;
        /*
index 6ff98b8..586e52d 100644 (file)
@@ -65,6 +65,7 @@ static struct irq_chip mtk_sysirq_chip = {
        .irq_set_type           = mtk_sysirq_set_type,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
 static int mtk_sysirq_domain_translate(struct irq_domain *d,
index 3be5c5d..fe88a78 100644 (file)
@@ -210,9 +210,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
                gicp->spi_cnt += gicp->spi_ranges[i].count;
        }
 
-       gicp->spi_bitmap = devm_kcalloc(&pdev->dev,
-                               BITS_TO_LONGS(gicp->spi_cnt), sizeof(long),
-                               GFP_KERNEL);
+       gicp->spi_bitmap = devm_bitmap_zalloc(&pdev->dev, gicp->spi_cnt, GFP_KERNEL);
        if (!gicp->spi_bitmap)
                return -ENOMEM;
 
index b4d3678..dc4145a 100644 (file)
@@ -171,8 +171,7 @@ static int __init mvebu_odmi_init(struct device_node *node,
        if (!odmis)
                return -ENOMEM;
 
-       odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME),
-                          sizeof(long), GFP_KERNEL);
+       odmis_bm = bitmap_zalloc(odmis_count * NODMIS_PER_FRAME, GFP_KERNEL);
        if (!odmis_bm) {
                ret = -ENOMEM;
                goto err_alloc;
@@ -227,7 +226,7 @@ err_unmap:
                if (odmi->base && !IS_ERR(odmi->base))
                        iounmap(odmis[i].base);
        }
-       kfree(odmis_bm);
+       bitmap_free(odmis_bm);
 err_alloc:
        kfree(odmis);
        return ret;
index 89c23a1..8e76d29 100644 (file)
@@ -215,8 +215,7 @@ struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
                goto out;
        desc->domain = d;
 
-       desc->bitmap = kcalloc(BITS_TO_LONGS(nr_parts), sizeof(long),
-                              GFP_KERNEL);
+       desc->bitmap = bitmap_zalloc(nr_parts, GFP_KERNEL);
        if (WARN_ON(!desc->bitmap))
                goto out;
 
index 32d5920..173e652 100644 (file)
@@ -53,26 +53,6 @@ static u32 pdc_reg_read(int reg, u32 i)
        return readl_relaxed(pdc_base + reg + i * sizeof(u32));
 }
 
-static int qcom_pdc_gic_get_irqchip_state(struct irq_data *d,
-                                         enum irqchip_irq_state which,
-                                         bool *state)
-{
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return 0;
-
-       return irq_chip_get_parent_state(d, which, state);
-}
-
-static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d,
-                                         enum irqchip_irq_state which,
-                                         bool value)
-{
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return 0;
-
-       return irq_chip_set_parent_state(d, which, value);
-}
-
 static void pdc_enable_intr(struct irq_data *d, bool on)
 {
        int pin_out = d->hwirq;
@@ -91,38 +71,16 @@ static void pdc_enable_intr(struct irq_data *d, bool on)
 
 static void qcom_pdc_gic_disable(struct irq_data *d)
 {
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return;
-
        pdc_enable_intr(d, false);
        irq_chip_disable_parent(d);
 }
 
 static void qcom_pdc_gic_enable(struct irq_data *d)
 {
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return;
-
        pdc_enable_intr(d, true);
        irq_chip_enable_parent(d);
 }
 
-static void qcom_pdc_gic_mask(struct irq_data *d)
-{
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return;
-
-       irq_chip_mask_parent(d);
-}
-
-static void qcom_pdc_gic_unmask(struct irq_data *d)
-{
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return;
-
-       irq_chip_unmask_parent(d);
-}
-
 /*
  * GIC does not handle falling edge or active low. To allow falling edge and
  * active low interrupts to be handled at GIC, PDC has an inverter that inverts
@@ -159,14 +117,10 @@ enum pdc_irq_config_bits {
  */
 static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
 {
-       int pin_out = d->hwirq;
        enum pdc_irq_config_bits pdc_type;
        enum pdc_irq_config_bits old_pdc_type;
        int ret;
 
-       if (pin_out == GPIO_NO_WAKE_IRQ)
-               return 0;
-
        switch (type) {
        case IRQ_TYPE_EDGE_RISING:
                pdc_type = PDC_EDGE_RISING;
@@ -191,8 +145,8 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
                return -EINVAL;
        }
 
-       old_pdc_type = pdc_reg_read(IRQ_i_CFG, pin_out);
-       pdc_reg_write(IRQ_i_CFG, pin_out, pdc_type);
+       old_pdc_type = pdc_reg_read(IRQ_i_CFG, d->hwirq);
+       pdc_reg_write(IRQ_i_CFG, d->hwirq, pdc_type);
 
        ret = irq_chip_set_type_parent(d, type);
        if (ret)
@@ -216,12 +170,12 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
 static struct irq_chip qcom_pdc_gic_chip = {
        .name                   = "PDC",
        .irq_eoi                = irq_chip_eoi_parent,
-       .irq_mask               = qcom_pdc_gic_mask,
-       .irq_unmask             = qcom_pdc_gic_unmask,
+       .irq_mask               = irq_chip_mask_parent,
+       .irq_unmask             = irq_chip_unmask_parent,
        .irq_disable            = qcom_pdc_gic_disable,
        .irq_enable             = qcom_pdc_gic_enable,
-       .irq_get_irqchip_state  = qcom_pdc_gic_get_irqchip_state,
-       .irq_set_irqchip_state  = qcom_pdc_gic_set_irqchip_state,
+       .irq_get_irqchip_state  = irq_chip_get_parent_state,
+       .irq_set_irqchip_state  = irq_chip_set_parent_state,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_set_type           = qcom_pdc_gic_set_type,
        .flags                  = IRQCHIP_MASK_ON_SUSPEND |
@@ -282,7 +236,7 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
 
        parent_hwirq = get_parent_hwirq(hwirq);
        if (parent_hwirq == PDC_NO_PARENT_IRQ)
-               return 0;
+               return irq_domain_disconnect_hierarchy(domain->parent, virq);
 
        if (type & IRQ_TYPE_EDGE_BOTH)
                type = IRQ_TYPE_EDGE_RISING;
@@ -319,17 +273,17 @@ static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
        if (ret)
                return ret;
 
+       if (hwirq == GPIO_NO_WAKE_IRQ)
+               return irq_domain_disconnect_hierarchy(domain, virq);
+
        ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
                                            &qcom_pdc_gic_chip, NULL);
        if (ret)
                return ret;
 
-       if (hwirq == GPIO_NO_WAKE_IRQ)
-               return 0;
-
        parent_hwirq = get_parent_hwirq(hwirq);
        if (parent_hwirq == PDC_NO_PARENT_IRQ)
-               return 0;
+               return irq_domain_disconnect_hierarchy(domain->parent, virq);
 
        if (type & IRQ_TYPE_EDGE_BOTH)
                type = IRQ_TYPE_EDGE_RISING;
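
Rather than special-casing GPIO_NO_WAKE_IRQ in every irq_chip callback, the allocation paths now call irq_domain_disconnect_hierarchy() for lines that have no PDC mapping (or no parent GIC interrupt), so those levels of the hierarchy are never entered for such lines and the chip can use the plain irq_chip_*_parent helpers directly. The reordering in qcom_pdc_gpio_alloc() serves the same goal: the no-wake check now runs before irq_domain_set_hwirq_and_chip(), so a disconnected virq never has this chip attached at the PDC level in the first place.
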
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
deleted file mode 100644 (file)
index 04caa0f..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Open-Channel SSD NVM configuration
-#
-
-menuconfig NVM
-       bool "Open-Channel SSD target support (DEPRECATED)"
-       depends on BLOCK
-       help
-         Say Y here to get to enable Open-channel SSDs.
-
-         Open-Channel SSDs implement a set of extension to SSDs, that
-         exposes direct access to the underlying non-volatile memory.
-
-         If you say N, all options in this submenu will be skipped and disabled
-         only do this if you know what you are doing.
-
-         This code is deprecated and will be removed in Linux 5.15.
-
-if NVM
-
-config NVM_PBLK
-       tristate "Physical Block Device Open-Channel SSD target"
-       select CRC32
-       help
-         Allows an open-channel SSD to be exposed as a block device to the
-         host. The target assumes the device exposes raw flash and must be
-         explicitly managed by the host.
-
-         Please note the disk format is considered EXPERIMENTAL for now.
-
-if NVM_PBLK
-
-config NVM_PBLK_DEBUG
-       bool "PBlk Debug Support"
-       default n
-       help
-         Enables debug support for pblk. This includes extra checks, more
-         vocal error messages, and extra tracking fields in the pblk sysfs
-         entries.
-
-endif # NVM_PBLK_DEBUG
-
-endif # NVM
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
deleted file mode 100644 (file)
index 97d9d7c..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Open-Channel SSDs.
-#
-
-obj-$(CONFIG_NVM)              := core.o
-obj-$(CONFIG_NVM_PBLK)         += pblk.o
-pblk-y                         := pblk-init.o pblk-core.o pblk-rb.o \
-                                  pblk-write.o pblk-cache.o pblk-read.o \
-                                  pblk-gc.o pblk-recovery.o pblk-map.o \
-                                  pblk-rl.o pblk-sysfs.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
deleted file mode 100644 (file)
index cf8a754..0000000
+++ /dev/null
@@ -1,1440 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
- * Initial release: Matias Bjorling <m@bjorling.me>
- */
-
-#define pr_fmt(fmt) "nvm: " fmt
-
-#include <linux/list.h>
-#include <linux/types.h>
-#include <linux/sem.h>
-#include <linux/bitmap.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/miscdevice.h>
-#include <linux/lightnvm.h>
-#include <linux/sched/sysctl.h>
-
-static LIST_HEAD(nvm_tgt_types);
-static DECLARE_RWSEM(nvm_tgtt_lock);
-static LIST_HEAD(nvm_devices);
-static DECLARE_RWSEM(nvm_lock);
-
-/* Map between virtual and physical channel and lun */
-struct nvm_ch_map {
-       int ch_off;
-       int num_lun;
-       int *lun_offs;
-};
-
-struct nvm_dev_map {
-       struct nvm_ch_map *chnls;
-       int num_ch;
-};
-
-static void nvm_free(struct kref *ref);
-
-static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
-{
-       struct nvm_target *tgt;
-
-       list_for_each_entry(tgt, &dev->targets, list)
-               if (!strcmp(name, tgt->disk->disk_name))
-                       return tgt;
-
-       return NULL;
-}
-
-static bool nvm_target_exists(const char *name)
-{
-       struct nvm_dev *dev;
-       struct nvm_target *tgt;
-       bool ret = false;
-
-       down_write(&nvm_lock);
-       list_for_each_entry(dev, &nvm_devices, devices) {
-               mutex_lock(&dev->mlock);
-               list_for_each_entry(tgt, &dev->targets, list) {
-                       if (!strcmp(name, tgt->disk->disk_name)) {
-                               ret = true;
-                               mutex_unlock(&dev->mlock);
-                               goto out;
-                       }
-               }
-               mutex_unlock(&dev->mlock);
-       }
-
-out:
-       up_write(&nvm_lock);
-       return ret;
-}
-
-static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
-{
-       int i;
-
-       for (i = lun_begin; i <= lun_end; i++) {
-               if (test_and_set_bit(i, dev->lun_map)) {
-                       pr_err("lun %d already allocated\n", i);
-                       goto err;
-               }
-       }
-
-       return 0;
-err:
-       while (--i >= lun_begin)
-               clear_bit(i, dev->lun_map);
-
-       return -EBUSY;
-}
-
-static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
-                                int lun_end)
-{
-       int i;
-
-       for (i = lun_begin; i <= lun_end; i++)
-               WARN_ON(!test_and_clear_bit(i, dev->lun_map));
-}
-
-static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       struct nvm_dev_map *dev_map = tgt_dev->map;
-       int i, j;
-
-       for (i = 0; i < dev_map->num_ch; i++) {
-               struct nvm_ch_map *ch_map = &dev_map->chnls[i];
-               int *lun_offs = ch_map->lun_offs;
-               int ch = i + ch_map->ch_off;
-
-               if (clear) {
-                       for (j = 0; j < ch_map->num_lun; j++) {
-                               int lun = j + lun_offs[j];
-                               int lunid = (ch * dev->geo.num_lun) + lun;
-
-                               WARN_ON(!test_and_clear_bit(lunid,
-                                                       dev->lun_map));
-                       }
-               }
-
-               kfree(ch_map->lun_offs);
-       }
-
-       kfree(dev_map->chnls);
-       kfree(dev_map);
-
-       kfree(tgt_dev->luns);
-       kfree(tgt_dev);
-}
-
-static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
-                                             u16 lun_begin, u16 lun_end,
-                                             u16 op)
-{
-       struct nvm_tgt_dev *tgt_dev = NULL;
-       struct nvm_dev_map *dev_rmap = dev->rmap;
-       struct nvm_dev_map *dev_map;
-       struct ppa_addr *luns;
-       int num_lun = lun_end - lun_begin + 1;
-       int luns_left = num_lun;
-       int num_ch = num_lun / dev->geo.num_lun;
-       int num_ch_mod = num_lun % dev->geo.num_lun;
-       int bch = lun_begin / dev->geo.num_lun;
-       int blun = lun_begin % dev->geo.num_lun;
-       int lunid = 0;
-       int lun_balanced = 1;
-       int sec_per_lun, prev_num_lun;
-       int i, j;
-
-       num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;
-
-       dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
-       if (!dev_map)
-               goto err_dev;
-
-       dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
-       if (!dev_map->chnls)
-               goto err_chnls;
-
-       luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
-       if (!luns)
-               goto err_luns;
-
-       prev_num_lun = (luns_left > dev->geo.num_lun) ?
-                                       dev->geo.num_lun : luns_left;
-       for (i = 0; i < num_ch; i++) {
-               struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
-               int *lun_roffs = ch_rmap->lun_offs;
-               struct nvm_ch_map *ch_map = &dev_map->chnls[i];
-               int *lun_offs;
-               int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
-                                       dev->geo.num_lun : luns_left;
-
-               if (lun_balanced && prev_num_lun != luns_in_chnl)
-                       lun_balanced = 0;
-
-               ch_map->ch_off = ch_rmap->ch_off = bch;
-               ch_map->num_lun = luns_in_chnl;
-
-               lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
-               if (!lun_offs)
-                       goto err_ch;
-
-               for (j = 0; j < luns_in_chnl; j++) {
-                       luns[lunid].ppa = 0;
-                       luns[lunid].a.ch = i;
-                       luns[lunid++].a.lun = j;
-
-                       lun_offs[j] = blun;
-                       lun_roffs[j + blun] = blun;
-               }
-
-               ch_map->lun_offs = lun_offs;
-
-               /* when starting a new channel, lun offset is reset */
-               blun = 0;
-               luns_left -= luns_in_chnl;
-       }
-
-       dev_map->num_ch = num_ch;
-
-       tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
-       if (!tgt_dev)
-               goto err_ch;
-
-       /* Inherit device geometry from parent */
-       memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
-
-       /* Target device only owns a portion of the physical device */
-       tgt_dev->geo.num_ch = num_ch;
-       tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
-       tgt_dev->geo.all_luns = num_lun;
-       tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;
-
-       tgt_dev->geo.op = op;
-
-       sec_per_lun = dev->geo.clba * dev->geo.num_chk;
-       tgt_dev->geo.total_secs = num_lun * sec_per_lun;
-
-       tgt_dev->q = dev->q;
-       tgt_dev->map = dev_map;
-       tgt_dev->luns = luns;
-       tgt_dev->parent = dev;
-
-       return tgt_dev;
-err_ch:
-       while (--i >= 0)
-               kfree(dev_map->chnls[i].lun_offs);
-       kfree(luns);
-err_luns:
-       kfree(dev_map->chnls);
-err_chnls:
-       kfree(dev_map);
-err_dev:
-       return tgt_dev;
-}
-
-static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
-{
-       struct nvm_tgt_type *tt;
-
-       list_for_each_entry(tt, &nvm_tgt_types, list)
-               if (!strcmp(name, tt->name))
-                       return tt;
-
-       return NULL;
-}
-
-static struct nvm_tgt_type *nvm_find_target_type(const char *name)
-{
-       struct nvm_tgt_type *tt;
-
-       down_write(&nvm_tgtt_lock);
-       tt = __nvm_find_target_type(name);
-       up_write(&nvm_tgtt_lock);
-
-       return tt;
-}
-
-static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
-                                int lun_end)
-{
-       if (lun_begin > lun_end || lun_end >= geo->all_luns) {
-               pr_err("lun out of bound (%u:%u > %u)\n",
-                       lun_begin, lun_end, geo->all_luns - 1);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int __nvm_config_simple(struct nvm_dev *dev,
-                              struct nvm_ioctl_create_simple *s)
-{
-       struct nvm_geo *geo = &dev->geo;
-
-       if (s->lun_begin == -1 && s->lun_end == -1) {
-               s->lun_begin = 0;
-               s->lun_end = geo->all_luns - 1;
-       }
-
-       return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
-}
-
-static int __nvm_config_extended(struct nvm_dev *dev,
-                                struct nvm_ioctl_create_extended *e)
-{
-       if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
-               e->lun_begin = 0;
-               e->lun_end = dev->geo.all_luns - 1;
-       }
-
-       /* op not set falls into target's default */
-       if (e->op == 0xFFFF) {
-               e->op = NVM_TARGET_DEFAULT_OP;
-       } else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
-               pr_err("invalid over provisioning value\n");
-               return -EINVAL;
-       }
-
-       return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
-}
-
-static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
-{
-       struct nvm_ioctl_create_extended e;
-       struct gendisk *tdisk;
-       struct nvm_tgt_type *tt;
-       struct nvm_target *t;
-       struct nvm_tgt_dev *tgt_dev;
-       void *targetdata;
-       unsigned int mdts;
-       int ret;
-
-       switch (create->conf.type) {
-       case NVM_CONFIG_TYPE_SIMPLE:
-               ret = __nvm_config_simple(dev, &create->conf.s);
-               if (ret)
-                       return ret;
-
-               e.lun_begin = create->conf.s.lun_begin;
-               e.lun_end = create->conf.s.lun_end;
-               e.op = NVM_TARGET_DEFAULT_OP;
-               break;
-       case NVM_CONFIG_TYPE_EXTENDED:
-               ret = __nvm_config_extended(dev, &create->conf.e);
-               if (ret)
-                       return ret;
-
-               e = create->conf.e;
-               break;
-       default:
-               pr_err("config type not valid\n");
-               return -EINVAL;
-       }
-
-       tt = nvm_find_target_type(create->tgttype);
-       if (!tt) {
-               pr_err("target type %s not found\n", create->tgttype);
-               return -EINVAL;
-       }
-
-       if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
-               pr_err("device is incompatible with target L2P type.\n");
-               return -EINVAL;
-       }
-
-       if (nvm_target_exists(create->tgtname)) {
-               pr_err("target name already exists (%s)\n",
-                                                       create->tgtname);
-               return -EINVAL;
-       }
-
-       ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
-       if (ret)
-               return ret;
-
-       t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
-       if (!t) {
-               ret = -ENOMEM;
-               goto err_reserve;
-       }
-
-       tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
-       if (!tgt_dev) {
-               pr_err("could not create target device\n");
-               ret = -ENOMEM;
-               goto err_t;
-       }
-
-       tdisk = blk_alloc_disk(dev->q->node);
-       if (!tdisk) {
-               ret = -ENOMEM;
-               goto err_dev;
-       }
-
-       strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
-       tdisk->major = 0;
-       tdisk->first_minor = 0;
-       tdisk->fops = tt->bops;
-
-       targetdata = tt->init(tgt_dev, tdisk, create->flags);
-       if (IS_ERR(targetdata)) {
-               ret = PTR_ERR(targetdata);
-               goto err_init;
-       }
-
-       tdisk->private_data = targetdata;
-       tdisk->queue->queuedata = targetdata;
-
-       mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
-       if (dev->geo.mdts) {
-               mdts = min_t(u32, dev->geo.mdts,
-                               (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
-       }
-       blk_queue_max_hw_sectors(tdisk->queue, mdts);
-
-       set_capacity(tdisk, tt->capacity(targetdata));
-       add_disk(tdisk);
-
-       if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
-               ret = -ENOMEM;
-               goto err_sysfs;
-       }
-
-       t->type = tt;
-       t->disk = tdisk;
-       t->dev = tgt_dev;
-
-       mutex_lock(&dev->mlock);
-       list_add_tail(&t->list, &dev->targets);
-       mutex_unlock(&dev->mlock);
-
-       __module_get(tt->owner);
-
-       return 0;
-err_sysfs:
-       if (tt->exit)
-               tt->exit(targetdata, true);
-err_init:
-       blk_cleanup_disk(tdisk);
-err_dev:
-       nvm_remove_tgt_dev(tgt_dev, 0);
-err_t:
-       kfree(t);
-err_reserve:
-       nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
-       return ret;
-}
-
-static void __nvm_remove_target(struct nvm_target *t, bool graceful)
-{
-       struct nvm_tgt_type *tt = t->type;
-       struct gendisk *tdisk = t->disk;
-
-       del_gendisk(tdisk);
-
-       if (tt->sysfs_exit)
-               tt->sysfs_exit(tdisk);
-
-       if (tt->exit)
-               tt->exit(tdisk->private_data, graceful);
-
-       nvm_remove_tgt_dev(t->dev, 1);
-       blk_cleanup_disk(tdisk);
-       module_put(t->type->owner);
-
-       list_del(&t->list);
-       kfree(t);
-}
-
-/**
- * nvm_remove_tgt - Removes a target from the media manager
- * @remove:    ioctl structure with target name to remove.
- *
- * Returns:
- * 0: on success
- * 1: on not found
- * <0: on error
- */
-static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
-{
-       struct nvm_target *t = NULL;
-       struct nvm_dev *dev;
-
-       down_read(&nvm_lock);
-       list_for_each_entry(dev, &nvm_devices, devices) {
-               mutex_lock(&dev->mlock);
-               t = nvm_find_target(dev, remove->tgtname);
-               if (t) {
-                       mutex_unlock(&dev->mlock);
-                       break;
-               }
-               mutex_unlock(&dev->mlock);
-       }
-       up_read(&nvm_lock);
-
-       if (!t) {
-               pr_err("failed to remove target %s\n",
-                               remove->tgtname);
-               return 1;
-       }
-
-       __nvm_remove_target(t, true);
-       kref_put(&dev->ref, nvm_free);
-
-       return 0;
-}
-
-static int nvm_register_map(struct nvm_dev *dev)
-{
-       struct nvm_dev_map *rmap;
-       int i, j;
-
-       rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
-       if (!rmap)
-               goto err_rmap;
-
-       rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
-                                                               GFP_KERNEL);
-       if (!rmap->chnls)
-               goto err_chnls;
-
-       for (i = 0; i < dev->geo.num_ch; i++) {
-               struct nvm_ch_map *ch_rmap;
-               int *lun_roffs;
-               int luns_in_chnl = dev->geo.num_lun;
-
-               ch_rmap = &rmap->chnls[i];
-
-               ch_rmap->ch_off = -1;
-               ch_rmap->num_lun = luns_in_chnl;
-
-               lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
-               if (!lun_roffs)
-                       goto err_ch;
-
-               for (j = 0; j < luns_in_chnl; j++)
-                       lun_roffs[j] = -1;
-
-               ch_rmap->lun_offs = lun_roffs;
-       }
-
-       dev->rmap = rmap;
-
-       return 0;
-err_ch:
-       while (--i >= 0)
-               kfree(rmap->chnls[i].lun_offs);
-err_chnls:
-       kfree(rmap);
-err_rmap:
-       return -ENOMEM;
-}
-
-static void nvm_unregister_map(struct nvm_dev *dev)
-{
-       struct nvm_dev_map *rmap = dev->rmap;
-       int i;
-
-       for (i = 0; i < dev->geo.num_ch; i++)
-               kfree(rmap->chnls[i].lun_offs);
-
-       kfree(rmap->chnls);
-       kfree(rmap);
-}
-
-static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
-{
-       struct nvm_dev_map *dev_map = tgt_dev->map;
-       struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
-       int lun_off = ch_map->lun_offs[p->a.lun];
-
-       p->a.ch += ch_map->ch_off;
-       p->a.lun += lun_off;
-}
-
-static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       struct nvm_dev_map *dev_rmap = dev->rmap;
-       struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
-       int lun_roff = ch_rmap->lun_offs[p->a.lun];
-
-       p->a.ch -= ch_rmap->ch_off;
-       p->a.lun -= lun_roff;
-}
-
-static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
-                               struct ppa_addr *ppa_list, int nr_ppas)
-{
-       int i;
-
-       for (i = 0; i < nr_ppas; i++) {
-               nvm_map_to_dev(tgt_dev, &ppa_list[i]);
-               ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
-       }
-}
-
-static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
-                               struct ppa_addr *ppa_list, int nr_ppas)
-{
-       int i;
-
-       for (i = 0; i < nr_ppas; i++) {
-               ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
-               nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
-       }
-}
-
-static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
-}
-
-static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
-}
-
-int nvm_register_tgt_type(struct nvm_tgt_type *tt)
-{
-       int ret = 0;
-
-       down_write(&nvm_tgtt_lock);
-       if (__nvm_find_target_type(tt->name))
-               ret = -EEXIST;
-       else
-               list_add(&tt->list, &nvm_tgt_types);
-       up_write(&nvm_tgtt_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(nvm_register_tgt_type);
-
-void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
-{
-       if (!tt)
-               return;
-
-       down_write(&nvm_tgtt_lock);
-       list_del(&tt->list);
-       up_write(&nvm_tgtt_lock);
-}
-EXPORT_SYMBOL(nvm_unregister_tgt_type);
-
-void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
-                                                       dma_addr_t *dma_handler)
-{
-       return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
-                                                               dma_handler);
-}
-EXPORT_SYMBOL(nvm_dev_dma_alloc);
-
-void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
-{
-       dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
-}
-EXPORT_SYMBOL(nvm_dev_dma_free);
-
-static struct nvm_dev *nvm_find_nvm_dev(const char *name)
-{
-       struct nvm_dev *dev;
-
-       list_for_each_entry(dev, &nvm_devices, devices)
-               if (!strcmp(name, dev->name))
-                       return dev;
-
-       return NULL;
-}
-
-static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
-                       const struct ppa_addr *ppas, int nr_ppas)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       struct nvm_geo *geo = &tgt_dev->geo;
-       int i, plane_cnt, pl_idx;
-       struct ppa_addr ppa;
-
-       if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
-               rqd->nr_ppas = nr_ppas;
-               rqd->ppa_addr = ppas[0];
-
-               return 0;
-       }
-
-       rqd->nr_ppas = nr_ppas;
-       rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
-       if (!rqd->ppa_list) {
-               pr_err("failed to allocate dma memory\n");
-               return -ENOMEM;
-       }
-
-       plane_cnt = geo->pln_mode;
-       rqd->nr_ppas *= plane_cnt;
-
-       for (i = 0; i < nr_ppas; i++) {
-               for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
-                       ppa = ppas[i];
-                       ppa.g.pl = pl_idx;
-                       rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
-               }
-       }
-
-       return 0;
-}
-
-static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
-                       struct nvm_rq *rqd)
-{
-       if (!rqd->ppa_list)
-               return;
-
-       nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
-}
-
-static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
-{
-       int flags = 0;
-
-       if (geo->version == NVM_OCSSD_SPEC_20)
-               return 0;
-
-       if (rqd->is_seq)
-               flags |= geo->pln_mode >> 1;
-
-       if (rqd->opcode == NVM_OP_PREAD)
-               flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
-       else if (rqd->opcode == NVM_OP_PWRITE)
-               flags |= NVM_IO_SCRAMBLE_ENABLE;
-
-       return flags;
-}
-
-int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       int ret;
-
-       if (!dev->ops->submit_io)
-               return -ENODEV;
-
-       nvm_rq_tgt_to_dev(tgt_dev, rqd);
-
-       rqd->dev = tgt_dev;
-       rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
-
-       /* In case of error, return addresses in target (generic) format */
-       ret = dev->ops->submit_io(dev, rqd, buf);
-       if (ret)
-               nvm_rq_dev_to_tgt(tgt_dev, rqd);
-       return ret;
-}
-EXPORT_SYMBOL(nvm_submit_io);
-
-static void nvm_sync_end_io(struct nvm_rq *rqd)
-{
-       struct completion *waiting = rqd->private;
-
-       complete(waiting);
-}
-
-static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
-                             void *buf)
-{
-       DECLARE_COMPLETION_ONSTACK(wait);
-       int ret = 0;
-
-       rqd->end_io = nvm_sync_end_io;
-       rqd->private = &wait;
-
-       ret = dev->ops->submit_io(dev, rqd, buf);
-       if (ret)
-               return ret;
-
-       wait_for_completion_io(&wait);
-
-       return 0;
-}
-
-int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
-                      void *buf)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       int ret;
-
-       if (!dev->ops->submit_io)
-               return -ENODEV;
-
-       nvm_rq_tgt_to_dev(tgt_dev, rqd);
-
-       rqd->dev = tgt_dev;
-       rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
-
-       ret = nvm_submit_io_wait(dev, rqd, buf);
-
-       return ret;
-}
-EXPORT_SYMBOL(nvm_submit_io_sync);
-
-void nvm_end_io(struct nvm_rq *rqd)
-{
-       struct nvm_tgt_dev *tgt_dev = rqd->dev;
-
-       /* Convert address space */
-       if (tgt_dev)
-               nvm_rq_dev_to_tgt(tgt_dev, rqd);
-
-       if (rqd->end_io)
-               rqd->end_io(rqd);
-}
-EXPORT_SYMBOL(nvm_end_io);
-
-static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-       if (!dev->ops->submit_io)
-               return -ENODEV;
-
-       rqd->dev = NULL;
-       rqd->flags = nvm_set_flags(&dev->geo, rqd);
-
-       return nvm_submit_io_wait(dev, rqd, NULL);
-}
-
-static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
-{
-       struct nvm_rq rqd = { NULL };
-       struct bio bio;
-       struct bio_vec bio_vec;
-       struct page *page;
-       int ret;
-
-       page = alloc_page(GFP_KERNEL);
-       if (!page)
-               return -ENOMEM;
-
-       bio_init(&bio, &bio_vec, 1);
-       bio_add_page(&bio, page, PAGE_SIZE, 0);
-       bio_set_op_attrs(&bio, REQ_OP_READ, 0);
-
-       rqd.bio = &bio;
-       rqd.opcode = NVM_OP_PREAD;
-       rqd.is_seq = 1;
-       rqd.nr_ppas = 1;
-       rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
-
-       ret = nvm_submit_io_sync_raw(dev, &rqd);
-       __free_page(page);
-       if (ret)
-               return ret;
-
-       return rqd.error;
-}
-
-/*
- * Scans a 1.2 chunk's first and last page to determine its state.
- * If the chunk is found to be open, also scan it to update the write
- * pointer.
- */
-static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
-                            struct nvm_chk_meta *meta)
-{
-       struct nvm_geo *geo = &dev->geo;
-       int ret, pg, pl;
-
-       /* sense first page */
-       ret = nvm_bb_chunk_sense(dev, ppa);
-       if (ret < 0) /* io error */
-               return ret;
-       else if (ret == 0) /* valid data */
-               meta->state = NVM_CHK_ST_OPEN;
-       else if (ret > 0) {
-               /*
-                * If the page is empty, the chunk is free; CRC/ECC
-                * failures mean the chunk still holds data, and any
-                * other result is an actual io error.
-                */
-               switch (ret) {
-               case NVM_RSP_ERR_EMPTYPAGE:
-                       meta->state = NVM_CHK_ST_FREE;
-                       return 0;
-               case NVM_RSP_ERR_FAILCRC:
-               case NVM_RSP_ERR_FAILECC:
-               case NVM_RSP_WARN_HIGHECC:
-                       meta->state = NVM_CHK_ST_OPEN;
-                       goto scan;
-               default:
-                       return -ret; /* other io error */
-               }
-       }
-
-       /* sense last page */
-       ppa.g.pg = geo->num_pg - 1;
-       ppa.g.pl = geo->num_pln - 1;
-
-       ret = nvm_bb_chunk_sense(dev, ppa);
-       if (ret < 0) /* io error */
-               return ret;
-       else if (ret == 0) { /* Chunk fully written */
-               meta->state = NVM_CHK_ST_CLOSED;
-               meta->wp = geo->clba;
-               return 0;
-       } else if (ret > 0) {
-               switch (ret) {
-               case NVM_RSP_ERR_EMPTYPAGE:
-               case NVM_RSP_ERR_FAILCRC:
-               case NVM_RSP_ERR_FAILECC:
-               case NVM_RSP_WARN_HIGHECC:
-                       meta->state = NVM_CHK_ST_OPEN;
-                       break;
-               default:
-                       return -ret; /* other io error */
-               }
-       }
-
-scan:
-       /*
-        * The chunk is open, so scan it sequentially to update the write
-        * pointer. We assume that targets write data across all planes
-        * before moving to the next page.
-        */
-       for (pg = 0; pg < geo->num_pg; pg++) {
-               for (pl = 0; pl < geo->num_pln; pl++) {
-                       ppa.g.pg = pg;
-                       ppa.g.pl = pl;
-
-                       ret = nvm_bb_chunk_sense(dev, ppa);
-                       if (ret < 0) /* io error */
-                               return ret;
-                       else if (ret == 0) {
-                               meta->wp += geo->ws_min;
-                       } else if (ret > 0) {
-                               switch (ret) {
-                               case NVM_RSP_ERR_EMPTYPAGE:
-                                       return 0;
-                               case NVM_RSP_ERR_FAILCRC:
-                               case NVM_RSP_ERR_FAILECC:
-                               case NVM_RSP_WARN_HIGHECC:
-                                       meta->wp += geo->ws_min;
-                                       break;
-                               default:
-                                       return -ret; /* other io error */
-                               }
-                       }
-               }
-       }
-
-       return 0;
-}
-
-/*
- * Folds a bad block list from its plane representation into its
- * chunk representation.
- *
- * If any plane's status is bad or grown bad, the chunk is marked
- * offline. Otherwise, the first plane's state acts as the chunk state.
- */
-static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
-                          u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
-{
-       struct nvm_geo *geo = &dev->geo;
-       int ret, blk, pl, offset, blktype;
-
-       for (blk = 0; blk < geo->num_chk; blk++) {
-               offset = blk * geo->pln_mode;
-               blktype = blks[offset];
-
-               for (pl = 0; pl < geo->pln_mode; pl++) {
-                       if (blks[offset + pl] &
-                                       (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
-                               blktype = blks[offset + pl];
-                               break;
-                       }
-               }
-
-               ppa.g.blk = blk;
-
-               meta->wp = 0;
-               meta->type = NVM_CHK_TP_W_SEQ;
-               meta->wi = 0;
-               meta->slba = generic_to_dev_addr(dev, ppa).ppa;
-               meta->cnlb = dev->geo.clba;
-
-               if (blktype == NVM_BLK_T_FREE) {
-                       ret = nvm_bb_chunk_scan(dev, ppa, meta);
-                       if (ret)
-                               return ret;
-               } else {
-                       meta->state = NVM_CHK_ST_OFFLINE;
-               }
-
-               meta++;
-       }
-
-       return 0;
-}
-
-static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
-                          int nchks, struct nvm_chk_meta *meta)
-{
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr ppa;
-       u8 *blks;
-       int ch, lun, nr_blks;
-       int ret = 0;
-
-       ppa.ppa = slba;
-       ppa = dev_to_generic_addr(dev, ppa);
-
-       if (ppa.g.blk != 0)
-               return -EINVAL;
-
-       if ((nchks % geo->num_chk) != 0)
-               return -EINVAL;
-
-       nr_blks = geo->num_chk * geo->pln_mode;
-
-       blks = kmalloc(nr_blks, GFP_KERNEL);
-       if (!blks)
-               return -ENOMEM;
-
-       for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
-               for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
-                       struct ppa_addr ppa_gen, ppa_dev;
-
-                       if (!nchks)
-                               goto done;
-
-                       ppa_gen.ppa = 0;
-                       ppa_gen.g.ch = ch;
-                       ppa_gen.g.lun = lun;
-                       ppa_dev = generic_to_dev_addr(dev, ppa_gen);
-
-                       ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
-                       if (ret)
-                               goto done;
-
-                       ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
-                                                                       meta);
-                       if (ret)
-                               goto done;
-
-                       meta += geo->num_chk;
-                       nchks -= geo->num_chk;
-               }
-       }
-done:
-       kfree(blks);
-       return ret;
-}
-
-int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
-                      int nchks, struct nvm_chk_meta *meta)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-
-       nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
-
-       if (dev->geo.version == NVM_OCSSD_SPEC_12)
-               return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);
-
-       return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
-}
-EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);
-
-int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
-                      int nr_ppas, int type)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       struct nvm_rq rqd;
-       int ret;
-
-       if (dev->geo.version == NVM_OCSSD_SPEC_20)
-               return 0;
-
-       if (nr_ppas > NVM_MAX_VLBA) {
-               pr_err("unable to update all blocks atomically\n");
-               return -EINVAL;
-       }
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
-       nvm_rq_tgt_to_dev(tgt_dev, &rqd);
-
-       ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-       nvm_free_rqd_ppalist(tgt_dev, &rqd);
-       if (ret)
-               return -EINVAL;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
-
-static int nvm_core_init(struct nvm_dev *dev)
-{
-       struct nvm_geo *geo = &dev->geo;
-       int ret;
-
-       dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
-                                       sizeof(unsigned long), GFP_KERNEL);
-       if (!dev->lun_map)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&dev->area_list);
-       INIT_LIST_HEAD(&dev->targets);
-       mutex_init(&dev->mlock);
-       spin_lock_init(&dev->lock);
-
-       ret = nvm_register_map(dev);
-       if (ret)
-               goto err_fmtype;
-
-       return 0;
-err_fmtype:
-       kfree(dev->lun_map);
-       return ret;
-}
-
-static void nvm_free(struct kref *ref)
-{
-       struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);
-
-       if (dev->dma_pool)
-               dev->ops->destroy_dma_pool(dev->dma_pool);
-
-       if (dev->rmap)
-               nvm_unregister_map(dev);
-
-       kfree(dev->lun_map);
-       kfree(dev);
-}
-
-static int nvm_init(struct nvm_dev *dev)
-{
-       struct nvm_geo *geo = &dev->geo;
-       int ret = -EINVAL;
-
-       if (dev->ops->identity(dev)) {
-               pr_err("device could not be identified\n");
-               goto err;
-       }
-
-       pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
-                       geo->minor_ver_id, geo->vmnt);
-
-       ret = nvm_core_init(dev);
-       if (ret) {
-               pr_err("could not initialize core structures.\n");
-               goto err;
-       }
-
-       pr_info("registered %s [%u/%u/%u/%u/%u]\n",
-                       dev->name, dev->geo.ws_min, dev->geo.ws_opt,
-                       dev->geo.num_chk, dev->geo.all_luns,
-                       dev->geo.num_ch);
-       return 0;
-err:
-       pr_err("failed to initialize nvm\n");
-       return ret;
-}
-
-struct nvm_dev *nvm_alloc_dev(int node)
-{
-       struct nvm_dev *dev;
-
-       dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
-       if (dev)
-               kref_init(&dev->ref);
-
-       return dev;
-}
-EXPORT_SYMBOL(nvm_alloc_dev);
-
-int nvm_register(struct nvm_dev *dev)
-{
-       int ret, exp_pool_size;
-
-       pr_warn_once("lightnvm support is deprecated and will be removed in Linux 5.15.\n");
-
-       if (!dev->q || !dev->ops) {
-               kref_put(&dev->ref, nvm_free);
-               return -EINVAL;
-       }
-
-       ret = nvm_init(dev);
-       if (ret) {
-               kref_put(&dev->ref, nvm_free);
-               return ret;
-       }
-
-       exp_pool_size = max_t(int, PAGE_SIZE,
-                             (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
-       exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);
-
-       dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
-                                                 exp_pool_size);
-       if (!dev->dma_pool) {
-               pr_err("could not create dma pool\n");
-               kref_put(&dev->ref, nvm_free);
-               return -ENOMEM;
-       }
-
-       /* register device with a supported media manager */
-       down_write(&nvm_lock);
-       list_add(&dev->devices, &nvm_devices);
-       up_write(&nvm_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL(nvm_register);
-
-void nvm_unregister(struct nvm_dev *dev)
-{
-       struct nvm_target *t, *tmp;
-
-       mutex_lock(&dev->mlock);
-       list_for_each_entry_safe(t, tmp, &dev->targets, list) {
-               if (t->dev->parent != dev)
-                       continue;
-               __nvm_remove_target(t, false);
-               kref_put(&dev->ref, nvm_free);
-       }
-       mutex_unlock(&dev->mlock);
-
-       down_write(&nvm_lock);
-       list_del(&dev->devices);
-       up_write(&nvm_lock);
-
-       kref_put(&dev->ref, nvm_free);
-}
-EXPORT_SYMBOL(nvm_unregister);
-
-static int __nvm_configure_create(struct nvm_ioctl_create *create)
-{
-       struct nvm_dev *dev;
-       int ret;
-
-       down_write(&nvm_lock);
-       dev = nvm_find_nvm_dev(create->dev);
-       up_write(&nvm_lock);
-
-       if (!dev) {
-               pr_err("device not found\n");
-               return -EINVAL;
-       }
-
-       kref_get(&dev->ref);
-       ret = nvm_create_tgt(dev, create);
-       if (ret)
-               kref_put(&dev->ref, nvm_free);
-
-       return ret;
-}
-
-static long nvm_ioctl_info(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_info *info;
-       struct nvm_tgt_type *tt;
-       int tgt_iter = 0;
-
-       info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
-       if (IS_ERR(info))
-               return PTR_ERR(info);
-
-       info->version[0] = NVM_VERSION_MAJOR;
-       info->version[1] = NVM_VERSION_MINOR;
-       info->version[2] = NVM_VERSION_PATCH;
-
-       down_write(&nvm_tgtt_lock);
-       list_for_each_entry(tt, &nvm_tgt_types, list) {
-               struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
-
-               tgt->version[0] = tt->version[0];
-               tgt->version[1] = tt->version[1];
-               tgt->version[2] = tt->version[2];
-               strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
-
-               tgt_iter++;
-       }
-
-       info->tgtsize = tgt_iter;
-       up_write(&nvm_tgtt_lock);
-
-       if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
-               kfree(info);
-               return -EFAULT;
-       }
-
-       kfree(info);
-       return 0;
-}
-
-static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_get_devices *devices;
-       struct nvm_dev *dev;
-       int i = 0;
-
-       devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
-       if (!devices)
-               return -ENOMEM;
-
-       down_write(&nvm_lock);
-       list_for_each_entry(dev, &nvm_devices, devices) {
-               struct nvm_ioctl_device_info *info = &devices->info[i];
-
-               strlcpy(info->devname, dev->name, sizeof(info->devname));
-
-               /* kept for compatibility */
-               info->bmversion[0] = 1;
-               info->bmversion[1] = 0;
-               info->bmversion[2] = 0;
-               strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
-               i++;
-
-               if (i >= ARRAY_SIZE(devices->info)) {
-                       pr_err("max %zd devices can be reported.\n",
-                              ARRAY_SIZE(devices->info));
-                       break;
-               }
-       }
-       up_write(&nvm_lock);
-
-       devices->nr_devices = i;
-
-       if (copy_to_user(arg, devices,
-                        sizeof(struct nvm_ioctl_get_devices))) {
-               kfree(devices);
-               return -EFAULT;
-       }
-
-       kfree(devices);
-       return 0;
-}
-
-static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_create create;
-
-       if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
-               return -EFAULT;
-
-       if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
-           create.conf.e.rsv != 0) {
-               pr_err("reserved config field in use\n");
-               return -EINVAL;
-       }
-
-       create.dev[DISK_NAME_LEN - 1] = '\0';
-       create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
-       create.tgtname[DISK_NAME_LEN - 1] = '\0';
-
-       if (create.flags != 0) {
-               __u32 flags = create.flags;
-
-               /* Check for valid flags */
-               if (flags & NVM_TARGET_FACTORY)
-                       flags &= ~NVM_TARGET_FACTORY;
-
-               if (flags) {
-                       pr_err("flag not supported\n");
-                       return -EINVAL;
-               }
-       }
-
-       return __nvm_configure_create(&create);
-}
-
-static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_remove remove;
-
-       if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
-               return -EFAULT;
-
-       remove.tgtname[DISK_NAME_LEN - 1] = '\0';
-
-       if (remove.flags != 0) {
-               pr_err("no flags supported\n");
-               return -EINVAL;
-       }
-
-       return nvm_remove_tgt(&remove);
-}
-
-/* Kept for compatibility reasons */
-static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_dev_init init;
-
-       if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
-               return -EFAULT;
-
-       if (init.flags != 0) {
-               pr_err("no flags supported\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-/* Kept for compatibility reasons */
-static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_dev_factory fact;
-
-       if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
-               return -EFAULT;
-
-       fact.dev[DISK_NAME_LEN - 1] = '\0';
-
-       if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
-               return -EINVAL;
-
-       return 0;
-}
-
-static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
-{
-       void __user *argp = (void __user *)arg;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
-       switch (cmd) {
-       case NVM_INFO:
-               return nvm_ioctl_info(file, argp);
-       case NVM_GET_DEVICES:
-               return nvm_ioctl_get_devices(file, argp);
-       case NVM_DEV_CREATE:
-               return nvm_ioctl_dev_create(file, argp);
-       case NVM_DEV_REMOVE:
-               return nvm_ioctl_dev_remove(file, argp);
-       case NVM_DEV_INIT:
-               return nvm_ioctl_dev_init(file, argp);
-       case NVM_DEV_FACTORY:
-               return nvm_ioctl_dev_factory(file, argp);
-       }
-       return 0;
-}
-
-static const struct file_operations _ctl_fops = {
-       .open = nonseekable_open,
-       .unlocked_ioctl = nvm_ctl_ioctl,
-       .owner = THIS_MODULE,
-       .llseek  = noop_llseek,
-};
-
-static struct miscdevice _nvm_misc = {
-       .minor          = MISC_DYNAMIC_MINOR,
-       .name           = "lightnvm",
-       .nodename       = "lightnvm/control",
-       .fops           = &_ctl_fops,
-};
-builtin_misc_device(_nvm_misc);
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
deleted file mode 100644 (file)
index f185f1a..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-cache.c - pblk's write cache
- */
-
-#include "pblk.h"
-
-void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
-                               unsigned long flags)
-{
-       struct pblk_w_ctx w_ctx;
-       sector_t lba = pblk_get_lba(bio);
-       unsigned long start_time;
-       unsigned int bpos, pos;
-       int nr_entries = pblk_get_secs(bio);
-       int i, ret;
-
-       start_time = bio_start_io_acct(bio);
-
-       /* Update the write buffer head (mem) with the entries that we can
-        * write. The write itself cannot fail, so there is no need to
-        * roll back from here on.
-        */
-retry:
-       ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
-       switch (ret) {
-       case NVM_IO_REQUEUE:
-               io_schedule();
-               goto retry;
-       case NVM_IO_ERR:
-               pblk_pipeline_stop(pblk);
-               bio_io_error(bio);
-               goto out;
-       }
-
-       pblk_ppa_set_empty(&w_ctx.ppa);
-       w_ctx.flags = flags;
-       if (bio->bi_opf & REQ_PREFLUSH) {
-               w_ctx.flags |= PBLK_FLUSH_ENTRY;
-               pblk_write_kick(pblk);
-       }
-
-       if (unlikely(!bio_has_data(bio)))
-               goto out;
-
-       for (i = 0; i < nr_entries; i++) {
-               void *data = bio_data(bio);
-
-               w_ctx.lba = lba + i;
-
-               pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
-               pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);
-
-               bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
-       }
-
-       atomic64_add(nr_entries, &pblk->user_wa);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(nr_entries, &pblk->inflight_writes);
-       atomic_long_add(nr_entries, &pblk->req_writes);
-#endif
-
-       pblk_rl_inserted(&pblk->rl, nr_entries);
-
-out:
-       bio_end_io_acct(bio, start_time);
-       pblk_write_should_kick(pblk);
-
-       if (ret == NVM_IO_DONE)
-               bio_endio(bio);
-}
-
-/*
- * On GC the incoming lbas are not necessarily sequential. Also, some of the
- * lbas might not be valid entries; these are marked as empty by the GC thread.
- */
-int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
-{
-       struct pblk_w_ctx w_ctx;
-       unsigned int bpos, pos;
-       void *data = gc_rq->data;
-       int i, valid_entries;
-
-       /* Update the write buffer head (mem) with the entries that we can
-        * write. The write itself cannot fail, so there is no need to
-        * roll back from here on.
-        */
-retry:
-       if (!pblk_rb_may_write_gc(&pblk->rwb, gc_rq->secs_to_gc, &bpos)) {
-               io_schedule();
-               goto retry;
-       }
-
-       w_ctx.flags = PBLK_IOTYPE_GC;
-       pblk_ppa_set_empty(&w_ctx.ppa);
-
-       for (i = 0, valid_entries = 0; i < gc_rq->nr_secs; i++) {
-               if (gc_rq->lba_list[i] == ADDR_EMPTY)
-                       continue;
-
-               w_ctx.lba = gc_rq->lba_list[i];
-
-               pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + valid_entries);
-               pblk_rb_write_entry_gc(&pblk->rwb, data, w_ctx, gc_rq->line,
-                                               gc_rq->paddr_list[i], pos);
-
-               data += PBLK_EXPOSED_PAGE_SIZE;
-               valid_entries++;
-       }
-
-       WARN_ONCE(gc_rq->secs_to_gc != valid_entries,
-                                       "pblk: inconsistent GC write\n");
-
-       atomic64_add(valid_entries, &pblk->gc_wa);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(valid_entries, &pblk->inflight_writes);
-       atomic_long_add(valid_entries, &pblk->recov_gc_writes);
-#endif
-
-       pblk_write_should_kick(pblk);
-       return NVM_IO_OK;
-}
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
deleted file mode 100644 (file)
index 33d39d3..0000000
+++ /dev/null
@@ -1,2151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-core.c - pblk's core functionality
- *
- */
-
-#define CREATE_TRACE_POINTS
-
-#include "pblk.h"
-#include "pblk-trace.h"
-
-static void pblk_line_mark_bb(struct work_struct *work)
-{
-       struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
-                                                                       ws);
-       struct pblk *pblk = line_ws->pblk;
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct ppa_addr *ppa = line_ws->priv;
-       int ret;
-
-       ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
-       if (ret) {
-               struct pblk_line *line;
-               int pos;
-
-               line = pblk_ppa_to_line(pblk, *ppa);
-               pos = pblk_ppa_to_pos(&dev->geo, *ppa);
-
-               pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
-                               line->id, pos);
-       }
-
-       kfree(ppa);
-       mempool_free(line_ws, &pblk->gen_ws_pool);
-}
-
-static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
-                        struct ppa_addr ppa_addr)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr *ppa;
-       int pos = pblk_ppa_to_pos(geo, ppa_addr);
-
-       pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
-       atomic_long_inc(&pblk->erase_failed);
-
-       atomic_dec(&line->blk_in_line);
-       if (test_and_set_bit(pos, line->blk_bitmap))
-               pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
-                                                       line->id, pos);
-
-       /* Not necessary to mark bad blocks on 2.0 spec. */
-       if (geo->version == NVM_OCSSD_SPEC_20)
-               return;
-
-       ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
-       if (!ppa)
-               return;
-
-       *ppa = ppa_addr;
-       pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
-                                               GFP_ATOMIC, pblk->bb_wq);
-}
-
-static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct nvm_chk_meta *chunk;
-       struct pblk_line *line;
-       int pos;
-
-       line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
-       pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
-       chunk = &line->chks[pos];
-
-       atomic_dec(&line->left_seblks);
-
-       if (rqd->error) {
-               trace_pblk_chunk_reset(pblk_disk_name(pblk),
-                               &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
-
-               chunk->state = NVM_CHK_ST_OFFLINE;
-               pblk_mark_bb(pblk, line, rqd->ppa_addr);
-       } else {
-               trace_pblk_chunk_reset(pblk_disk_name(pblk),
-                               &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
-
-               chunk->state = NVM_CHK_ST_FREE;
-       }
-
-       trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
-                               chunk->state);
-
-       atomic_dec(&pblk->inflight_io);
-}
-
-/* Erase completion assumes that only one block is erased at a time */
-static void pblk_end_io_erase(struct nvm_rq *rqd)
-{
-       struct pblk *pblk = rqd->private;
-
-       __pblk_end_io_erase(pblk, rqd);
-       mempool_free(rqd, &pblk->e_rq_pool);
-}
-
-/*
- * Get information for all chunks from the device.
- *
- * The caller is responsible for freeing the returned structure with vfree().
- */
-struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct nvm_chk_meta *meta;
-       struct ppa_addr ppa;
-       unsigned long len;
-       int ret;
-
-       ppa.ppa = 0;
-
-       len = geo->all_chunks * sizeof(*meta);
-       meta = vzalloc(len);
-       if (!meta)
-               return ERR_PTR(-ENOMEM);
-
-       ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
-       if (ret) {
-               vfree(meta);
-               return ERR_PTR(-EIO);
-       }
-
-       return meta;
-}
-
-struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
-                                             struct nvm_chk_meta *meta,
-                                             struct ppa_addr ppa)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
-       int lun_off = ppa.m.pu * geo->num_chk;
-       int chk_off = ppa.m.chk;
-
-       return meta + ch_off + lun_off + chk_off;
-}
-
-void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
-                          u64 paddr)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct list_head *move_list = NULL;
-
-       /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
-        * table is modified with reclaimed sectors, a check is done to ensure
-        * that newer updates are not overwritten.
-        */
-       spin_lock(&line->lock);
-       WARN_ON(line->state == PBLK_LINESTATE_FREE);
-
-       if (test_and_set_bit(paddr, line->invalid_bitmap)) {
-               WARN_ONCE(1, "pblk: double invalidate\n");
-               spin_unlock(&line->lock);
-               return;
-       }
-       le32_add_cpu(line->vsc, -1);
-
-       if (line->state == PBLK_LINESTATE_CLOSED)
-               move_list = pblk_line_gc_list(pblk, line);
-       spin_unlock(&line->lock);
-
-       if (move_list) {
-               spin_lock(&l_mg->gc_lock);
-               spin_lock(&line->lock);
-               /* Prevent moving a line that has just been chosen for GC */
-               if (line->state == PBLK_LINESTATE_GC) {
-                       spin_unlock(&line->lock);
-                       spin_unlock(&l_mg->gc_lock);
-                       return;
-               }
-               spin_unlock(&line->lock);
-
-               list_move_tail(&line->list, move_list);
-               spin_unlock(&l_mg->gc_lock);
-       }
-}
-
-void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct pblk_line *line;
-       u64 paddr;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Callers must ensure that the ppa points to a device address */
-       BUG_ON(pblk_addr_in_cache(ppa));
-       BUG_ON(pblk_ppa_empty(ppa));
-#endif
-
-       line = pblk_ppa_to_line(pblk, ppa);
-       paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
-
-       __pblk_map_invalidate(pblk, line, paddr);
-}
-
-static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
-                                 unsigned int nr_secs)
-{
-       sector_t lba;
-
-       spin_lock(&pblk->trans_lock);
-       for (lba = slba; lba < slba + nr_secs; lba++) {
-               struct ppa_addr ppa;
-
-               ppa = pblk_trans_map_get(pblk, lba);
-
-               if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
-                       pblk_map_invalidate(pblk, ppa);
-
-               pblk_ppa_set_empty(&ppa);
-               pblk_trans_map_set(pblk, lba, ppa);
-       }
-       spin_unlock(&pblk->trans_lock);
-}
-
-int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-                                                       &rqd->dma_meta_list);
-       if (!rqd->meta_list)
-               return -ENOMEM;
-
-       if (rqd->nr_ppas == 1)
-               return 0;
-
-       rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
-       rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);
-
-       return 0;
-}
-
-void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       if (rqd->meta_list)
-               nvm_dev_dma_free(dev->parent, rqd->meta_list,
-                               rqd->dma_meta_list);
-}
-
-/* Caller must guarantee that the request is a valid type */
-struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
-{
-       mempool_t *pool;
-       struct nvm_rq *rqd;
-       int rq_size;
-
-       switch (type) {
-       case PBLK_WRITE:
-       case PBLK_WRITE_INT:
-               pool = &pblk->w_rq_pool;
-               rq_size = pblk_w_rq_size;
-               break;
-       case PBLK_READ:
-               pool = &pblk->r_rq_pool;
-               rq_size = pblk_g_rq_size;
-               break;
-       default:
-               pool = &pblk->e_rq_pool;
-               rq_size = pblk_g_rq_size;
-       }
-
-       rqd = mempool_alloc(pool, GFP_KERNEL);
-       memset(rqd, 0, rq_size);
-
-       return rqd;
-}
-
-/* Typically used on completion path. Cannot guarantee request consistency */
-void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
-{
-       mempool_t *pool;
-
-       switch (type) {
-       case PBLK_WRITE:
-               kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
-               fallthrough;
-       case PBLK_WRITE_INT:
-               pool = &pblk->w_rq_pool;
-               break;
-       case PBLK_READ:
-               pool = &pblk->r_rq_pool;
-               break;
-       case PBLK_ERASE:
-               pool = &pblk->e_rq_pool;
-               break;
-       default:
-               pblk_err(pblk, "trying to free unknown rqd type\n");
-               return;
-       }
-
-       pblk_free_rqd_meta(pblk, rqd);
-       mempool_free(rqd, pool);
-}
-
-void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
-                        int nr_pages)
-{
-       struct bio_vec *bv;
-       struct page *page;
-       int i, e, nbv = 0;
-
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               bv = &bio->bi_io_vec[i];
-               page = bv->bv_page;
-               for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
-                       if (nbv >= off)
-                               mempool_free(page++, &pblk->page_bio_pool);
-       }
-}
-
-int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
-                      int nr_pages)
-{
-       struct request_queue *q = pblk->dev->q;
-       struct page *page;
-       int i, ret;
-
-       for (i = 0; i < nr_pages; i++) {
-               page = mempool_alloc(&pblk->page_bio_pool, flags);
-
-               ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
-               if (ret != PBLK_EXPOSED_PAGE_SIZE) {
-                       pblk_err(pblk, "could not add page to bio\n");
-                       mempool_free(page, &pblk->page_bio_pool);
-                       goto err;
-               }
-       }
-
-       return 0;
-err:
-       pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
-       return -1;
-}
-
-void pblk_write_kick(struct pblk *pblk)
-{
-       wake_up_process(pblk->writer_ts);
-       mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
-}
-
-void pblk_write_timer_fn(struct timer_list *t)
-{
-       struct pblk *pblk = from_timer(pblk, t, wtimer);
-
-       /* kick the write thread every tick to flush outstanding data */
-       pblk_write_kick(pblk);
-}
-
-void pblk_write_should_kick(struct pblk *pblk)
-{
-       unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
-
-       if (secs_avail >= pblk->min_write_pgs_data)
-               pblk_write_kick(pblk);
-}
-
-static void pblk_wait_for_meta(struct pblk *pblk)
-{
-       do {
-               if (!atomic_read(&pblk->inflight_io))
-                       break;
-
-               schedule();
-       } while (1);
-}
-
-static void pblk_flush_writer(struct pblk *pblk)
-{
-       pblk_rb_flush(&pblk->rwb);
-       do {
-               if (!pblk_rb_sync_count(&pblk->rwb))
-                       break;
-
-               pblk_write_kick(pblk);
-               schedule();
-       } while (1);
-}
-
-struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct list_head *move_list = NULL;
-       int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
-                       * (pblk->min_write_pgs - pblk->min_write_pgs_data);
-       int vsc = le32_to_cpu(*line->vsc) + packed_meta;
-
-       lockdep_assert_held(&line->lock);
-
-       if (line->w_err_gc->has_write_err) {
-               if (line->gc_group != PBLK_LINEGC_WERR) {
-                       line->gc_group = PBLK_LINEGC_WERR;
-                       move_list = &l_mg->gc_werr_list;
-                       pblk_rl_werr_line_in(&pblk->rl);
-               }
-       } else if (!vsc) {
-               if (line->gc_group != PBLK_LINEGC_FULL) {
-                       line->gc_group = PBLK_LINEGC_FULL;
-                       move_list = &l_mg->gc_full_list;
-               }
-       } else if (vsc < lm->high_thrs) {
-               if (line->gc_group != PBLK_LINEGC_HIGH) {
-                       line->gc_group = PBLK_LINEGC_HIGH;
-                       move_list = &l_mg->gc_high_list;
-               }
-       } else if (vsc < lm->mid_thrs) {
-               if (line->gc_group != PBLK_LINEGC_MID) {
-                       line->gc_group = PBLK_LINEGC_MID;
-                       move_list = &l_mg->gc_mid_list;
-               }
-       } else if (vsc < line->sec_in_line) {
-               if (line->gc_group != PBLK_LINEGC_LOW) {
-                       line->gc_group = PBLK_LINEGC_LOW;
-                       move_list = &l_mg->gc_low_list;
-               }
-       } else if (vsc == line->sec_in_line) {
-               if (line->gc_group != PBLK_LINEGC_EMPTY) {
-                       line->gc_group = PBLK_LINEGC_EMPTY;
-                       move_list = &l_mg->gc_empty_list;
-               }
-       } else {
-               line->state = PBLK_LINESTATE_CORRUPT;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-
-               line->gc_group = PBLK_LINEGC_NONE;
-               move_list =  &l_mg->corrupt_list;
-               pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
-                                               line->id, vsc,
-                                               line->sec_in_line,
-                                               lm->high_thrs, lm->mid_thrs);
-       }
-
-       return move_list;
-}
-
-void pblk_discard(struct pblk *pblk, struct bio *bio)
-{
-       sector_t slba = pblk_get_lba(bio);
-       sector_t nr_secs = pblk_get_secs(bio);
-
-       pblk_invalidate_range(pblk, slba, nr_secs);
-}
-
-void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       atomic_long_inc(&pblk->write_failed);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       pblk_print_failed_rqd(pblk, rqd, rqd->error);
-#endif
-}
-
-void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       /* Empty page read is not necessarily an error (e.g., L2P recovery) */
-       if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
-               atomic_long_inc(&pblk->read_empty);
-               return;
-       }
-
-       switch (rqd->error) {
-       case NVM_RSP_WARN_HIGHECC:
-               atomic_long_inc(&pblk->read_high_ecc);
-               break;
-       case NVM_RSP_ERR_FAILECC:
-       case NVM_RSP_ERR_FAILCRC:
-               atomic_long_inc(&pblk->read_failed);
-               break;
-       default:
-               pblk_err(pblk, "unknown read error:%d\n", rqd->error);
-       }
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       pblk_print_failed_rqd(pblk, rqd, rqd->error);
-#endif
-}
-
-void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
-{
-       pblk->sec_per_write = sec_per_write;
-}
-
-int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       atomic_inc(&pblk->inflight_io);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       if (pblk_check_io(pblk, rqd))
-               return NVM_IO_ERR;
-#endif
-
-       return nvm_submit_io(dev, rqd, buf);
-}
-
-void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       int i;
-
-       for (i = 0; i < rqd->nr_ppas; i++) {
-               struct ppa_addr *ppa = &ppa_list[i];
-               struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
-               u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
-
-               if (caddr == 0)
-                       trace_pblk_chunk_state(pblk_disk_name(pblk),
-                                                       ppa, NVM_CHK_ST_OPEN);
-               else if (caddr == (chunk->cnlb - 1))
-                       trace_pblk_chunk_state(pblk_disk_name(pblk),
-                                                       ppa, NVM_CHK_ST_CLOSED);
-       }
-}
-
-int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       int ret;
-
-       atomic_inc(&pblk->inflight_io);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       if (pblk_check_io(pblk, rqd))
-               return NVM_IO_ERR;
-#endif
-
-       ret = nvm_submit_io_sync(dev, rqd, buf);
-
-       if (trace_pblk_chunk_state_enabled() && !ret &&
-           rqd->opcode == NVM_OP_PWRITE)
-               pblk_check_chunk_state_update(pblk, rqd);
-
-       return ret;
-}
-
-static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
-                                  void *buf)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       int ret;
-
-       pblk_down_chunk(pblk, ppa_list[0]);
-       ret = pblk_submit_io_sync(pblk, rqd, buf);
-       pblk_up_chunk(pblk, ppa_list[0]);
-
-       return ret;
-}
-
-int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
-                  unsigned long secs_to_flush, bool skip_meta)
-{
-       int max = pblk->sec_per_write;
-       int min = pblk->min_write_pgs;
-       int secs_to_sync = 0;
-
-       if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
-               min = max = pblk->min_write_pgs_data;
-
-       if (secs_avail >= max)
-               secs_to_sync = max;
-       else if (secs_avail >= min)
-               secs_to_sync = min * (secs_avail / min);
-       else if (secs_to_flush)
-               secs_to_sync = min;
-
-       return secs_to_sync;
-}
-
-void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
-{
-       u64 addr;
-       int i;
-
-       spin_lock(&line->lock);
-       addr = find_next_zero_bit(line->map_bitmap,
-                                       pblk->lm.sec_per_line, line->cur_sec);
-       line->cur_sec = addr - nr_secs;
-
-       for (i = 0; i < nr_secs; i++, line->cur_sec--)
-               WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
-       spin_unlock(&line->lock);
-}
-
-u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
-{
-       u64 addr;
-       int i;
-
-       lockdep_assert_held(&line->lock);
-
-       /* logic error: ppa out-of-bounds. Prevent generating bad address */
-       if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
-               WARN(1, "pblk: page allocation out of bounds\n");
-               nr_secs = pblk->lm.sec_per_line - line->cur_sec;
-       }
-
-       line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
-                                       pblk->lm.sec_per_line, line->cur_sec);
-       for (i = 0; i < nr_secs; i++, line->cur_sec++)
-               WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
-
-       return addr;
-}
-
-u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
-{
-       u64 addr;
-
-       /* Lock needed in case a write fails and a recovery needs to remap
-        * failed write buffer entries
-        */
-       spin_lock(&line->lock);
-       addr = __pblk_alloc_page(pblk, line, nr_secs);
-       line->left_msecs -= nr_secs;
-       WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
-       spin_unlock(&line->lock);
-
-       return addr;
-}
-
-u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
-{
-       u64 paddr;
-
-       spin_lock(&line->lock);
-       paddr = find_next_zero_bit(line->map_bitmap,
-                                       pblk->lm.sec_per_line, line->cur_sec);
-       spin_unlock(&line->lock);
-
-       return paddr;
-}
-
-u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       int bit;
-
-       /* This usually only happens on bad lines */
-       bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
-       if (bit >= lm->blk_per_line)
-               return -1;
-
-       return bit * geo->ws_opt;
-}
-
-int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct ppa_addr *ppa_list;
-       struct nvm_rq rqd;
-       u64 paddr = pblk_line_smeta_start(pblk, line);
-       int i, ret;
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       ret = pblk_alloc_rqd_meta(pblk, &rqd);
-       if (ret)
-               return ret;
-
-       rqd.opcode = NVM_OP_PREAD;
-       rqd.nr_ppas = lm->smeta_sec;
-       rqd.is_seq = 1;
-       ppa_list = nvm_rq_to_ppa_list(&rqd);
-
-       for (i = 0; i < lm->smeta_sec; i++, paddr++)
-               ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
-
-       ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
-       if (ret) {
-               pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
-               goto clear_rqd;
-       }
-
-       atomic_dec(&pblk->inflight_io);
-
-       if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
-               pblk_log_read_err(pblk, &rqd);
-               ret = -EIO;
-       }
-
-clear_rqd:
-       pblk_free_rqd_meta(pblk, &rqd);
-       return ret;
-}
-
-static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
-                                u64 paddr)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct ppa_addr *ppa_list;
-       struct nvm_rq rqd;
-       __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
-       __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-       int i, ret;
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       ret = pblk_alloc_rqd_meta(pblk, &rqd);
-       if (ret)
-               return ret;
-
-       rqd.opcode = NVM_OP_PWRITE;
-       rqd.nr_ppas = lm->smeta_sec;
-       rqd.is_seq = 1;
-       ppa_list = nvm_rq_to_ppa_list(&rqd);
-
-       for (i = 0; i < lm->smeta_sec; i++, paddr++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk,
-                                                          rqd.meta_list, i);
-
-               ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
-               meta->lba = lba_list[paddr] = addr_empty;
-       }
-
-       ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
-       if (ret) {
-               pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
-               goto clear_rqd;
-       }
-
-       atomic_dec(&pblk->inflight_io);
-
-       if (rqd.error) {
-               pblk_log_write_err(pblk, &rqd);
-               ret = -EIO;
-       }
-
-clear_rqd:
-       pblk_free_rqd_meta(pblk, &rqd);
-       return ret;
-}
-
-int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
-                        void *emeta_buf)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       void *ppa_list_buf, *meta_list;
-       struct ppa_addr *ppa_list;
-       struct nvm_rq rqd;
-       u64 paddr = line->emeta_ssec;
-       dma_addr_t dma_ppa_list, dma_meta_list;
-       int min = pblk->min_write_pgs;
-       int left_ppas = lm->emeta_sec[0];
-       int line_id = line->id;
-       int rq_ppas, rq_len;
-       int i, j;
-       int ret;
-
-       meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-                                                       &dma_meta_list);
-       if (!meta_list)
-               return -ENOMEM;
-
-       ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
-       dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
-
-next_rq:
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
-       rq_len = rq_ppas * geo->csecs;
-
-       rqd.meta_list = meta_list;
-       rqd.ppa_list = ppa_list_buf;
-       rqd.dma_meta_list = dma_meta_list;
-       rqd.dma_ppa_list = dma_ppa_list;
-       rqd.opcode = NVM_OP_PREAD;
-       rqd.nr_ppas = rq_ppas;
-       ppa_list = nvm_rq_to_ppa_list(&rqd);
-
-       for (i = 0; i < rqd.nr_ppas; ) {
-               struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
-               int pos = pblk_ppa_to_pos(geo, ppa);
-
-               if (pblk_io_aligned(pblk, rq_ppas))
-                       rqd.is_seq = 1;
-
-               while (test_bit(pos, line->blk_bitmap)) {
-                       paddr += min;
-                       if (pblk_boundary_paddr_checks(pblk, paddr)) {
-                               ret = -EINTR;
-                               goto free_rqd_dma;
-                       }
-
-                       ppa = addr_to_gen_ppa(pblk, paddr, line_id);
-                       pos = pblk_ppa_to_pos(geo, ppa);
-               }
-
-               if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
-                       ret = -EINTR;
-                       goto free_rqd_dma;
-               }
-
-               for (j = 0; j < min; j++, i++, paddr++)
-                       ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
-       }
-
-       ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
-       if (ret) {
-               pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
-               goto free_rqd_dma;
-       }
-
-       atomic_dec(&pblk->inflight_io);
-
-       if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
-               pblk_log_read_err(pblk, &rqd);
-               ret = -EIO;
-               goto free_rqd_dma;
-       }
-
-       emeta_buf += rq_len;
-       left_ppas -= rq_ppas;
-       if (left_ppas)
-               goto next_rq;
-
-free_rqd_dma:
-       nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
-       return ret;
-}
-
-static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                           struct ppa_addr ppa)
-{
-       rqd->opcode = NVM_OP_ERASE;
-       rqd->ppa_addr = ppa;
-       rqd->nr_ppas = 1;
-       rqd->is_seq = 1;
-       rqd->bio = NULL;
-}
-
-static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct nvm_rq rqd = {NULL};
-       int ret;
-
-       trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
-                               PBLK_CHUNK_RESET_START);
-
-       pblk_setup_e_rq(pblk, &rqd, ppa);
-
-       /* The write thread schedules erases so that it minimizes disturbance
-        * to writes. Thus, there is no need to take the LUN semaphore.
-        */
-       ret = pblk_submit_io_sync(pblk, &rqd, NULL);
-       rqd.private = pblk;
-       __pblk_end_io_erase(pblk, &rqd);
-
-       return ret;
-}
-
-int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct ppa_addr ppa;
-       int ret, bit = -1;
-
-       /* Erase only good blocks, one at a time */
-       do {
-               spin_lock(&line->lock);
-               bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
-                                                               bit + 1);
-               if (bit >= lm->blk_per_line) {
-                       spin_unlock(&line->lock);
-                       break;
-               }
-
-               ppa = pblk->luns[bit].bppa; /* set ch and lun */
-               ppa.a.blk = line->id;
-
-               atomic_dec(&line->left_eblks);
-               WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
-               spin_unlock(&line->lock);
-
-               ret = pblk_blk_erase_sync(pblk, ppa);
-               if (ret) {
-                       pblk_err(pblk, "failed to erase line %d\n", line->id);
-                       return ret;
-               }
-       } while (1);
-
-       return 0;
-}
-
-static void pblk_line_setup_metadata(struct pblk_line *line,
-                                    struct pblk_line_mgmt *l_mg,
-                                    struct pblk_line_meta *lm)
-{
-       int meta_line;
-
-       lockdep_assert_held(&l_mg->free_lock);
-
-retry_meta:
-       meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
-       if (meta_line == PBLK_DATA_LINES) {
-               spin_unlock(&l_mg->free_lock);
-               io_schedule();
-               spin_lock(&l_mg->free_lock);
-               goto retry_meta;
-       }
-
-       set_bit(meta_line, &l_mg->meta_bitmap);
-       line->meta_line = meta_line;
-
-       line->smeta = l_mg->sline_meta[meta_line];
-       line->emeta = l_mg->eline_meta[meta_line];
-
-       memset(line->smeta, 0, lm->smeta_len);
-       memset(line->emeta->buf, 0, lm->emeta_len[0]);
-
-       line->emeta->mem = 0;
-       atomic_set(&line->emeta->sync, 0);
-}
-
-/* For now, lines are always assumed to be full. Thus, the smeta former and
- * current lun bitmaps are omitted.
- */
-static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
-                                 struct pblk_line *cur)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_emeta *emeta = line->emeta;
-       struct line_emeta *emeta_buf = emeta->buf;
-       struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
-       int nr_blk_line;
-
-       /* After erasing the line, new bad blocks might appear and we risk
-        * having an invalid line
-        */
-       nr_blk_line = lm->blk_per_line -
-                       bitmap_weight(line->blk_bitmap, lm->blk_per_line);
-       if (nr_blk_line < lm->min_blk_line) {
-               spin_lock(&l_mg->free_lock);
-               spin_lock(&line->lock);
-               line->state = PBLK_LINESTATE_BAD;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-               spin_unlock(&line->lock);
-
-               list_add_tail(&line->list, &l_mg->bad_list);
-               spin_unlock(&l_mg->free_lock);
-
-               pblk_debug(pblk, "line %d is bad\n", line->id);
-
-               return 0;
-       }
-
-       /* Run-time metadata */
-       line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
-
-       /* Mark LUNs allocated in this line (all for now) */
-       bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
-
-       smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
-       export_guid(smeta_buf->header.uuid, &pblk->instance_uuid);
-       smeta_buf->header.id = cpu_to_le32(line->id);
-       smeta_buf->header.type = cpu_to_le16(line->type);
-       smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
-       smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
-
-       /* Start metadata */
-       smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
-       smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
-
-       /* Fill metadata among lines */
-       if (cur) {
-               memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
-               smeta_buf->prev_id = cpu_to_le32(cur->id);
-               cur->emeta->buf->next_id = cpu_to_le32(line->id);
-       } else {
-               smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
-       }
-
-       /* All smeta must be set at this point */
-       smeta_buf->header.crc = cpu_to_le32(
-                       pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
-       smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
-
-       /* End metadata */
-       memcpy(&emeta_buf->header, &smeta_buf->header,
-                                               sizeof(struct line_header));
-
-       emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
-       emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
-       emeta_buf->header.crc = cpu_to_le32(
-                       pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
-
-       emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
-       emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
-       emeta_buf->nr_valid_lbas = cpu_to_le64(0);
-       emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
-       emeta_buf->crc = cpu_to_le32(0);
-       emeta_buf->prev_id = smeta_buf->prev_id;
-
-       return 1;
-}
-
-static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-
-       line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
-       if (!line->map_bitmap)
-               return -ENOMEM;
-
-       memset(line->map_bitmap, 0, lm->sec_bitmap_len);
-
-       /* will be initialized using bb info from map_bitmap */
-       line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
-       if (!line->invalid_bitmap) {
-               mempool_free(line->map_bitmap, l_mg->bitmap_pool);
-               line->map_bitmap = NULL;
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-/* For now, lines are always assumed to be full. Thus, the smeta former and
- * current lun bitmaps are omitted.
- */
-static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
-                            int init)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       u64 off;
-       int bit = -1;
-       int emeta_secs;
-
-       line->sec_in_line = lm->sec_per_line;
-
-       /* Capture bad block information on line mapping bitmaps */
-       while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
-                                       bit + 1)) < lm->blk_per_line) {
-               off = bit * geo->ws_opt;
-               bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
-                                                       lm->sec_per_line);
-               bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
-                                                       lm->sec_per_line);
-               line->sec_in_line -= geo->clba;
-       }
-
-       /* Mark smeta metadata sectors as bad sectors */
-       bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
-       off = bit * geo->ws_opt;
-       bitmap_set(line->map_bitmap, off, lm->smeta_sec);
-       line->sec_in_line -= lm->smeta_sec;
-       line->cur_sec = off + lm->smeta_sec;
-
-       if (init && pblk_line_smeta_write(pblk, line, off)) {
-               pblk_debug(pblk, "line smeta I/O failed. Retry\n");
-               return 0;
-       }
-
-       bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
-
-       /* Mark emeta metadata sectors as bad sectors. We need to consider bad
-        * blocks to make sure that there are enough sectors to store emeta
-        */
-       emeta_secs = lm->emeta_sec[0];
-       off = lm->sec_per_line;
-       while (emeta_secs) {
-               off -= geo->ws_opt;
-               if (!test_bit(off, line->invalid_bitmap)) {
-                       bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
-                       emeta_secs -= geo->ws_opt;
-               }
-       }
-
-       line->emeta_ssec = off;
-       line->sec_in_line -= lm->emeta_sec[0];
-       line->nr_valid_lbas = 0;
-       line->left_msecs = line->sec_in_line;
-       *line->vsc = cpu_to_le32(line->sec_in_line);
-
-       if (lm->sec_per_line - line->sec_in_line !=
-               bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
-               spin_lock(&line->lock);
-               line->state = PBLK_LINESTATE_BAD;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-               spin_unlock(&line->lock);
-
-               list_add_tail(&line->list, &l_mg->bad_list);
-               pblk_err(pblk, "unexpected line %d is bad\n", line->id);
-
-               return 0;
-       }
-
-       return 1;
-}
-
-static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int blk_to_erase = atomic_read(&line->blk_in_line);
-       int i;
-
-       for (i = 0; i < lm->blk_per_line; i++) {
-               struct pblk_lun *rlun = &pblk->luns[i];
-               int pos = pblk_ppa_to_pos(geo, rlun->bppa);
-               int state = line->chks[pos].state;
-
-               /* Free chunks should not be erased */
-               if (state & NVM_CHK_ST_FREE) {
-                       set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
-                                                       line->erase_bitmap);
-                       blk_to_erase--;
-               }
-       }
-
-       return blk_to_erase;
-}
-
-static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       int blk_in_line = atomic_read(&line->blk_in_line);
-       int blk_to_erase;
-
-       /* Bad blocks do not need to be erased */
-       bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
-
-       spin_lock(&line->lock);
-
-       /* If we have not written to this line, we need to mark free chunks
-        * as already erased
-        */
-       if (line->state == PBLK_LINESTATE_NEW) {
-               blk_to_erase = pblk_prepare_new_line(pblk, line);
-               line->state = PBLK_LINESTATE_FREE;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-       } else {
-               blk_to_erase = blk_in_line;
-       }
-
-       if (blk_in_line < lm->min_blk_line) {
-               spin_unlock(&line->lock);
-               return -EAGAIN;
-       }
-
-       if (line->state != PBLK_LINESTATE_FREE) {
-               WARN(1, "pblk: corrupted line %d, state %d\n",
-                                                       line->id, line->state);
-               spin_unlock(&line->lock);
-               return -EINTR;
-       }
-
-       line->state = PBLK_LINESTATE_OPEN;
-       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                               line->state);
-
-       atomic_set(&line->left_eblks, blk_to_erase);
-       atomic_set(&line->left_seblks, blk_to_erase);
-
-       line->meta_distance = lm->meta_distance;
-       spin_unlock(&line->lock);
-
-       kref_init(&line->ref);
-       atomic_set(&line->sec_to_update, 0);
-
-       return 0;
-}
-
-/* Line allocations in the recovery path are always single-threaded */
-int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       int ret;
-
-       spin_lock(&l_mg->free_lock);
-       l_mg->data_line = line;
-       list_del(&line->list);
-
-       ret = pblk_line_prepare(pblk, line);
-       if (ret) {
-               list_add(&line->list, &l_mg->free_list);
-               spin_unlock(&l_mg->free_lock);
-               return ret;
-       }
-       spin_unlock(&l_mg->free_lock);
-
-       ret = pblk_line_alloc_bitmaps(pblk, line);
-       if (ret)
-               goto fail;
-
-       if (!pblk_line_init_bb(pblk, line, 0)) {
-               ret = -EINTR;
-               goto fail;
-       }
-
-       pblk_rl_free_lines_dec(&pblk->rl, line, true);
-       return 0;
-
-fail:
-       spin_lock(&l_mg->free_lock);
-       list_add(&line->list, &l_mg->free_list);
-       spin_unlock(&l_mg->free_lock);
-
-       return ret;
-}
-
-void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-
-       mempool_free(line->map_bitmap, l_mg->bitmap_pool);
-       line->map_bitmap = NULL;
-       line->smeta = NULL;
-       line->emeta = NULL;
-}
-
-static void pblk_line_reinit(struct pblk_line *line)
-{
-       *line->vsc = cpu_to_le32(EMPTY_ENTRY);
-
-       line->map_bitmap = NULL;
-       line->invalid_bitmap = NULL;
-       line->smeta = NULL;
-       line->emeta = NULL;
-}
-
-void pblk_line_free(struct pblk_line *line)
-{
-       struct pblk *pblk = line->pblk;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-
-       mempool_free(line->map_bitmap, l_mg->bitmap_pool);
-       mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);
-
-       pblk_line_reinit(line);
-}
-
-struct pblk_line *pblk_line_get(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line *line;
-       int ret, bit;
-
-       lockdep_assert_held(&l_mg->free_lock);
-
-retry:
-       if (list_empty(&l_mg->free_list)) {
-               pblk_err(pblk, "no free lines\n");
-               return NULL;
-       }
-
-       line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
-       list_del(&line->list);
-       l_mg->nr_free_lines--;
-
-       bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
-       if (unlikely(bit >= lm->blk_per_line)) {
-               spin_lock(&line->lock);
-               line->state = PBLK_LINESTATE_BAD;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-               spin_unlock(&line->lock);
-
-               list_add_tail(&line->list, &l_mg->bad_list);
-
-               pblk_debug(pblk, "line %d is bad\n", line->id);
-               goto retry;
-       }
-
-       ret = pblk_line_prepare(pblk, line);
-       if (ret) {
-               switch (ret) {
-               case -EAGAIN:
-                       list_add(&line->list, &l_mg->bad_list);
-                       goto retry;
-               case -EINTR:
-                       list_add(&line->list, &l_mg->corrupt_list);
-                       goto retry;
-               default:
-                       pblk_err(pblk, "failed to prepare line %d\n", line->id);
-                       list_add(&line->list, &l_mg->free_list);
-                       l_mg->nr_free_lines++;
-                       return NULL;
-               }
-       }
-
-       return line;
-}
-
-static struct pblk_line *pblk_line_retry(struct pblk *pblk,
-                                        struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *retry_line;
-
-retry:
-       spin_lock(&l_mg->free_lock);
-       retry_line = pblk_line_get(pblk);
-       if (!retry_line) {
-               l_mg->data_line = NULL;
-               spin_unlock(&l_mg->free_lock);
-               return NULL;
-       }
-
-       retry_line->map_bitmap = line->map_bitmap;
-       retry_line->invalid_bitmap = line->invalid_bitmap;
-       retry_line->smeta = line->smeta;
-       retry_line->emeta = line->emeta;
-       retry_line->meta_line = line->meta_line;
-
-       pblk_line_reinit(line);
-
-       l_mg->data_line = retry_line;
-       spin_unlock(&l_mg->free_lock);
-
-       pblk_rl_free_lines_dec(&pblk->rl, line, false);
-
-       if (pblk_line_erase(pblk, retry_line))
-               goto retry;
-
-       return retry_line;
-}
-
-static void pblk_set_space_limit(struct pblk *pblk)
-{
-       struct pblk_rl *rl = &pblk->rl;
-
-       atomic_set(&rl->rb_space, 0);
-}
-
-struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *line;
-
-       spin_lock(&l_mg->free_lock);
-       line = pblk_line_get(pblk);
-       if (!line) {
-               spin_unlock(&l_mg->free_lock);
-               return NULL;
-       }
-
-       line->seq_nr = l_mg->d_seq_nr++;
-       line->type = PBLK_LINETYPE_DATA;
-       l_mg->data_line = line;
-
-       pblk_line_setup_metadata(line, l_mg, &pblk->lm);
-
-       /* Allocate next line for preparation */
-       l_mg->data_next = pblk_line_get(pblk);
-       if (!l_mg->data_next) {
-               /* If we cannot get a new line, we need to stop the pipeline.
-                * Only allow as many writes in as we can store safely and then
-                * fail gracefully
-                */
-               pblk_set_space_limit(pblk);
-
-               l_mg->data_next = NULL;
-       } else {
-               l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
-               l_mg->data_next->type = PBLK_LINETYPE_DATA;
-       }
-       spin_unlock(&l_mg->free_lock);
-
-       if (pblk_line_alloc_bitmaps(pblk, line))
-               return NULL;
-
-       if (pblk_line_erase(pblk, line)) {
-               line = pblk_line_retry(pblk, line);
-               if (!line)
-                       return NULL;
-       }
-
-retry_setup:
-       if (!pblk_line_init_metadata(pblk, line, NULL)) {
-               line = pblk_line_retry(pblk, line);
-               if (!line)
-                       return NULL;
-
-               goto retry_setup;
-       }
-
-       if (!pblk_line_init_bb(pblk, line, 1)) {
-               line = pblk_line_retry(pblk, line);
-               if (!line)
-                       return NULL;
-
-               goto retry_setup;
-       }
-
-       pblk_rl_free_lines_dec(&pblk->rl, line, true);
-
-       return line;
-}
-
-void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct pblk_line *line;
-
-       line = pblk_ppa_to_line(pblk, ppa);
-       kref_put(&line->ref, pblk_line_put_wq);
-}
-
-void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       int i;
-
-       for (i = 0; i < rqd->nr_ppas; i++)
-               pblk_ppa_to_line_put(pblk, ppa_list[i]);
-}
-
-static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
-{
-       lockdep_assert_held(&pblk->l_mg.free_lock);
-
-       pblk_set_space_limit(pblk);
-       pblk->state = PBLK_STATE_STOPPING;
-       trace_pblk_state(pblk_disk_name(pblk), pblk->state);
-}
-
-static void pblk_line_close_meta_sync(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line *line, *tline;
-       LIST_HEAD(list);
-
-       spin_lock(&l_mg->close_lock);
-       if (list_empty(&l_mg->emeta_list)) {
-               spin_unlock(&l_mg->close_lock);
-               return;
-       }
-
-       list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
-       spin_unlock(&l_mg->close_lock);
-
-       list_for_each_entry_safe(line, tline, &list, list) {
-               struct pblk_emeta *emeta = line->emeta;
-
-               while (emeta->mem < lm->emeta_len[0]) {
-                       int ret;
-
-                       ret = pblk_submit_meta_io(pblk, line);
-                       if (ret) {
-                               pblk_err(pblk, "sync meta line %d failed (%d)\n",
-                                                       line->id, ret);
-                               return;
-                       }
-               }
-       }
-
-       pblk_wait_for_meta(pblk);
-       flush_workqueue(pblk->close_wq);
-}
-
-void __pblk_pipeline_flush(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       int ret;
-
-       spin_lock(&l_mg->free_lock);
-       if (pblk->state == PBLK_STATE_RECOVERING ||
-                                       pblk->state == PBLK_STATE_STOPPED) {
-               spin_unlock(&l_mg->free_lock);
-               return;
-       }
-       pblk->state = PBLK_STATE_RECOVERING;
-       trace_pblk_state(pblk_disk_name(pblk), pblk->state);
-       spin_unlock(&l_mg->free_lock);
-
-       pblk_flush_writer(pblk);
-       pblk_wait_for_meta(pblk);
-
-       ret = pblk_recov_pad(pblk);
-       if (ret) {
-               pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
-               return;
-       }
-
-       flush_workqueue(pblk->bb_wq);
-       pblk_line_close_meta_sync(pblk);
-}
-
-void __pblk_pipeline_stop(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-
-       spin_lock(&l_mg->free_lock);
-       pblk->state = PBLK_STATE_STOPPED;
-       trace_pblk_state(pblk_disk_name(pblk), pblk->state);
-       l_mg->data_line = NULL;
-       l_mg->data_next = NULL;
-       spin_unlock(&l_mg->free_lock);
-}
-
-void pblk_pipeline_stop(struct pblk *pblk)
-{
-       __pblk_pipeline_flush(pblk);
-       __pblk_pipeline_stop(pblk);
-}
-
-struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *cur, *new = NULL;
-       unsigned int left_seblks;
-
-       new = l_mg->data_next;
-       if (!new)
-               goto out;
-
-       spin_lock(&l_mg->free_lock);
-       cur = l_mg->data_line;
-       l_mg->data_line = new;
-
-       pblk_line_setup_metadata(new, l_mg, &pblk->lm);
-       spin_unlock(&l_mg->free_lock);
-
-retry_erase:
-       left_seblks = atomic_read(&new->left_seblks);
-       if (left_seblks) {
-               /* If line is not fully erased, erase it */
-               if (atomic_read(&new->left_eblks)) {
-                       if (pblk_line_erase(pblk, new))
-                               goto out;
-               } else {
-                       io_schedule();
-               }
-               goto retry_erase;
-       }
-
-       if (pblk_line_alloc_bitmaps(pblk, new))
-               return NULL;
-
-retry_setup:
-       if (!pblk_line_init_metadata(pblk, new, cur)) {
-               new = pblk_line_retry(pblk, new);
-               if (!new)
-                       goto out;
-
-               goto retry_setup;
-       }
-
-       if (!pblk_line_init_bb(pblk, new, 1)) {
-               new = pblk_line_retry(pblk, new);
-               if (!new)
-                       goto out;
-
-               goto retry_setup;
-       }
-
-       pblk_rl_free_lines_dec(&pblk->rl, new, true);
-
-       /* Allocate next line for preparation */
-       spin_lock(&l_mg->free_lock);
-       l_mg->data_next = pblk_line_get(pblk);
-       if (!l_mg->data_next) {
-               /* If we cannot get a new line, we need to stop the pipeline.
-                * Only allow as many writes in as we can store safely and then
-                * fail gracefully
-                */
-               pblk_stop_writes(pblk, new);
-               l_mg->data_next = NULL;
-       } else {
-               l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
-               l_mg->data_next->type = PBLK_LINETYPE_DATA;
-       }
-       spin_unlock(&l_mg->free_lock);
-
-out:
-       return new;
-}
-
-static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_gc *gc = &pblk->gc;
-
-       spin_lock(&line->lock);
-       WARN_ON(line->state != PBLK_LINESTATE_GC);
-       if (line->w_err_gc->has_gc_err) {
-               spin_unlock(&line->lock);
-               pblk_err(pblk, "line %d had errors during GC\n", line->id);
-               pblk_put_line_back(pblk, line);
-               line->w_err_gc->has_gc_err = 0;
-               return;
-       }
-
-       line->state = PBLK_LINESTATE_FREE;
-       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-       line->gc_group = PBLK_LINEGC_NONE;
-       pblk_line_free(line);
-
-       if (line->w_err_gc->has_write_err) {
-               pblk_rl_werr_line_out(&pblk->rl);
-               line->w_err_gc->has_write_err = 0;
-       }
-
-       spin_unlock(&line->lock);
-       atomic_dec(&gc->pipeline_gc);
-
-       spin_lock(&l_mg->free_lock);
-       list_add_tail(&line->list, &l_mg->free_list);
-       l_mg->nr_free_lines++;
-       spin_unlock(&l_mg->free_lock);
-
-       pblk_rl_free_lines_inc(&pblk->rl, line);
-}
-
-static void pblk_line_put_ws(struct work_struct *work)
-{
-       struct pblk_line_ws *line_put_ws = container_of(work,
-                                               struct pblk_line_ws, ws);
-       struct pblk *pblk = line_put_ws->pblk;
-       struct pblk_line *line = line_put_ws->line;
-
-       __pblk_line_put(pblk, line);
-       mempool_free(line_put_ws, &pblk->gen_ws_pool);
-}
-
-void pblk_line_put(struct kref *ref)
-{
-       struct pblk_line *line = container_of(ref, struct pblk_line, ref);
-       struct pblk *pblk = line->pblk;
-
-       __pblk_line_put(pblk, line);
-}
-
-void pblk_line_put_wq(struct kref *ref)
-{
-       struct pblk_line *line = container_of(ref, struct pblk_line, ref);
-       struct pblk *pblk = line->pblk;
-       struct pblk_line_ws *line_put_ws;
-
-       line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
-       if (!line_put_ws)
-               return;
-
-       line_put_ws->pblk = pblk;
-       line_put_ws->line = line;
-       line_put_ws->priv = NULL;
-
-       INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
-       queue_work(pblk->r_end_wq, &line_put_ws->ws);
-}
-
-int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct nvm_rq *rqd;
-       int err;
-
-       rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
-
-       pblk_setup_e_rq(pblk, rqd, ppa);
-
-       rqd->end_io = pblk_end_io_erase;
-       rqd->private = pblk;
-
-       trace_pblk_chunk_reset(pblk_disk_name(pblk),
-                               &ppa, PBLK_CHUNK_RESET_START);
-
-       /* The write thread schedules erases so that it minimizes disturbance
-        * to writes. Thus, there is no need to take the LUN semaphore.
-        */
-       err = pblk_submit_io(pblk, rqd, NULL);
-       if (err) {
-               struct nvm_tgt_dev *dev = pblk->dev;
-               struct nvm_geo *geo = &dev->geo;
-
-               pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
-                                       pblk_ppa_to_line_id(ppa),
-                                       pblk_ppa_to_pos(geo, ppa));
-       }
-
-       return err;
-}
-
-struct pblk_line *pblk_line_get_data(struct pblk *pblk)
-{
-       return pblk->l_mg.data_line;
-}
-
-/* For now, always erase next line */
-struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
-{
-       return pblk->l_mg.data_next;
-}
-
-int pblk_line_is_full(struct pblk_line *line)
-{
-       return (line->left_msecs == 0);
-}
-
-static void pblk_line_should_sync_meta(struct pblk *pblk)
-{
-       if (pblk_rl_is_limit(&pblk->rl))
-               pblk_line_close_meta_sync(pblk);
-}
-
-void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct list_head *move_list;
-       int i;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
-                               "pblk: corrupt closed line %d\n", line->id);
-#endif
-
-       spin_lock(&l_mg->free_lock);
-       WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
-       spin_unlock(&l_mg->free_lock);
-
-       spin_lock(&l_mg->gc_lock);
-       spin_lock(&line->lock);
-       WARN_ON(line->state != PBLK_LINESTATE_OPEN);
-       line->state = PBLK_LINESTATE_CLOSED;
-       move_list = pblk_line_gc_list(pblk, line);
-       list_add_tail(&line->list, move_list);
-
-       mempool_free(line->map_bitmap, l_mg->bitmap_pool);
-       line->map_bitmap = NULL;
-       line->smeta = NULL;
-       line->emeta = NULL;
-
-       for (i = 0; i < lm->blk_per_line; i++) {
-               struct pblk_lun *rlun = &pblk->luns[i];
-               int pos = pblk_ppa_to_pos(geo, rlun->bppa);
-               int state = line->chks[pos].state;
-
-               if (!(state & NVM_CHK_ST_OFFLINE))
-                       line->chks[pos].state = NVM_CHK_ST_CLOSED;
-       }
-
-       spin_unlock(&line->lock);
-       spin_unlock(&l_mg->gc_lock);
-
-       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-}
-
-void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_emeta *emeta = line->emeta;
-       struct line_emeta *emeta_buf = emeta->buf;
-       struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
-
-       /* No exact vsc value needed; avoid a big line lock and approximate. */
-       memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
-       memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
-
-       wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
-       wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
-       wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
-
-       if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
-               emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
-               export_guid(emeta_buf->header.uuid, &pblk->instance_uuid);
-               emeta_buf->header.id = cpu_to_le32(line->id);
-               emeta_buf->header.type = cpu_to_le16(line->type);
-               emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
-               emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
-               emeta_buf->header.crc = cpu_to_le32(
-                       pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
-       }
-
-       emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
-       emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
-
-       spin_lock(&l_mg->close_lock);
-       spin_lock(&line->lock);
-
-       /* Update the in-memory start address for emeta, in case it has
-        * shifted due to write errors
-        */
-       if (line->emeta_ssec != line->cur_sec)
-               line->emeta_ssec = line->cur_sec;
-
-       list_add_tail(&line->list, &l_mg->emeta_list);
-       spin_unlock(&line->lock);
-       spin_unlock(&l_mg->close_lock);
-
-       pblk_line_should_sync_meta(pblk);
-}
-
-static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       unsigned int lba_list_size = lm->emeta_len[2];
-       struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
-       struct pblk_emeta *emeta = line->emeta;
-
-       w_err_gc->lba_list = kvmalloc(lba_list_size, GFP_KERNEL);
-       memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
-                               lba_list_size);
-}
-
-void pblk_line_close_ws(struct work_struct *work)
-{
-       struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
-                                                                       ws);
-       struct pblk *pblk = line_ws->pblk;
-       struct pblk_line *line = line_ws->line;
-       struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
-
-       /* Write errors make the emeta start address stored in smeta invalid,
-        * so keep a copy of the lba list until we've gc'd the line
-        */
-       if (w_err_gc->has_write_err)
-               pblk_save_lba_list(pblk, line);
-
-       pblk_line_close(pblk, line);
-       mempool_free(line_ws, &pblk->gen_ws_pool);
-}
-
-void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
-                     void (*work)(struct work_struct *), gfp_t gfp_mask,
-                     struct workqueue_struct *wq)
-{
-       struct pblk_line_ws *line_ws;
-
-       line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
-       if (!line_ws) {
-               pblk_err(pblk, "pblk: could not allocate memory\n");
-               return;
-       }
-
-       line_ws->pblk = pblk;
-       line_ws->line = line;
-       line_ws->priv = priv;
-
-       INIT_WORK(&line_ws->ws, work);
-       queue_work(wq, &line_ws->ws);
-}
-
-static void __pblk_down_chunk(struct pblk *pblk, int pos)
-{
-       struct pblk_lun *rlun = &pblk->luns[pos];
-       int ret;
-
-       /*
-        * Only send one inflight I/O per LUN. Since we map at a page
-        * granularity, all ppas in the I/O will map to the same LUN
-        */
-
-       ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
-       if (ret == -ETIME || ret == -EINTR)
-               pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
-                               -ret);
-}
-
-void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int pos = pblk_ppa_to_pos(geo, ppa);
-
-       __pblk_down_chunk(pblk, pos);
-}
-
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
-                 unsigned long *lun_bitmap)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int pos = pblk_ppa_to_pos(geo, ppa);
-
-       /* If the LUN has been locked for this same request, do not attempt to
-        * lock it again
-        */
-       if (test_and_set_bit(pos, lun_bitmap))
-               return;
-
-       __pblk_down_chunk(pblk, pos);
-}
-
-void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_lun *rlun;
-       int pos = pblk_ppa_to_pos(geo, ppa);
-
-       rlun = &pblk->luns[pos];
-       up(&rlun->wr_sem);
-}
-
-void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_lun *rlun;
-       int num_lun = geo->all_luns;
-       int bit = -1;
-
-       while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
-               rlun = &pblk->luns[bit];
-               up(&rlun->wr_sem);
-       }
-}
-
-void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
-{
-       struct ppa_addr ppa_l2p;
-
-       /* logic error: lba out-of-bounds. Ignore update */
-       if (!(lba < pblk->capacity)) {
-               WARN(1, "pblk: corrupted L2P map request\n");
-               return;
-       }
-
-       spin_lock(&pblk->trans_lock);
-       ppa_l2p = pblk_trans_map_get(pblk, lba);
-
-       if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
-               pblk_map_invalidate(pblk, ppa_l2p);
-
-       pblk_trans_map_set(pblk, lba, ppa);
-       spin_unlock(&pblk->trans_lock);
-}
-
-void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
-{
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Callers must ensure that the ppa points to a cache address */
-       BUG_ON(!pblk_addr_in_cache(ppa));
-       BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
-#endif
-
-       pblk_update_map(pblk, lba, ppa);
-}
-
-int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
-                      struct pblk_line *gc_line, u64 paddr_gc)
-{
-       struct ppa_addr ppa_l2p, ppa_gc;
-       int ret = 1;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Callers must ensure that the ppa points to a cache address */
-       BUG_ON(!pblk_addr_in_cache(ppa_new));
-       BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
-#endif
-
-       /* logic error: lba out-of-bounds. Ignore update */
-       if (!(lba < pblk->capacity)) {
-               WARN(1, "pblk: corrupted L2P map request\n");
-               return 0;
-       }
-
-       spin_lock(&pblk->trans_lock);
-       ppa_l2p = pblk_trans_map_get(pblk, lba);
-       ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
-
-       if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
-               spin_lock(&gc_line->lock);
-               WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
-                                               "pblk: corrupted GC update");
-               spin_unlock(&gc_line->lock);
-
-               ret = 0;
-               goto out;
-       }
-
-       pblk_trans_map_set(pblk, lba, ppa_new);
-out:
-       spin_unlock(&pblk->trans_lock);
-       return ret;
-}
-
-void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
-                        struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
-{
-       struct ppa_addr ppa_l2p;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Callers must ensure that the ppa points to a device address */
-       BUG_ON(pblk_addr_in_cache(ppa_mapped));
-#endif
-       /* Invalidate and discard padded entries */
-       if (lba == ADDR_EMPTY) {
-               atomic64_inc(&pblk->pad_wa);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-               atomic_long_inc(&pblk->padded_wb);
-#endif
-               if (!pblk_ppa_empty(ppa_mapped))
-                       pblk_map_invalidate(pblk, ppa_mapped);
-               return;
-       }
-
-       /* logic error: lba out-of-bounds. Ignore update */
-       if (!(lba < pblk->capacity)) {
-               WARN(1, "pblk: corrupted L2P map request\n");
-               return;
-       }
-
-       spin_lock(&pblk->trans_lock);
-       ppa_l2p = pblk_trans_map_get(pblk, lba);
-
-       /* Do not update L2P if the cacheline has been updated. In this case,
-        * the mapped ppa must be invalidated
-        */
-       if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
-               if (!pblk_ppa_empty(ppa_mapped))
-                       pblk_map_invalidate(pblk, ppa_mapped);
-               goto out;
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
-#endif
-
-       pblk_trans_map_set(pblk, lba, ppa_mapped);
-out:
-       spin_unlock(&pblk->trans_lock);
-}
-
-int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
-                        sector_t blba, int nr_secs, bool *from_cache)
-{
-       int i;
-
-       spin_lock(&pblk->trans_lock);
-       for (i = 0; i < nr_secs; i++) {
-               struct ppa_addr ppa;
-
-               ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
-
-               /* If the L2P entry maps to a line, the reference is valid */
-               if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
-                       struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
-
-                       if (i > 0 && *from_cache)
-                               break;
-                       *from_cache = false;
-
-                       kref_get(&line->ref);
-               } else {
-                       if (i > 0 && !*from_cache)
-                               break;
-                       *from_cache = true;
-               }
-       }
-       spin_unlock(&pblk->trans_lock);
-       return i;
-}
-
-void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
-                         u64 *lba_list, int nr_secs)
-{
-       u64 lba;
-       int i;
-
-       spin_lock(&pblk->trans_lock);
-       for (i = 0; i < nr_secs; i++) {
-               lba = lba_list[i];
-               if (lba != ADDR_EMPTY) {
-                       /* logic error: lba out-of-bounds. Ignore update */
-                       if (!(lba < pblk->capacity)) {
-                               WARN(1, "pblk: corrupted L2P map request\n");
-                               continue;
-                       }
-                       ppas[i] = pblk_trans_map_get(pblk, lba);
-               }
-       }
-       spin_unlock(&pblk->trans_lock);
-}
-
-void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       void *buffer;
-
-       if (pblk_is_oob_meta_supported(pblk)) {
-               /* Just use OOB metadata buffer as always */
-               buffer = rqd->meta_list;
-       } else {
-               /* We need to reuse the last page of the request (packed
-                * metadata) in a similar way to traditional oob metadata
-                */
-               buffer = page_to_virt(
-                       rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
-       }
-
-       return buffer;
-}
-
-void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       void *meta_list = rqd->meta_list;
-       void *page;
-       int i = 0;
-
-       if (pblk_is_oob_meta_supported(pblk))
-               return;
-
-       page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
-       /* We need to fill oob meta buffer with data from packed metadata */
-       for (; i < rqd->nr_ppas; i++)
-               memcpy(pblk_get_meta(pblk, meta_list, i),
-                       page + (i * sizeof(struct pblk_sec_meta)),
-                       sizeof(struct pblk_sec_meta));
-}
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
deleted file mode 100644 (file)
index b31658b..0000000
+++ /dev/null
@@ -1,726 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-gc.c - pblk's garbage collector
- */
-
-#include "pblk.h"
-#include "pblk-trace.h"
-#include <linux/delay.h>
-
-
-static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
-{
-       vfree(gc_rq->data);
-       kfree(gc_rq);
-}
-
-static int pblk_gc_write(struct pblk *pblk)
-{
-       struct pblk_gc *gc = &pblk->gc;
-       struct pblk_gc_rq *gc_rq, *tgc_rq;
-       LIST_HEAD(w_list);
-
-       spin_lock(&gc->w_lock);
-       if (list_empty(&gc->w_list)) {
-               spin_unlock(&gc->w_lock);
-               return 1;
-       }
-
-       list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
-       gc->w_entries = 0;
-       spin_unlock(&gc->w_lock);
-
-       list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
-               pblk_write_gc_to_cache(pblk, gc_rq);
-               list_del(&gc_rq->list);
-               kref_put(&gc_rq->line->ref, pblk_line_put);
-               pblk_gc_free_gc_rq(gc_rq);
-       }
-
-       return 0;
-}
-
-static void pblk_gc_writer_kick(struct pblk_gc *gc)
-{
-       wake_up_process(gc->gc_writer_ts);
-}
-
-void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct list_head *move_list;
-
-       spin_lock(&l_mg->gc_lock);
-       spin_lock(&line->lock);
-       WARN_ON(line->state != PBLK_LINESTATE_GC);
-       line->state = PBLK_LINESTATE_CLOSED;
-       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-
-       /* We need to reset gc_group to ensure that pblk_line_gc_list
-        * returns the proper move_list, since the current line is not
-        * on any of the gc lists right now.
-        */
-       line->gc_group = PBLK_LINEGC_NONE;
-       move_list = pblk_line_gc_list(pblk, line);
-       spin_unlock(&line->lock);
-       list_add_tail(&line->list, move_list);
-       spin_unlock(&l_mg->gc_lock);
-}
-
-static void pblk_gc_line_ws(struct work_struct *work)
-{
-       struct pblk_line_ws *gc_rq_ws = container_of(work,
-                                               struct pblk_line_ws, ws);
-       struct pblk *pblk = gc_rq_ws->pblk;
-       struct pblk_gc *gc = &pblk->gc;
-       struct pblk_line *line = gc_rq_ws->line;
-       struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
-       int ret;
-
-       up(&gc->gc_sem);
-
-       /* Read from GC victim block */
-       ret = pblk_submit_read_gc(pblk, gc_rq);
-       if (ret) {
-               line->w_err_gc->has_gc_err = 1;
-               goto out;
-       }
-
-       if (!gc_rq->secs_to_gc)
-               goto out;
-
-retry:
-       spin_lock(&gc->w_lock);
-       if (gc->w_entries >= PBLK_GC_RQ_QD) {
-               spin_unlock(&gc->w_lock);
-               pblk_gc_writer_kick(&pblk->gc);
-               usleep_range(128, 256);
-               goto retry;
-       }
-       gc->w_entries++;
-       list_add_tail(&gc_rq->list, &gc->w_list);
-       spin_unlock(&gc->w_lock);
-
-       pblk_gc_writer_kick(&pblk->gc);
-
-       kfree(gc_rq_ws);
-       return;
-
-out:
-       pblk_gc_free_gc_rq(gc_rq);
-       kref_put(&line->ref, pblk_line_put);
-       kfree(gc_rq_ws);
-}
-
-static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
-                                      struct pblk_line *line)
-{
-       struct line_emeta *emeta_buf;
-       struct pblk_line_meta *lm = &pblk->lm;
-       unsigned int lba_list_size = lm->emeta_len[2];
-       __le64 *lba_list;
-       int ret;
-
-       emeta_buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
-       if (!emeta_buf)
-               return NULL;
-
-       ret = pblk_line_emeta_read(pblk, line, emeta_buf);
-       if (ret) {
-               pblk_err(pblk, "line %d read emeta failed (%d)\n",
-                               line->id, ret);
-               kvfree(emeta_buf);
-               return NULL;
-       }
-
-       /* If this read fails, it means that emeta is corrupted.
-        * For now, leave the line untouched.
-        * TODO: Implement a recovery routine that scans and moves
-        * all sectors on the line.
-        */
-
-       ret = pblk_recov_check_emeta(pblk, emeta_buf);
-       if (ret) {
-               pblk_err(pblk, "inconsistent emeta (line %d)\n",
-                               line->id);
-               kvfree(emeta_buf);
-               return NULL;
-       }
-
-       lba_list = kvmalloc(lba_list_size, GFP_KERNEL);
-
-       if (lba_list)
-               memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);
-
-       kvfree(emeta_buf);
-
-       return lba_list;
-}
-
-static void pblk_gc_line_prepare_ws(struct work_struct *work)
-{
-       struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
-                                                                       ws);
-       struct pblk *pblk = line_ws->pblk;
-       struct pblk_line *line = line_ws->line;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_gc *gc = &pblk->gc;
-       struct pblk_line_ws *gc_rq_ws;
-       struct pblk_gc_rq *gc_rq;
-       __le64 *lba_list;
-       unsigned long *invalid_bitmap;
-       int sec_left, nr_secs, bit;
-
-       invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
-       if (!invalid_bitmap)
-               goto fail_free_ws;
-
-       if (line->w_err_gc->has_write_err) {
-               lba_list = line->w_err_gc->lba_list;
-               line->w_err_gc->lba_list = NULL;
-       } else {
-               lba_list = get_lba_list_from_emeta(pblk, line);
-               if (!lba_list) {
-                       pblk_err(pblk, "could not interpret emeta (line %d)\n",
-                                       line->id);
-                       goto fail_free_invalid_bitmap;
-               }
-       }
-
-       spin_lock(&line->lock);
-       bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
-       sec_left = pblk_line_vsc(line);
-       spin_unlock(&line->lock);
-
-       if (sec_left < 0) {
-               pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
-               goto fail_free_lba_list;
-       }
-
-       bit = -1;
-next_rq:
-       gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
-       if (!gc_rq)
-               goto fail_free_lba_list;
-
-       nr_secs = 0;
-       do {
-               bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
-                                                               bit + 1);
-               if (bit > line->emeta_ssec)
-                       break;
-
-               gc_rq->paddr_list[nr_secs] = bit;
-               gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
-       } while (nr_secs < pblk->max_write_pgs);
-
-       if (unlikely(!nr_secs)) {
-               kfree(gc_rq);
-               goto out;
-       }
-
-       gc_rq->nr_secs = nr_secs;
-       gc_rq->line = line;
-
-       gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
-       if (!gc_rq->data)
-               goto fail_free_gc_rq;
-
-       gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
-       if (!gc_rq_ws)
-               goto fail_free_gc_data;
-
-       gc_rq_ws->pblk = pblk;
-       gc_rq_ws->line = line;
-       gc_rq_ws->priv = gc_rq;
-
-       /* The write GC path can be much slower than the read GC one due to
-        * the budget imposed by the rate-limiter. Balance them in case we get
-        * back pressure from the write GC path.
-        */
-       while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
-               io_schedule();
-
-       kref_get(&line->ref);
-
-       INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
-       queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);
-
-       sec_left -= nr_secs;
-       if (sec_left > 0)
-               goto next_rq;
-
-out:
-       kvfree(lba_list);
-       kfree(line_ws);
-       kfree(invalid_bitmap);
-
-       kref_put(&line->ref, pblk_line_put);
-       atomic_dec(&gc->read_inflight_gc);
-
-       return;
-
-fail_free_gc_data:
-       vfree(gc_rq->data);
-fail_free_gc_rq:
-       kfree(gc_rq);
-fail_free_lba_list:
-       kvfree(lba_list);
-fail_free_invalid_bitmap:
-       kfree(invalid_bitmap);
-fail_free_ws:
-       kfree(line_ws);
-
-       /* The line goes back to the closed state, so we cannot release the
-        * additional line reference; that is only done on the gc-to-free
-        * line state transition.
-        */
-       pblk_put_line_back(pblk, line);
-       atomic_dec(&gc->read_inflight_gc);
-
-       pblk_err(pblk, "failed to GC line %d\n", line->id);
-}
-
-static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_gc *gc = &pblk->gc;
-       struct pblk_line_ws *line_ws;
-
-       pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);
-
-       line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
-       if (!line_ws)
-               return -ENOMEM;
-
-       line_ws->pblk = pblk;
-       line_ws->line = line;
-
-       atomic_inc(&gc->pipeline_gc);
-       INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
-       queue_work(gc->gc_reader_wq, &line_ws->ws);
-
-       return 0;
-}
-
-static void pblk_gc_reader_kick(struct pblk_gc *gc)
-{
-       wake_up_process(gc->gc_reader_ts);
-}
-
-static void pblk_gc_kick(struct pblk *pblk)
-{
-       struct pblk_gc *gc = &pblk->gc;
-
-       pblk_gc_writer_kick(gc);
-       pblk_gc_reader_kick(gc);
-
-       /* If we're shutting down GC, let's not start it up again */
-       if (gc->gc_enabled) {
-               wake_up_process(gc->gc_ts);
-               mod_timer(&gc->gc_timer,
-                         jiffies + msecs_to_jiffies(GC_TIME_MSECS));
-       }
-}
-
-static int pblk_gc_read(struct pblk *pblk)
-{
-       struct pblk_gc *gc = &pblk->gc;
-       struct pblk_line *line;
-
-       spin_lock(&gc->r_lock);
-       if (list_empty(&gc->r_list)) {
-               spin_unlock(&gc->r_lock);
-               return 1;
-       }
-
-       line = list_first_entry(&gc->r_list, struct pblk_line, list);
-       list_del(&line->list);
-       spin_unlock(&gc->r_lock);
-
-       pblk_gc_kick(pblk);
-
-       if (pblk_gc_line(pblk, line)) {
-               pblk_err(pblk, "failed to GC line %d\n", line->id);
-               /* rollback */
-               spin_lock(&gc->r_lock);
-               list_add_tail(&line->list, &gc->r_list);
-               spin_unlock(&gc->r_lock);
-       }
-
-       return 0;
-}
-
-static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
-                                                struct list_head *group_list)
-{
-       struct pblk_line *line, *victim;
-       unsigned int line_vsc = ~0x0L, victim_vsc = ~0x0L;
-
-       victim = list_first_entry(group_list, struct pblk_line, list);
-
-       list_for_each_entry(line, group_list, list) {
-               if (!atomic_read(&line->sec_to_update))
-                       line_vsc = le32_to_cpu(*line->vsc);
-               if (line_vsc < victim_vsc) {
-                       victim = line;
-                       victim_vsc = le32_to_cpu(*victim->vsc);
-               }
-       }
-
-       if (victim_vsc == ~0x0)
-               return NULL;
-
-       return victim;
-}
-
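The victim selection above is a greedy scan: within one GC group it keeps the
closed line with the lowest valid sector count (vsc), skipping lines that still
have sectors pending an update. A minimal userspace sketch of the same idea,
with a made-up struct standing in for the pblk line structures:

#include <stdio.h>
#include <limits.h>

/* Hypothetical stand-in for a pblk line: valid sector count plus a flag
 * saying whether some sectors are still being updated. */
struct line {
        int id;
        unsigned int vsc;       /* valid sector count */
        int sec_to_update;      /* non-zero: skip this candidate */
};

/* Return the index of the line with the fewest valid sectors that has no
 * pending updates, or -1 if no line qualifies. */
static int pick_victim(const struct line *lines, int n)
{
        unsigned int best_vsc = UINT_MAX;
        int victim = -1;

        for (int i = 0; i < n; i++) {
                if (lines[i].sec_to_update)
                        continue;
                if (lines[i].vsc < best_vsc) {
                        best_vsc = lines[i].vsc;
                        victim = i;
                }
        }
        return victim;
}

int main(void)
{
        struct line lines[] = {
                { .id = 0, .vsc = 120, .sec_to_update = 0 },
                { .id = 1, .vsc = 40,  .sec_to_update = 1 },
                { .id = 2, .vsc = 75,  .sec_to_update = 0 },
        };
        int v = pick_victim(lines, 3);

        if (v >= 0)
                printf("victim: line %d (vsc=%u)\n", lines[v].id, lines[v].vsc);
        return 0;
}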
-static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
-{
-       unsigned int nr_blocks_free, nr_blocks_need;
-       unsigned int werr_lines = atomic_read(&rl->werr_lines);
-
-       nr_blocks_need = pblk_rl_high_thrs(rl);
-       nr_blocks_free = pblk_rl_nr_free_blks(rl);
-
-       /* This is not critical, no need to take lock here */
-       return ((werr_lines > 0) ||
-               ((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
-}
-
-void pblk_gc_free_full_lines(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_gc *gc = &pblk->gc;
-       struct pblk_line *line;
-
-       do {
-               spin_lock(&l_mg->gc_lock);
-               if (list_empty(&l_mg->gc_full_list)) {
-                       spin_unlock(&l_mg->gc_lock);
-                       return;
-               }
-
-               line = list_first_entry(&l_mg->gc_full_list,
-                                                       struct pblk_line, list);
-
-               spin_lock(&line->lock);
-               WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
-               line->state = PBLK_LINESTATE_GC;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-               spin_unlock(&line->lock);
-
-               list_del(&line->list);
-               spin_unlock(&l_mg->gc_lock);
-
-               atomic_inc(&gc->pipeline_gc);
-               kref_put(&line->ref, pblk_line_put);
-       } while (1);
-}
-
-/*
- * Lines with no valid sectors will be returned to the free list immediately. If
- * GC is activated - either because the free block count is under the determined
- * threshold, or because it is being forced from user space - only lines with a
- * high count of invalid sectors will be recycled.
- */
-static void pblk_gc_run(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_gc *gc = &pblk->gc;
-       struct pblk_line *line;
-       struct list_head *group_list;
-       bool run_gc;
-       int read_inflight_gc, gc_group = 0, prev_group = 0;
-
-       pblk_gc_free_full_lines(pblk);
-
-       run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
-       if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
-               return;
-
-next_gc_group:
-       group_list = l_mg->gc_lists[gc_group++];
-
-       do {
-               spin_lock(&l_mg->gc_lock);
-
-               line = pblk_gc_get_victim_line(pblk, group_list);
-               if (!line) {
-                       spin_unlock(&l_mg->gc_lock);
-                       break;
-               }
-
-               spin_lock(&line->lock);
-               WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
-               line->state = PBLK_LINESTATE_GC;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-               spin_unlock(&line->lock);
-
-               list_del(&line->list);
-               spin_unlock(&l_mg->gc_lock);
-
-               spin_lock(&gc->r_lock);
-               list_add_tail(&line->list, &gc->r_list);
-               spin_unlock(&gc->r_lock);
-
-               read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
-               pblk_gc_reader_kick(gc);
-
-               prev_group = 1;
-
-               /* No need to queue up more GC lines than we can handle */
-               run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
-               if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
-                       break;
-       } while (1);
-
-       if (!prev_group && pblk->rl.rb_state > gc_group &&
-                                               gc_group < PBLK_GC_NR_LISTS)
-               goto next_gc_group;
-}
-
-static void pblk_gc_timer(struct timer_list *t)
-{
-       struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);
-
-       pblk_gc_kick(pblk);
-}
-
-static int pblk_gc_ts(void *data)
-{
-       struct pblk *pblk = data;
-
-       while (!kthread_should_stop()) {
-               pblk_gc_run(pblk);
-               set_current_state(TASK_INTERRUPTIBLE);
-               io_schedule();
-       }
-
-       return 0;
-}
-
-static int pblk_gc_writer_ts(void *data)
-{
-       struct pblk *pblk = data;
-
-       while (!kthread_should_stop()) {
-               if (!pblk_gc_write(pblk))
-                       continue;
-               set_current_state(TASK_INTERRUPTIBLE);
-               io_schedule();
-       }
-
-       return 0;
-}
-
-static int pblk_gc_reader_ts(void *data)
-{
-       struct pblk *pblk = data;
-       struct pblk_gc *gc = &pblk->gc;
-
-       while (!kthread_should_stop()) {
-               if (!pblk_gc_read(pblk))
-                       continue;
-               set_current_state(TASK_INTERRUPTIBLE);
-               io_schedule();
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
-               atomic_read(&gc->pipeline_gc));
-#endif
-
-       do {
-               if (!atomic_read(&gc->pipeline_gc))
-                       break;
-
-               schedule();
-       } while (1);
-
-       return 0;
-}
-
-static void pblk_gc_start(struct pblk *pblk)
-{
-       pblk->gc.gc_active = 1;
-       pblk_debug(pblk, "gc start\n");
-}
-
-void pblk_gc_should_start(struct pblk *pblk)
-{
-       struct pblk_gc *gc = &pblk->gc;
-
-       if (gc->gc_enabled && !gc->gc_active) {
-               pblk_gc_start(pblk);
-               pblk_gc_kick(pblk);
-       }
-}
-
-void pblk_gc_should_stop(struct pblk *pblk)
-{
-       struct pblk_gc *gc = &pblk->gc;
-
-       if (gc->gc_active && !gc->gc_forced)
-               gc->gc_active = 0;
-}
-
-void pblk_gc_should_kick(struct pblk *pblk)
-{
-       pblk_rl_update_rates(&pblk->rl);
-}
-
-void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
-                             int *gc_active)
-{
-       struct pblk_gc *gc = &pblk->gc;
-
-       spin_lock(&gc->lock);
-       *gc_enabled = gc->gc_enabled;
-       *gc_active = gc->gc_active;
-       spin_unlock(&gc->lock);
-}
-
-int pblk_gc_sysfs_force(struct pblk *pblk, int force)
-{
-       struct pblk_gc *gc = &pblk->gc;
-
-       if (force < 0 || force > 1)
-               return -EINVAL;
-
-       spin_lock(&gc->lock);
-       gc->gc_forced = force;
-
-       if (force)
-               gc->gc_enabled = 1;
-       else
-               gc->gc_enabled = 0;
-       spin_unlock(&gc->lock);
-
-       pblk_gc_should_start(pblk);
-
-       return 0;
-}
-
-int pblk_gc_init(struct pblk *pblk)
-{
-       struct pblk_gc *gc = &pblk->gc;
-       int ret;
-
-       gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
-       if (IS_ERR(gc->gc_ts)) {
-               pblk_err(pblk, "could not allocate GC main kthread\n");
-               return PTR_ERR(gc->gc_ts);
-       }
-
-       gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
-                                                       "pblk-gc-writer-ts");
-       if (IS_ERR(gc->gc_writer_ts)) {
-               pblk_err(pblk, "could not allocate GC writer kthread\n");
-               ret = PTR_ERR(gc->gc_writer_ts);
-               goto fail_free_main_kthread;
-       }
-
-       gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
-                                                       "pblk-gc-reader-ts");
-       if (IS_ERR(gc->gc_reader_ts)) {
-               pblk_err(pblk, "could not allocate GC reader kthread\n");
-               ret = PTR_ERR(gc->gc_reader_ts);
-               goto fail_free_writer_kthread;
-       }
-
-       timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
-       mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
-
-       gc->gc_active = 0;
-       gc->gc_forced = 0;
-       gc->gc_enabled = 1;
-       gc->w_entries = 0;
-       atomic_set(&gc->read_inflight_gc, 0);
-       atomic_set(&gc->pipeline_gc, 0);
-
-       /* Workqueue that reads valid sectors from a line and submits them to the
-        * GC writer to be recycled.
-        */
-       gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
-                       WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
-       if (!gc->gc_line_reader_wq) {
-               pblk_err(pblk, "could not allocate GC line reader workqueue\n");
-               ret = -ENOMEM;
-               goto fail_free_reader_kthread;
-       }
-
-       /* Workqueue that prepares lines for GC */
-       gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
-                                       WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
-       if (!gc->gc_reader_wq) {
-               pblk_err(pblk, "could not allocate GC reader workqueue\n");
-               ret = -ENOMEM;
-               goto fail_free_reader_line_wq;
-       }
-
-       spin_lock_init(&gc->lock);
-       spin_lock_init(&gc->w_lock);
-       spin_lock_init(&gc->r_lock);
-
-       sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);
-
-       INIT_LIST_HEAD(&gc->w_list);
-       INIT_LIST_HEAD(&gc->r_list);
-
-       return 0;
-
-fail_free_reader_line_wq:
-       destroy_workqueue(gc->gc_line_reader_wq);
-fail_free_reader_kthread:
-       kthread_stop(gc->gc_reader_ts);
-fail_free_writer_kthread:
-       kthread_stop(gc->gc_writer_ts);
-fail_free_main_kthread:
-       kthread_stop(gc->gc_ts);
-
-       return ret;
-}
-
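pblk_gc_init() above follows the usual kernel error-unwinding idiom: every
setup step gets a label, and a failure jumps to the label that releases
everything allocated so far, in reverse order. A small self-contained sketch of
the same pattern with plain malloc()/free() and invented resource names:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
        char *a, *b, *c;

        a = malloc(64);
        if (!a)
                return -1;

        b = malloc(64);
        if (!b)
                goto fail_free_a;

        c = malloc(64);
        if (!c)
                goto fail_free_b;

        /* ... use a, b and c here ... */

        free(c);
        free(b);
        free(a);
        return 0;

fail_free_b:
        free(b);
fail_free_a:
        free(a);
        return -1;
}

int main(void)
{
        printf("setup %s\n", setup() ? "failed" : "succeeded");
        return 0;
}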
-void pblk_gc_exit(struct pblk *pblk, bool graceful)
-{
-       struct pblk_gc *gc = &pblk->gc;
-
-       gc->gc_enabled = 0;
-       del_timer_sync(&gc->gc_timer);
-       gc->gc_active = 0;
-
-       if (gc->gc_ts)
-               kthread_stop(gc->gc_ts);
-
-       if (gc->gc_reader_ts)
-               kthread_stop(gc->gc_reader_ts);
-
-       if (graceful) {
-               flush_workqueue(gc->gc_reader_wq);
-               flush_workqueue(gc->gc_line_reader_wq);
-       }
-
-       destroy_workqueue(gc->gc_reader_wq);
-       destroy_workqueue(gc->gc_line_reader_wq);
-
-       if (gc->gc_writer_ts)
-               kthread_stop(gc->gc_writer_ts);
-}
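The GC threads in this file share one sleep/kick pattern: a kthread marks
itself TASK_INTERRUPTIBLE and schedules out, and producers wake it with
wake_up_process(), either directly or from the self-rearming gc_timer. A rough
userspace analogue, assuming POSIX threads (compile with -pthread), parks a
worker on a condition variable that a kick() signals:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool kicked;
static bool stop;

/* Worker: sleep until kicked, run one "GC pass", repeat until told to stop. */
static void *worker(void *arg)
{
        (void)arg;

        pthread_mutex_lock(&lock);
        for (;;) {
                while (!kicked && !stop)
                        pthread_cond_wait(&cond, &lock);
                if (stop)
                        break;
                kicked = false;
                pthread_mutex_unlock(&lock);

                printf("worker: running one GC pass\n");

                pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* Producer side: the rough equivalent of wake_up_process(). */
static void kick(void)
{
        pthread_mutex_lock(&lock);
        kicked = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        kick();
        sleep(1);

        pthread_mutex_lock(&lock);
        stop = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
}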
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
deleted file mode 100644 (file)
index 5924f09..0000000
+++ /dev/null
@@ -1,1324 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * Implementation of a physical block-device target for Open-channel SSDs.
- *
- * pblk-init.c - pblk's initialization.
- */
-
-#include "pblk.h"
-#include "pblk-trace.h"
-
-static unsigned int write_buffer_size;
-
-module_param(write_buffer_size, uint, 0644);
-MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
-
-struct pblk_global_caches {
-       struct kmem_cache       *ws;
-       struct kmem_cache       *rec;
-       struct kmem_cache       *g_rq;
-       struct kmem_cache       *w_rq;
-
-       struct kref             kref;
-
-       struct mutex            mutex; /* Ensures consistency between
-                                       * caches and kref
-                                       */
-};
-
-static struct pblk_global_caches pblk_caches = {
-       .mutex = __MUTEX_INITIALIZER(pblk_caches.mutex),
-       .kref = KREF_INIT(0),
-};
-
-struct bio_set pblk_bio_set;
-
-static blk_qc_t pblk_submit_bio(struct bio *bio)
-{
-       struct pblk *pblk = bio->bi_bdev->bd_disk->queue->queuedata;
-
-       if (bio_op(bio) == REQ_OP_DISCARD) {
-               pblk_discard(pblk, bio);
-               if (!(bio->bi_opf & REQ_PREFLUSH)) {
-                       bio_endio(bio);
-                       return BLK_QC_T_NONE;
-               }
-       }
-
-       /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
-        * constraint. Writes can be of arbitrary size.
-        */
-       if (bio_data_dir(bio) == READ) {
-               blk_queue_split(&bio);
-               pblk_submit_read(pblk, bio);
-       } else {
-               /* Prevent deadlock in the case of a modest LUN configuration
-                * and large user I/Os. Unless stalled, the rate limiter
-                * leaves at least 256KB available for user I/O.
-                */
-               if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
-                       blk_queue_split(&bio);
-
-               pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
-       }
-
-       return BLK_QC_T_NONE;
-}
-
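The 256 KB read bound in the comment above follows from the width of the
completion bitmap: with a 64-bit bitmap and, assuming the common 4 KiB sector
size, 64 sectors x 4 KiB = 256 KiB per read request, so anything larger is
split before submission.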
-static const struct block_device_operations pblk_bops = {
-       .owner          = THIS_MODULE,
-       .submit_bio     = pblk_submit_bio,
-};
-
-
-static size_t pblk_trans_map_size(struct pblk *pblk)
-{
-       int entry_size = 8;
-
-       if (pblk->addrf_len < 32)
-               entry_size = 4;
-
-       return entry_size * pblk->capacity;
-}
-
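pblk_trans_map_size() above sizes the L2P table at one entry per logical
sector, 4 bytes when the packed address format fits in 32 bits and 8 bytes
otherwise. A quick standalone calculation with invented numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Invented instance: 512 GiB of 4 KiB logical sectors. */
        uint64_t capacity = (512ULL << 30) >> 12;       /* number of sectors   */
        int addrf_len = 30;                             /* packed address bits */
        int entry_size = addrf_len < 32 ? 4 : 8;        /* bytes per entry     */

        printf("L2P table size: %llu MiB\n",
               (unsigned long long)((capacity * entry_size) >> 20));
        return 0;
}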
-#ifdef CONFIG_NVM_PBLK_DEBUG
-static u32 pblk_l2p_crc(struct pblk *pblk)
-{
-       size_t map_size;
-       u32 crc = ~(u32)0;
-
-       map_size = pblk_trans_map_size(pblk);
-       crc = crc32_le(crc, pblk->trans_map, map_size);
-       return crc;
-}
-#endif
-
-static void pblk_l2p_free(struct pblk *pblk)
-{
-       vfree(pblk->trans_map);
-}
-
-static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
-{
-       struct pblk_line *line = NULL;
-
-       if (factory_init) {
-               guid_gen(&pblk->instance_uuid);
-       } else {
-               line = pblk_recov_l2p(pblk);
-               if (IS_ERR(line)) {
-                       pblk_err(pblk, "could not recover l2p table\n");
-                       return -EFAULT;
-               }
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
-#endif
-
-       /* Free full lines directly as GC has not been started yet */
-       pblk_gc_free_full_lines(pblk);
-
-       if (!line) {
-               /* Configure next line for user data */
-               line = pblk_line_get_first_data(pblk);
-               if (!line)
-                       return -EFAULT;
-       }
-
-       return 0;
-}
-
-static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
-{
-       sector_t i;
-       struct ppa_addr ppa;
-       size_t map_size;
-       int ret = 0;
-
-       map_size = pblk_trans_map_size(pblk);
-       pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN |
-                                   __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM);
-       if (!pblk->trans_map) {
-               pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
-                               map_size);
-               return -ENOMEM;
-       }
-
-       pblk_ppa_set_empty(&ppa);
-
-       for (i = 0; i < pblk->capacity; i++)
-               pblk_trans_map_set(pblk, i, ppa);
-
-       ret = pblk_l2p_recover(pblk, factory_init);
-       if (ret)
-               vfree(pblk->trans_map);
-
-       return ret;
-}
-
-static void pblk_rwb_free(struct pblk *pblk)
-{
-       if (pblk_rb_tear_down_check(&pblk->rwb))
-               pblk_err(pblk, "write buffer error on tear down\n");
-
-       pblk_rb_free(&pblk->rwb);
-}
-
-static int pblk_rwb_init(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       unsigned long buffer_size;
-       int pgs_in_buffer, threshold;
-
-       threshold = geo->mw_cunits * geo->all_luns;
-       pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
-                                                               * geo->all_luns;
-
-       if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
-               buffer_size = write_buffer_size;
-       else
-               buffer_size = pgs_in_buffer;
-
-       return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
-}
-
-static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
-                            struct nvm_addrf_12 *dst)
-{
-       struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
-       int power_len;
-
-       /* Re-calculate channel and lun format to adapt to configuration */
-       power_len = get_count_order(geo->num_ch);
-       if (1 << power_len != geo->num_ch) {
-               pblk_err(pblk, "supports only power-of-two channel config.\n");
-               return -EINVAL;
-       }
-       dst->ch_len = power_len;
-
-       power_len = get_count_order(geo->num_lun);
-       if (1 << power_len != geo->num_lun) {
-               pblk_err(pblk, "supports only power-of-two LUN config.\n");
-               return -EINVAL;
-       }
-       dst->lun_len = power_len;
-
-       dst->blk_len = src->blk_len;
-       dst->pg_len = src->pg_len;
-       dst->pln_len = src->pln_len;
-       dst->sec_len = src->sec_len;
-
-       dst->sec_offset = 0;
-       dst->pln_offset = dst->sec_len;
-       dst->ch_offset = dst->pln_offset + dst->pln_len;
-       dst->lun_offset = dst->ch_offset + dst->ch_len;
-       dst->pg_offset = dst->lun_offset + dst->lun_len;
-       dst->blk_offset = dst->pg_offset + dst->pg_len;
-
-       dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
-       dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
-       dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
-       dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
-       dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
-       dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
-
-       return dst->blk_offset + src->blk_len;
-}
-
-static int pblk_set_addrf_20(struct nvm_geo *geo, struct nvm_addrf *adst,
-                            struct pblk_addrf *udst)
-{
-       struct nvm_addrf *src = &geo->addrf;
-
-       adst->ch_len = get_count_order(geo->num_ch);
-       adst->lun_len = get_count_order(geo->num_lun);
-       adst->chk_len = src->chk_len;
-       adst->sec_len = src->sec_len;
-
-       adst->sec_offset = 0;
-       adst->ch_offset = adst->sec_len;
-       adst->lun_offset = adst->ch_offset + adst->ch_len;
-       adst->chk_offset = adst->lun_offset + adst->lun_len;
-
-       adst->sec_mask = ((1ULL << adst->sec_len) - 1) << adst->sec_offset;
-       adst->chk_mask = ((1ULL << adst->chk_len) - 1) << adst->chk_offset;
-       adst->lun_mask = ((1ULL << adst->lun_len) - 1) << adst->lun_offset;
-       adst->ch_mask = ((1ULL << adst->ch_len) - 1) << adst->ch_offset;
-
-       udst->sec_stripe = geo->ws_opt;
-       udst->ch_stripe = geo->num_ch;
-       udst->lun_stripe = geo->num_lun;
-
-       udst->sec_lun_stripe = udst->sec_stripe * udst->ch_stripe;
-       udst->sec_ws_stripe = udst->sec_lun_stripe * udst->lun_stripe;
-
-       return adst->chk_offset + adst->chk_len;
-}
-
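pblk_set_addrf_12()/_20() above lay consecutive bit fields out in a 64-bit
physical address: each field gets an offset equal to the sum of the lengths
below it, and a mask of ((1 << len) - 1) << offset. A small standalone sketch
of packing and unpacking such an address, with invented field widths:

#include <stdio.h>
#include <stdint.h>

struct addrf {
        int sec_len, ch_len, lun_len, chk_len;
        int sec_off, ch_off, lun_off, chk_off;
};

static void addrf_init(struct addrf *f)
{
        /* Invented geometry: 12 sector bits, 3 channel bits,
         * 4 LUN bits, 10 chunk bits. */
        f->sec_len = 12;
        f->ch_len = 3;
        f->lun_len = 4;
        f->chk_len = 10;

        f->sec_off = 0;
        f->ch_off  = f->sec_off + f->sec_len;
        f->lun_off = f->ch_off + f->ch_len;
        f->chk_off = f->lun_off + f->lun_len;
}

static uint64_t pack(const struct addrf *f, uint64_t chk, uint64_t lun,
                     uint64_t ch, uint64_t sec)
{
        return (chk << f->chk_off) | (lun << f->lun_off) |
               (ch << f->ch_off) | (sec << f->sec_off);
}

static uint64_t unpack(const struct addrf *f, uint64_t ppa, int off, int len)
{
        (void)f;
        return (ppa >> off) & ((1ULL << len) - 1);
}

int main(void)
{
        struct addrf f;
        uint64_t ppa;

        addrf_init(&f);
        ppa = pack(&f, 7, 2, 1, 100);

        printf("ppa=0x%llx chk=%llu lun=%llu ch=%llu sec=%llu\n",
               (unsigned long long)ppa,
               (unsigned long long)unpack(&f, ppa, f.chk_off, f.chk_len),
               (unsigned long long)unpack(&f, ppa, f.lun_off, f.lun_len),
               (unsigned long long)unpack(&f, ppa, f.ch_off, f.ch_len),
               (unsigned long long)unpack(&f, ppa, f.sec_off, f.sec_len));
        return 0;
}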
-static int pblk_set_addrf(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int mod;
-
-       switch (geo->version) {
-       case NVM_OCSSD_SPEC_12:
-               div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
-               if (mod) {
-                       pblk_err(pblk, "bad configuration of sectors/pages\n");
-                       return -EINVAL;
-               }
-
-               pblk->addrf_len = pblk_set_addrf_12(pblk, geo,
-                                                       (void *)&pblk->addrf);
-               break;
-       case NVM_OCSSD_SPEC_20:
-               pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
-                                                       &pblk->uaddrf);
-               break;
-       default:
-               pblk_err(pblk, "OCSSD revision not supported (%d)\n",
-                                                               geo->version);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int pblk_create_global_caches(void)
-{
-
-       pblk_caches.ws = kmem_cache_create("pblk_blk_ws",
-                               sizeof(struct pblk_line_ws), 0, 0, NULL);
-       if (!pblk_caches.ws)
-               return -ENOMEM;
-
-       pblk_caches.rec = kmem_cache_create("pblk_rec",
-                               sizeof(struct pblk_rec_ctx), 0, 0, NULL);
-       if (!pblk_caches.rec)
-               goto fail_destroy_ws;
-
-       pblk_caches.g_rq = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
-                               0, 0, NULL);
-       if (!pblk_caches.g_rq)
-               goto fail_destroy_rec;
-
-       pblk_caches.w_rq = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
-                               0, 0, NULL);
-       if (!pblk_caches.w_rq)
-               goto fail_destroy_g_rq;
-
-       return 0;
-
-fail_destroy_g_rq:
-       kmem_cache_destroy(pblk_caches.g_rq);
-fail_destroy_rec:
-       kmem_cache_destroy(pblk_caches.rec);
-fail_destroy_ws:
-       kmem_cache_destroy(pblk_caches.ws);
-
-       return -ENOMEM;
-}
-
-static int pblk_get_global_caches(void)
-{
-       int ret = 0;
-
-       mutex_lock(&pblk_caches.mutex);
-
-       if (kref_get_unless_zero(&pblk_caches.kref))
-               goto out;
-
-       ret = pblk_create_global_caches();
-       if (!ret)
-               kref_init(&pblk_caches.kref);
-
-out:
-       mutex_unlock(&pblk_caches.mutex);
-       return ret;
-}
-
-static void pblk_destroy_global_caches(struct kref *ref)
-{
-       struct pblk_global_caches *c;
-
-       c = container_of(ref, struct pblk_global_caches, kref);
-
-       kmem_cache_destroy(c->ws);
-       kmem_cache_destroy(c->rec);
-       kmem_cache_destroy(c->g_rq);
-       kmem_cache_destroy(c->w_rq);
-}
-
-static void pblk_put_global_caches(void)
-{
-       mutex_lock(&pblk_caches.mutex);
-       kref_put(&pblk_caches.kref, pblk_destroy_global_caches);
-       mutex_unlock(&pblk_caches.mutex);
-}
-
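The global-cache helpers above implement a mutex-protected, reference-counted
singleton: the first pblk instance creates the caches, later instances only
take a reference, and the last put destroys them. A minimal userspace analogue
of that get/put pattern, using pthreads and plain counters (names are made up):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t caches_lock = PTHREAD_MUTEX_INITIALIZER;
static int caches_refs;           /* 0 means "not created yet"     */
static void *caches;              /* stand-in for the kmem caches  */

static int get_caches(void)
{
        int ret = 0;

        pthread_mutex_lock(&caches_lock);
        if (caches_refs) {                /* kref_get_unless_zero() analogue */
                caches_refs++;
                goto out;
        }

        caches = malloc(128);             /* "create the caches"             */
        if (!caches) {
                ret = -1;
                goto out;
        }
        caches_refs = 1;                  /* kref_init() analogue            */
out:
        pthread_mutex_unlock(&caches_lock);
        return ret;
}

static void put_caches(void)
{
        pthread_mutex_lock(&caches_lock);
        if (--caches_refs == 0) {         /* last user tears down            */
                free(caches);
                caches = NULL;
        }
        pthread_mutex_unlock(&caches_lock);
}

int main(void)
{
        if (get_caches())                 /* first instance creates          */
                return 1;
        if (get_caches())                 /* second instance shares          */
                return 1;
        put_caches();
        put_caches();                     /* last put frees                  */
        printf("caches torn down: %s\n", caches ? "no" : "yes");
        return 0;
}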
-static int pblk_core_init(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int ret, max_write_ppas;
-
-       atomic64_set(&pblk->user_wa, 0);
-       atomic64_set(&pblk->pad_wa, 0);
-       atomic64_set(&pblk->gc_wa, 0);
-       pblk->user_rst_wa = 0;
-       pblk->pad_rst_wa = 0;
-       pblk->gc_rst_wa = 0;
-
-       atomic64_set(&pblk->nr_flush, 0);
-       pblk->nr_flush_rst = 0;
-
-       pblk->min_write_pgs = geo->ws_opt;
-       pblk->min_write_pgs_data = pblk->min_write_pgs;
-       max_write_ppas = pblk->min_write_pgs * geo->all_luns;
-       pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
-       pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
-               queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
-       pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
-
-       pblk->oob_meta_size = geo->sos;
-       if (!pblk_is_oob_meta_supported(pblk)) {
-               /* For drives that do not have the OOB metadata feature,
-                * we need to use so-called packed metadata in order to
-                * support recovery. Packed metadata stores the same
-                * information as OOB metadata (the l2p table mapping),
-                * but in the form of a single page at the end of every
-                * write request.
-                */
-               if (pblk->min_write_pgs
-                       * sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
-                       /* We want to keep all the packed metadata on a single
-                        * page per write request, so we need to ensure that
-                        * it will fit.
-                        *
-                        * This is more of a sanity check, since there is
-                        * no device with such a large minimal write size
-                        * (above 1 megabyte).
-                        */
-                       pblk_err(pblk, "Not supported min write size\n");
-                       return -EINVAL;
-               }
-               /* The packed metadata approach makes a simplification:
-                * on the read path we always issue requests whose size
-                * equals max_write_pgs, with all pages filled with user
-                * payload except the last page, which holds the packed
-                * metadata.
-                */
-               pblk->max_write_pgs = pblk->min_write_pgs;
-               pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
-       }
-
-       pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
-                                                               GFP_KERNEL);
-       if (!pblk->pad_dist)
-               return -ENOMEM;
-
-       if (pblk_get_global_caches())
-               goto fail_free_pad_dist;
-
-       /* Internal bios can span at most the number of sectors signaled by
-        * the device.
-        */
-       ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
-       if (ret)
-               goto free_global_caches;
-
-       ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
-                                    pblk_caches.ws);
-       if (ret)
-               goto free_page_bio_pool;
-
-       ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
-                                    pblk_caches.rec);
-       if (ret)
-               goto free_gen_ws_pool;
-
-       ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
-                                    pblk_caches.g_rq);
-       if (ret)
-               goto free_rec_pool;
-
-       ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
-                                    pblk_caches.g_rq);
-       if (ret)
-               goto free_r_rq_pool;
-
-       ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
-                                    pblk_caches.w_rq);
-       if (ret)
-               goto free_e_rq_pool;
-
-       pblk->close_wq = alloc_workqueue("pblk-close-wq",
-                       WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
-       if (!pblk->close_wq)
-               goto free_w_rq_pool;
-
-       pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
-                       WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
-       if (!pblk->bb_wq)
-               goto free_close_wq;
-
-       pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
-                       WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
-       if (!pblk->r_end_wq)
-               goto free_bb_wq;
-
-       if (pblk_set_addrf(pblk))
-               goto free_r_end_wq;
-
-       INIT_LIST_HEAD(&pblk->compl_list);
-       INIT_LIST_HEAD(&pblk->resubmit_list);
-
-       return 0;
-
-free_r_end_wq:
-       destroy_workqueue(pblk->r_end_wq);
-free_bb_wq:
-       destroy_workqueue(pblk->bb_wq);
-free_close_wq:
-       destroy_workqueue(pblk->close_wq);
-free_w_rq_pool:
-       mempool_exit(&pblk->w_rq_pool);
-free_e_rq_pool:
-       mempool_exit(&pblk->e_rq_pool);
-free_r_rq_pool:
-       mempool_exit(&pblk->r_rq_pool);
-free_rec_pool:
-       mempool_exit(&pblk->rec_pool);
-free_gen_ws_pool:
-       mempool_exit(&pblk->gen_ws_pool);
-free_page_bio_pool:
-       mempool_exit(&pblk->page_bio_pool);
-free_global_caches:
-       pblk_put_global_caches();
-fail_free_pad_dist:
-       kfree(pblk->pad_dist);
-       return -ENOMEM;
-}
-
-static void pblk_core_free(struct pblk *pblk)
-{
-       if (pblk->close_wq)
-               destroy_workqueue(pblk->close_wq);
-
-       if (pblk->r_end_wq)
-               destroy_workqueue(pblk->r_end_wq);
-
-       if (pblk->bb_wq)
-               destroy_workqueue(pblk->bb_wq);
-
-       mempool_exit(&pblk->page_bio_pool);
-       mempool_exit(&pblk->gen_ws_pool);
-       mempool_exit(&pblk->rec_pool);
-       mempool_exit(&pblk->r_rq_pool);
-       mempool_exit(&pblk->e_rq_pool);
-       mempool_exit(&pblk->w_rq_pool);
-
-       pblk_put_global_caches();
-       kfree(pblk->pad_dist);
-}
-
-static void pblk_line_mg_free(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       int i;
-
-       kfree(l_mg->bb_template);
-       kfree(l_mg->bb_aux);
-       kfree(l_mg->vsc_list);
-
-       for (i = 0; i < PBLK_DATA_LINES; i++) {
-               kfree(l_mg->sline_meta[i]);
-               kvfree(l_mg->eline_meta[i]->buf);
-               kfree(l_mg->eline_meta[i]);
-       }
-
-       mempool_destroy(l_mg->bitmap_pool);
-       kmem_cache_destroy(l_mg->bitmap_cache);
-}
-
-static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
-                               struct pblk_line *line)
-{
-       struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
-
-       kfree(line->blk_bitmap);
-       kfree(line->erase_bitmap);
-       kfree(line->chks);
-
-       kvfree(w_err_gc->lba_list);
-       kfree(w_err_gc);
-}
-
-static void pblk_lines_free(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *line;
-       int i;
-
-       for (i = 0; i < l_mg->nr_lines; i++) {
-               line = &pblk->lines[i];
-
-               pblk_line_free(line);
-               pblk_line_meta_free(l_mg, line);
-       }
-
-       pblk_line_mg_free(pblk);
-
-       kfree(pblk->luns);
-       kfree(pblk->lines);
-}
-
-static int pblk_luns_init(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_lun *rlun;
-       int i;
-
-       /* TODO: Implement unbalanced LUN support */
-       if (geo->num_lun < 0) {
-               pblk_err(pblk, "unbalanced LUN config.\n");
-               return -EINVAL;
-       }
-
-       pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
-                                                               GFP_KERNEL);
-       if (!pblk->luns)
-               return -ENOMEM;
-
-       for (i = 0; i < geo->all_luns; i++) {
-               /* Stripe across channels */
-               int ch = i % geo->num_ch;
-               int lun_raw = i / geo->num_ch;
-               int lunid = lun_raw + ch * geo->num_lun;
-
-               rlun = &pblk->luns[i];
-               rlun->bppa = dev->luns[lunid];
-
-               sema_init(&rlun->wr_sem, 1);
-       }
-
-       return 0;
-}
-
-/* See comment over struct line_emeta definition */
-static unsigned int calc_emeta_len(struct pblk *pblk)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-
-       /* Round to sector size so that lba_list starts on its own sector */
-       lm->emeta_sec[1] = DIV_ROUND_UP(
-                       sizeof(struct line_emeta) + lm->blk_bitmap_len +
-                       sizeof(struct wa_counters), geo->csecs);
-       lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;
-
-       /* Round to sector size so that vsc_list starts on its own sector */
-       lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
-       lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
-                       geo->csecs);
-       lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;
-
-       lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
-                       geo->csecs);
-       lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;
-
-       lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);
-
-       return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
-}
-
-static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct nvm_geo *geo = &dev->geo;
-       sector_t provisioned;
-       int sec_meta, blk_meta, clba;
-       int minimum;
-
-       if (geo->op == NVM_TARGET_DEFAULT_OP)
-               pblk->op = PBLK_DEFAULT_OP;
-       else
-               pblk->op = geo->op;
-
-       minimum = pblk_get_min_chks(pblk);
-       provisioned = nr_free_chks;
-       provisioned *= (100 - pblk->op);
-       sector_div(provisioned, 100);
-
-       if ((nr_free_chks - provisioned) < minimum) {
-               if (geo->op != NVM_TARGET_DEFAULT_OP) {
-                       pblk_err(pblk, "OP too small to create a sane instance\n");
-                       return -EINTR;
-               }
-
-               /* If the user did not specify an OP value, and PBLK_DEFAULT_OP
-                * is not enough, calculate and set sane value
-                */
-
-               provisioned = nr_free_chks - minimum;
-               pblk->op =  (100 * minimum) / nr_free_chks;
-               pblk_info(pblk, "Default OP insufficient, adjusting OP to %d\n",
-                               pblk->op);
-       }
-
-       pblk->op_blks = nr_free_chks - provisioned;
-
-       /* Internally pblk manages all free blocks, but all calculations based
-        * on user capacity consider only provisioned blocks
-        */
-       pblk->rl.total_blocks = nr_free_chks;
-
-       /* Consider sectors used for metadata */
-       sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
-       blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
-
-       clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data;
-       pblk->capacity = (provisioned - blk_meta) * clba;
-
-       atomic_set(&pblk->rl.free_blocks, nr_free_chks);
-       atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);
-
-       return 0;
-}
-
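The provisioning math above keeps an over-provisioning (OP) share of the free
chunks for pblk itself and exposes the rest as user capacity. With invented
numbers, assuming op = 11 (per cent), 1000 free chunks and 4096 sectors per
chunk:

    provisioned = 1000 * (100 - 11) / 100 = 890 chunks
    op_blks     = 1000 - 890              = 110 chunks held back
    capacity    ~ 890 * 4096              = 3,645,440 sectors
                                            (about 13.9 GiB at 4 KiB per sector)

The real calculation additionally subtracts the chunks consumed by per-line
metadata (blk_meta) and scales clba down when packed metadata steals a page
from every write.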
-static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
-                                  struct nvm_chk_meta *meta)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       int i, nr_bad_chks = 0;
-
-       for (i = 0; i < lm->blk_per_line; i++) {
-               struct pblk_lun *rlun = &pblk->luns[i];
-               struct nvm_chk_meta *chunk;
-               struct nvm_chk_meta *chunk_meta;
-               struct ppa_addr ppa;
-               int pos;
-
-               ppa = rlun->bppa;
-               pos = pblk_ppa_to_pos(geo, ppa);
-               chunk = &line->chks[pos];
-
-               ppa.m.chk = line->id;
-               chunk_meta = pblk_chunk_get_off(pblk, meta, ppa);
-
-               chunk->state = chunk_meta->state;
-               chunk->type = chunk_meta->type;
-               chunk->wi = chunk_meta->wi;
-               chunk->slba = chunk_meta->slba;
-               chunk->cnlb = chunk_meta->cnlb;
-               chunk->wp = chunk_meta->wp;
-
-               trace_pblk_chunk_state(pblk_disk_name(pblk), &ppa,
-                                       chunk->state);
-
-               if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
-                       WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
-                       continue;
-               }
-
-               if (!(chunk->state & NVM_CHK_ST_OFFLINE))
-                       continue;
-
-               set_bit(pos, line->blk_bitmap);
-               nr_bad_chks++;
-       }
-
-       return nr_bad_chks;
-}
-
-static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
-                                void *chunk_meta, int line_id)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       long nr_bad_chks, chk_in_line;
-
-       line->pblk = pblk;
-       line->id = line_id;
-       line->type = PBLK_LINETYPE_FREE;
-       line->state = PBLK_LINESTATE_NEW;
-       line->gc_group = PBLK_LINEGC_NONE;
-       line->vsc = &l_mg->vsc_list[line_id];
-       spin_lock_init(&line->lock);
-
-       nr_bad_chks = pblk_setup_line_meta_chk(pblk, line, chunk_meta);
-
-       chk_in_line = lm->blk_per_line - nr_bad_chks;
-       if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
-                                       chk_in_line < lm->min_blk_line) {
-               line->state = PBLK_LINESTATE_BAD;
-               list_add_tail(&line->list, &l_mg->bad_list);
-               return 0;
-       }
-
-       atomic_set(&line->blk_in_line, chk_in_line);
-       list_add_tail(&line->list, &l_mg->free_list);
-       l_mg->nr_free_lines++;
-
-       return chk_in_line;
-}
-
-static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-
-       line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
-       if (!line->blk_bitmap)
-               return -ENOMEM;
-
-       line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
-       if (!line->erase_bitmap)
-               goto free_blk_bitmap;
-
-
-       line->chks = kmalloc_array(lm->blk_per_line,
-                                  sizeof(struct nvm_chk_meta), GFP_KERNEL);
-       if (!line->chks)
-               goto free_erase_bitmap;
-
-       line->w_err_gc = kzalloc(sizeof(struct pblk_w_err_gc), GFP_KERNEL);
-       if (!line->w_err_gc)
-               goto free_chks;
-
-       return 0;
-
-free_chks:
-       kfree(line->chks);
-free_erase_bitmap:
-       kfree(line->erase_bitmap);
-free_blk_bitmap:
-       kfree(line->blk_bitmap);
-       return -ENOMEM;
-}
-
-static int pblk_line_mg_init(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       int i, bb_distance;
-
-       l_mg->nr_lines = geo->num_chk;
-       l_mg->log_line = l_mg->data_line = NULL;
-       l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
-       l_mg->nr_free_lines = 0;
-       bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
-
-       INIT_LIST_HEAD(&l_mg->free_list);
-       INIT_LIST_HEAD(&l_mg->corrupt_list);
-       INIT_LIST_HEAD(&l_mg->bad_list);
-       INIT_LIST_HEAD(&l_mg->gc_full_list);
-       INIT_LIST_HEAD(&l_mg->gc_high_list);
-       INIT_LIST_HEAD(&l_mg->gc_mid_list);
-       INIT_LIST_HEAD(&l_mg->gc_low_list);
-       INIT_LIST_HEAD(&l_mg->gc_empty_list);
-       INIT_LIST_HEAD(&l_mg->gc_werr_list);
-
-       INIT_LIST_HEAD(&l_mg->emeta_list);
-
-       l_mg->gc_lists[0] = &l_mg->gc_werr_list;
-       l_mg->gc_lists[1] = &l_mg->gc_high_list;
-       l_mg->gc_lists[2] = &l_mg->gc_mid_list;
-       l_mg->gc_lists[3] = &l_mg->gc_low_list;
-
-       spin_lock_init(&l_mg->free_lock);
-       spin_lock_init(&l_mg->close_lock);
-       spin_lock_init(&l_mg->gc_lock);
-
-       l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
-       if (!l_mg->vsc_list)
-               goto fail;
-
-       l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
-       if (!l_mg->bb_template)
-               goto fail_free_vsc_list;
-
-       l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
-       if (!l_mg->bb_aux)
-               goto fail_free_bb_template;
-
-       /* smeta is always small enough to fit in a kmalloc allocation;
-        * emeta size depends on the number of LUNs allocated to the pblk
-        * instance.
-        */
-       for (i = 0; i < PBLK_DATA_LINES; i++) {
-               l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
-               if (!l_mg->sline_meta[i])
-                       goto fail_free_smeta;
-       }
-
-       l_mg->bitmap_cache = kmem_cache_create("pblk_lm_bitmap",
-                       lm->sec_bitmap_len, 0, 0, NULL);
-       if (!l_mg->bitmap_cache)
-               goto fail_free_smeta;
-
-       /* the bitmap pool is used for both valid and map bitmaps */
-       l_mg->bitmap_pool = mempool_create_slab_pool(PBLK_DATA_LINES * 2,
-                               l_mg->bitmap_cache);
-       if (!l_mg->bitmap_pool)
-               goto fail_destroy_bitmap_cache;
-
-       /* emeta allocates three different buffers for managing metadata with
-        * in-memory and in-media layouts
-        */
-       for (i = 0; i < PBLK_DATA_LINES; i++) {
-               struct pblk_emeta *emeta;
-
-               emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
-               if (!emeta)
-                       goto fail_free_emeta;
-
-               emeta->buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
-               if (!emeta->buf) {
-                       kfree(emeta);
-                       goto fail_free_emeta;
-               }
-
-               emeta->nr_entries = lm->emeta_sec[0];
-               l_mg->eline_meta[i] = emeta;
-       }
-
-       for (i = 0; i < l_mg->nr_lines; i++)
-               l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);
-
-       bb_distance = (geo->all_luns) * geo->ws_opt;
-       for (i = 0; i < lm->sec_per_line; i += bb_distance)
-               bitmap_set(l_mg->bb_template, i, geo->ws_opt);
-
-       return 0;
-
-fail_free_emeta:
-       while (--i >= 0) {
-               kvfree(l_mg->eline_meta[i]->buf);
-               kfree(l_mg->eline_meta[i]);
-       }
-
-       mempool_destroy(l_mg->bitmap_pool);
-fail_destroy_bitmap_cache:
-       kmem_cache_destroy(l_mg->bitmap_cache);
-fail_free_smeta:
-       for (i = 0; i < PBLK_DATA_LINES; i++)
-               kfree(l_mg->sline_meta[i]);
-       kfree(l_mg->bb_aux);
-fail_free_bb_template:
-       kfree(l_mg->bb_template);
-fail_free_vsc_list:
-       kfree(l_mg->vsc_list);
-fail:
-       return -ENOMEM;
-}
-
-static int pblk_line_meta_init(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       unsigned int smeta_len, emeta_len;
-       int i;
-
-       lm->sec_per_line = geo->clba * geo->all_luns;
-       lm->blk_per_line = geo->all_luns;
-       lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
-       lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
-       lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
-       lm->mid_thrs = lm->sec_per_line / 2;
-       lm->high_thrs = lm->sec_per_line / 4;
-       lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;
-
-       /* Calculate necessary pages for smeta. See comment over struct
-        * line_smeta definition
-        */
-       i = 1;
-add_smeta_page:
-       lm->smeta_sec = i * geo->ws_opt;
-       lm->smeta_len = lm->smeta_sec * geo->csecs;
-
-       smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
-       if (smeta_len > lm->smeta_len) {
-               i++;
-               goto add_smeta_page;
-       }
-
-       /* Calculate necessary pages for emeta. See comment over struct
-        * line_emeta definition
-        */
-       i = 1;
-add_emeta_page:
-       lm->emeta_sec[0] = i * geo->ws_opt;
-       lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;
-
-       emeta_len = calc_emeta_len(pblk);
-       if (emeta_len > lm->emeta_len[0]) {
-               i++;
-               goto add_emeta_page;
-       }
-
-       lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;
-
-       lm->min_blk_line = 1;
-       if (geo->all_luns > 1)
-               lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
-                                       lm->emeta_sec[0], geo->clba);
-
-       if (lm->min_blk_line > lm->blk_per_line) {
-               pblk_err(pblk, "config. not supported. Min. LUN in line:%d\n",
-                                                       lm->blk_per_line);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int pblk_lines_init(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *line;
-       void *chunk_meta;
-       int nr_free_chks = 0;
-       int i, ret;
-
-       ret = pblk_line_meta_init(pblk);
-       if (ret)
-               return ret;
-
-       ret = pblk_line_mg_init(pblk);
-       if (ret)
-               return ret;
-
-       ret = pblk_luns_init(pblk);
-       if (ret)
-               goto fail_free_meta;
-
-       chunk_meta = pblk_get_chunk_meta(pblk);
-       if (IS_ERR(chunk_meta)) {
-               ret = PTR_ERR(chunk_meta);
-               goto fail_free_luns;
-       }
-
-       pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
-                                                               GFP_KERNEL);
-       if (!pblk->lines) {
-               ret = -ENOMEM;
-               goto fail_free_chunk_meta;
-       }
-
-       for (i = 0; i < l_mg->nr_lines; i++) {
-               line = &pblk->lines[i];
-
-               ret = pblk_alloc_line_meta(pblk, line);
-               if (ret)
-                       goto fail_free_lines;
-
-               nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);
-
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                                               line->state);
-       }
-
-       if (!nr_free_chks) {
-               pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
-               ret = -EINTR;
-               goto fail_free_lines;
-       }
-
-       ret = pblk_set_provision(pblk, nr_free_chks);
-       if (ret)
-               goto fail_free_lines;
-
-       vfree(chunk_meta);
-       return 0;
-
-fail_free_lines:
-       while (--i >= 0)
-               pblk_line_meta_free(l_mg, &pblk->lines[i]);
-       kfree(pblk->lines);
-fail_free_chunk_meta:
-       vfree(chunk_meta);
-fail_free_luns:
-       kfree(pblk->luns);
-fail_free_meta:
-       pblk_line_mg_free(pblk);
-
-       return ret;
-}
-
-static int pblk_writer_init(struct pblk *pblk)
-{
-       pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
-       if (IS_ERR(pblk->writer_ts)) {
-               int err = PTR_ERR(pblk->writer_ts);
-
-               if (err != -EINTR)
-                       pblk_err(pblk, "could not allocate writer kthread (%d)\n",
-                                       err);
-               return err;
-       }
-
-       timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
-       mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
-
-       return 0;
-}
-
-static void pblk_writer_stop(struct pblk *pblk)
-{
-       /* The pipeline must be stopped and the write buffer emptied before the
-        * write thread is stopped
-        */
-       WARN(pblk_rb_read_count(&pblk->rwb),
-                       "Stopping not fully persisted write buffer\n");
-
-       WARN(pblk_rb_sync_count(&pblk->rwb),
-                       "Stopping not fully synced write buffer\n");
-
-       del_timer_sync(&pblk->wtimer);
-       if (pblk->writer_ts)
-               kthread_stop(pblk->writer_ts);
-}
-
-static void pblk_free(struct pblk *pblk)
-{
-       pblk_lines_free(pblk);
-       pblk_l2p_free(pblk);
-       pblk_rwb_free(pblk);
-       pblk_core_free(pblk);
-
-       kfree(pblk);
-}
-
-static void pblk_tear_down(struct pblk *pblk, bool graceful)
-{
-       if (graceful)
-               __pblk_pipeline_flush(pblk);
-       __pblk_pipeline_stop(pblk);
-       pblk_writer_stop(pblk);
-       pblk_rb_sync_l2p(&pblk->rwb);
-       pblk_rl_free(&pblk->rl);
-
-       pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful);
-}
-
-static void pblk_exit(void *private, bool graceful)
-{
-       struct pblk *pblk = private;
-
-       pblk_gc_exit(pblk, graceful);
-       pblk_tear_down(pblk, graceful);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
-#endif
-
-       pblk_free(pblk);
-}
-
-static sector_t pblk_capacity(void *private)
-{
-       struct pblk *pblk = private;
-
-       return pblk->capacity * NR_PHY_IN_LOG;
-}
-
-static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
-                      int flags)
-{
-       struct nvm_geo *geo = &dev->geo;
-       struct request_queue *bqueue = dev->q;
-       struct request_queue *tqueue = tdisk->queue;
-       struct pblk *pblk;
-       int ret;
-
-       pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
-       if (!pblk)
-               return ERR_PTR(-ENOMEM);
-
-       pblk->dev = dev;
-       pblk->disk = tdisk;
-       pblk->state = PBLK_STATE_RUNNING;
-       trace_pblk_state(pblk_disk_name(pblk), pblk->state);
-       pblk->gc.gc_enabled = 0;
-
-       if (!(geo->version == NVM_OCSSD_SPEC_12 ||
-                                       geo->version == NVM_OCSSD_SPEC_20)) {
-               pblk_err(pblk, "OCSSD version not supported (%u)\n",
-                                                       geo->version);
-               kfree(pblk);
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (geo->ext) {
-               pblk_err(pblk, "extended metadata not supported\n");
-               kfree(pblk);
-               return ERR_PTR(-EINVAL);
-       }
-
-       spin_lock_init(&pblk->resubmit_lock);
-       spin_lock_init(&pblk->trans_lock);
-       spin_lock_init(&pblk->lock);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_set(&pblk->inflight_writes, 0);
-       atomic_long_set(&pblk->padded_writes, 0);
-       atomic_long_set(&pblk->padded_wb, 0);
-       atomic_long_set(&pblk->req_writes, 0);
-       atomic_long_set(&pblk->sub_writes, 0);
-       atomic_long_set(&pblk->sync_writes, 0);
-       atomic_long_set(&pblk->inflight_reads, 0);
-       atomic_long_set(&pblk->cache_reads, 0);
-       atomic_long_set(&pblk->sync_reads, 0);
-       atomic_long_set(&pblk->recov_writes, 0);
-       atomic_long_set(&pblk->recov_writes, 0);
-       atomic_long_set(&pblk->recov_gc_writes, 0);
-       atomic_long_set(&pblk->recov_gc_reads, 0);
-#endif
-
-       atomic_long_set(&pblk->read_failed, 0);
-       atomic_long_set(&pblk->read_empty, 0);
-       atomic_long_set(&pblk->read_high_ecc, 0);
-       atomic_long_set(&pblk->read_failed_gc, 0);
-       atomic_long_set(&pblk->write_failed, 0);
-       atomic_long_set(&pblk->erase_failed, 0);
-
-       ret = pblk_core_init(pblk);
-       if (ret) {
-               pblk_err(pblk, "could not initialize core\n");
-               goto fail;
-       }
-
-       ret = pblk_lines_init(pblk);
-       if (ret) {
-               pblk_err(pblk, "could not initialize lines\n");
-               goto fail_free_core;
-       }
-
-       ret = pblk_rwb_init(pblk);
-       if (ret) {
-               pblk_err(pblk, "could not initialize write buffer\n");
-               goto fail_free_lines;
-       }
-
-       ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
-       if (ret) {
-               pblk_err(pblk, "could not initialize maps\n");
-               goto fail_free_rwb;
-       }
-
-       ret = pblk_writer_init(pblk);
-       if (ret) {
-               if (ret != -EINTR)
-                       pblk_err(pblk, "could not initialize write thread\n");
-               goto fail_free_l2p;
-       }
-
-       ret = pblk_gc_init(pblk);
-       if (ret) {
-               pblk_err(pblk, "could not initialize gc\n");
-               goto fail_stop_writer;
-       }
-
-       /* inherit the size from the underlying device */
-       blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
-       blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
-
-       blk_queue_write_cache(tqueue, true, false);
-
-       tqueue->limits.discard_granularity = geo->clba * geo->csecs;
-       tqueue->limits.discard_alignment = 0;
-       blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
-       blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
-
-       pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
-                       geo->all_luns, pblk->l_mg.nr_lines,
-                       (unsigned long long)pblk->capacity,
-                       pblk->rwb.nr_entries);
-
-       wake_up_process(pblk->writer_ts);
-
-       /* Check if we need to start GC */
-       pblk_gc_should_kick(pblk);
-
-       return pblk;
-
-fail_stop_writer:
-       pblk_writer_stop(pblk);
-fail_free_l2p:
-       pblk_l2p_free(pblk);
-fail_free_rwb:
-       pblk_rwb_free(pblk);
-fail_free_lines:
-       pblk_lines_free(pblk);
-fail_free_core:
-       pblk_core_free(pblk);
-fail:
-       kfree(pblk);
-       return ERR_PTR(ret);
-}
-
-/* physical block device target */
-static struct nvm_tgt_type tt_pblk = {
-       .name           = "pblk",
-       .version        = {1, 0, 0},
-
-       .bops           = &pblk_bops,
-       .capacity       = pblk_capacity,
-
-       .init           = pblk_init,
-       .exit           = pblk_exit,
-
-       .sysfs_init     = pblk_sysfs_init,
-       .sysfs_exit     = pblk_sysfs_exit,
-       .owner          = THIS_MODULE,
-};
-
-static int __init pblk_module_init(void)
-{
-       int ret;
-
-       ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0);
-       if (ret)
-               return ret;
-       ret = nvm_register_tgt_type(&tt_pblk);
-       if (ret)
-               bioset_exit(&pblk_bio_set);
-       return ret;
-}
-
-static void pblk_module_exit(void)
-{
-       bioset_exit(&pblk_bio_set);
-       nvm_unregister_tgt_type(&tt_pblk);
-}
-
-module_init(pblk_module_init);
-module_exit(pblk_module_exit);
-MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
-MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
deleted file mode 100644 (file)
index 5408e32..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-map.c - pblk's lba-ppa mapping strategy
- *
- */
-
-#include "pblk.h"
-
-static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
-                             struct ppa_addr *ppa_list,
-                             unsigned long *lun_bitmap,
-                             void *meta_list,
-                             unsigned int valid_secs)
-{
-       struct pblk_line *line = pblk_line_get_data(pblk);
-       struct pblk_emeta *emeta;
-       struct pblk_w_ctx *w_ctx;
-       __le64 *lba_list;
-       u64 paddr;
-       int nr_secs = pblk->min_write_pgs;
-       int i;
-
-       if (!line)
-               return -ENOSPC;
-
-       if (pblk_line_is_full(line)) {
-               struct pblk_line *prev_line = line;
-
-               /* If we cannot allocate a new line, make sure to store the
-                * metadata on the current line and then fail.
-                */
-               line = pblk_line_replace_data(pblk);
-               pblk_line_close_meta(pblk, prev_line);
-
-               if (!line) {
-                       pblk_pipeline_stop(pblk);
-                       return -ENOSPC;
-               }
-
-       }
-
-       emeta = line->emeta;
-       lba_list = emeta_to_lbas(pblk, emeta->buf);
-
-       paddr = pblk_alloc_page(pblk, line, nr_secs);
-
-       for (i = 0; i < nr_secs; i++, paddr++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
-               __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
-               /* ppa to be sent to the device */
-               ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
-
-               /* Write context for target bio completion on write buffer. Note
-                * that the write buffer is protected by the sync backpointer,
-                * and a single writer thread has access to each specific entry
-                * at a time. Thus, it is safe to modify the context for the
-                * entry we are setting up for submission without taking any
-                * lock or memory barrier.
-                */
-               if (i < valid_secs) {
-                       kref_get(&line->ref);
-                       atomic_inc(&line->sec_to_update);
-                       w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
-                       w_ctx->ppa = ppa_list[i];
-                       meta->lba = cpu_to_le64(w_ctx->lba);
-                       lba_list[paddr] = cpu_to_le64(w_ctx->lba);
-                       if (lba_list[paddr] != addr_empty)
-                               line->nr_valid_lbas++;
-                       else
-                               atomic64_inc(&pblk->pad_wa);
-               } else {
-                       lba_list[paddr] = addr_empty;
-                       meta->lba = addr_empty;
-                       __pblk_map_invalidate(pblk, line, paddr);
-               }
-       }
-
-       pblk_down_rq(pblk, ppa_list[0], lun_bitmap);
-       return 0;
-}
-
-int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
-                unsigned long *lun_bitmap, unsigned int valid_secs,
-                unsigned int off)
-{
-       void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
-       void *meta_buffer;
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       unsigned int map_secs;
-       int min = pblk->min_write_pgs;
-       int i;
-       int ret;
-
-       for (i = off; i < rqd->nr_ppas; i += min) {
-               map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
-               meta_buffer = pblk_get_meta(pblk, meta_list, i);
-
-               ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
-                                       lun_bitmap, meta_buffer, map_secs);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-/* only if erase_ppa is set, acquire erase semaphore */
-int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                      unsigned int sentry, unsigned long *lun_bitmap,
-                      unsigned int valid_secs, struct ppa_addr *erase_ppa)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
-       void *meta_buffer;
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       struct pblk_line *e_line, *d_line;
-       unsigned int map_secs;
-       int min = pblk->min_write_pgs;
-       int i, erase_lun;
-       int ret;
-
-
-       for (i = 0; i < rqd->nr_ppas; i += min) {
-               map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
-               meta_buffer = pblk_get_meta(pblk, meta_list, i);
-
-               ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
-                                       lun_bitmap, meta_buffer, map_secs);
-               if (ret)
-                       return ret;
-
-               erase_lun = pblk_ppa_to_pos(geo, ppa_list[i]);
-
-               /* line can change after page map. We might also be writing the
-                * last line.
-                */
-               e_line = pblk_line_get_erase(pblk);
-               if (!e_line)
-                       return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
-                                                       valid_secs, i + min);
-
-               spin_lock(&e_line->lock);
-               if (!test_bit(erase_lun, e_line->erase_bitmap)) {
-                       set_bit(erase_lun, e_line->erase_bitmap);
-                       atomic_dec(&e_line->left_eblks);
-
-                       *erase_ppa = ppa_list[i];
-                       erase_ppa->a.blk = e_line->id;
-                       erase_ppa->a.reserved = 0;
-
-                       spin_unlock(&e_line->lock);
-
-                       /* Avoid evaluating e_line->left_eblks */
-                       return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
-                                                       valid_secs, i + min);
-               }
-               spin_unlock(&e_line->lock);
-       }
-
-       d_line = pblk_line_get_data(pblk);
-
-       /* line can change after page map. We might also be writing the
-        * last line.
-        */
-       e_line = pblk_line_get_erase(pblk);
-       if (!e_line)
-               return -ENOSPC;
-
-       /* Erase blocks that are bad in this line but might not be in next */
-       if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
-                       bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
-               int bit = -1;
-
-retry:
-               bit = find_next_bit(d_line->blk_bitmap,
-                                               lm->blk_per_line, bit + 1);
-               if (bit >= lm->blk_per_line)
-                       return 0;
-
-               spin_lock(&e_line->lock);
-               if (test_bit(bit, e_line->erase_bitmap)) {
-                       spin_unlock(&e_line->lock);
-                       goto retry;
-               }
-               spin_unlock(&e_line->lock);
-
-               set_bit(bit, e_line->erase_bitmap);
-               atomic_dec(&e_line->left_eblks);
-               *erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
-               erase_ppa->a.blk = e_line->id;
-       }
-
-       return 0;
-}
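
The file removed above implemented pblk's lba-to-ppa mapping: each minimum-write unit is filled with the caller's valid user sectors and the remainder is padded with empty addresses. A minimal, self-contained user-space sketch of that per-unit decision follows; the names (map_unit, the local ADDR_EMPTY) are illustrative and not kernel API.

/*
 * Illustrative only: mirrors the shape of pblk_map_page_data()'s loop,
 * where each sector of a minimum-write unit is either mapped to a user
 * LBA or padded with an empty address.  Not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

#define ADDR_EMPTY ((uint64_t)~0ULL)

static void map_unit(uint64_t paddr, const uint64_t *lbas,
                     unsigned int valid_secs, unsigned int nr_secs,
                     uint64_t *lba_list)
{
        for (unsigned int i = 0; i < nr_secs; i++, paddr++) {
                if (i < valid_secs)
                        lba_list[paddr] = lbas[i];    /* real user data */
                else
                        lba_list[paddr] = ADDR_EMPTY; /* padding sector */
        }
}

int main(void)
{
        uint64_t emeta_lbas[8] = { 0 };  /* stands in for the line's lba list */
        uint64_t lbas[4] = { 100, 101, 102, 103 };

        map_unit(4, lbas, 3, 4, emeta_lbas); /* 3 valid sectors, 1 pad */
        for (int i = 4; i < 8; i++)
                printf("paddr %d -> lba %lld\n", i, (long long)emeta_lbas[i]);
        return 0;
}

With three valid sectors out of four, the last slot is padded, which is the case that feeds the pad_wa accounting in the removed code above.
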
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
deleted file mode 100644 (file)
index 5abb170..0000000
+++ /dev/null
@@ -1,858 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *
- * Based upon the circular ringbuffer.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-rb.c - pblk's write buffer
- */
-
-#include <linux/circ_buf.h>
-
-#include "pblk.h"
-
-static DECLARE_RWSEM(pblk_rb_lock);
-
-static void pblk_rb_data_free(struct pblk_rb *rb)
-{
-       struct pblk_rb_pages *p, *t;
-
-       down_write(&pblk_rb_lock);
-       list_for_each_entry_safe(p, t, &rb->pages, list) {
-               free_pages((unsigned long)page_address(p->pages), p->order);
-               list_del(&p->list);
-               kfree(p);
-       }
-       up_write(&pblk_rb_lock);
-}
-
-void pblk_rb_free(struct pblk_rb *rb)
-{
-       pblk_rb_data_free(rb);
-       vfree(rb->entries);
-}
-
-/*
- * pblk_rb_calculate_size -- calculate the size of the write buffer
- */
-static unsigned int pblk_rb_calculate_size(unsigned int nr_entries,
-                                          unsigned int threshold)
-{
-       unsigned int thr_sz = 1 << (get_count_order(threshold + NVM_MAX_VLBA));
-       unsigned int max_sz = max(thr_sz, nr_entries);
-       unsigned int max_io;
-
-       /* Alloc a write buffer that can (i) fit at least two split bios
-        * (considering max I/O size NVM_MAX_VLBA), and (ii) guarantee that the
-        * threshold will be respected
-        */
-       max_io = (1 << max((int)(get_count_order(max_sz)),
-                               (int)(get_count_order(NVM_MAX_VLBA << 1))));
-       if ((threshold + NVM_MAX_VLBA) >= max_io)
-               max_io <<= 1;
-
-       return max_io;
-}
-
-/*
- * Initialize ring buffer. The data and metadata buffers must be previously
- * allocated and their size must be a power of two
- * (Documentation/core-api/circular-buffers.rst)
- */
-int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
-                unsigned int seg_size)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_rb_entry *entries;
-       unsigned int init_entry = 0;
-       unsigned int max_order = MAX_ORDER - 1;
-       unsigned int power_size, power_seg_sz;
-       unsigned int alloc_order, order, iter;
-       unsigned int nr_entries;
-
-       nr_entries = pblk_rb_calculate_size(size, threshold);
-       entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
-       if (!entries)
-               return -ENOMEM;
-
-       power_size = get_count_order(nr_entries);
-       power_seg_sz = get_count_order(seg_size);
-
-       down_write(&pblk_rb_lock);
-       rb->entries = entries;
-       rb->seg_size = (1 << power_seg_sz);
-       rb->nr_entries = (1 << power_size);
-       rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
-       rb->back_thres = threshold;
-       rb->flush_point = EMPTY_ENTRY;
-
-       spin_lock_init(&rb->w_lock);
-       spin_lock_init(&rb->s_lock);
-
-       INIT_LIST_HEAD(&rb->pages);
-
-       alloc_order = power_size;
-       if (alloc_order >= max_order) {
-               order = max_order;
-               iter = (1 << (alloc_order - max_order));
-       } else {
-               order = alloc_order;
-               iter = 1;
-       }
-
-       do {
-               struct pblk_rb_entry *entry;
-               struct pblk_rb_pages *page_set;
-               void *kaddr;
-               unsigned long set_size;
-               int i;
-
-               page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
-               if (!page_set) {
-                       up_write(&pblk_rb_lock);
-                       vfree(entries);
-                       return -ENOMEM;
-               }
-
-               page_set->order = order;
-               page_set->pages = alloc_pages(GFP_KERNEL, order);
-               if (!page_set->pages) {
-                       kfree(page_set);
-                       pblk_rb_data_free(rb);
-                       up_write(&pblk_rb_lock);
-                       vfree(entries);
-                       return -ENOMEM;
-               }
-               kaddr = page_address(page_set->pages);
-
-               entry = &rb->entries[init_entry];
-               entry->data = kaddr;
-               entry->cacheline = pblk_cacheline_to_addr(init_entry++);
-               entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
-
-               set_size = (1 << order);
-               for (i = 1; i < set_size; i++) {
-                       entry = &rb->entries[init_entry];
-                       entry->cacheline = pblk_cacheline_to_addr(init_entry++);
-                       entry->data = kaddr + (i * rb->seg_size);
-                       entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
-                       bio_list_init(&entry->w_ctx.bios);
-               }
-
-               list_add_tail(&page_set->list, &rb->pages);
-               iter--;
-       } while (iter > 0);
-       up_write(&pblk_rb_lock);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_set(&rb->inflight_flush_point, 0);
-#endif
-
-       /*
-        * Initialize rate-limiter, which controls access to the write buffer
-        * by user and GC I/O
-        */
-       pblk_rl_init(&pblk->rl, rb->nr_entries, threshold);
-
-       return 0;
-}
-
-static void clean_wctx(struct pblk_w_ctx *w_ctx)
-{
-       int flags;
-
-       flags = READ_ONCE(w_ctx->flags);
-       WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY),
-                       "pblk: overwriting unsubmitted data\n");
-
-       /* Release flags on context. Protect from writes and reads */
-       smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
-       pblk_ppa_set_empty(&w_ctx->ppa);
-       w_ctx->lba = ADDR_EMPTY;
-}
-
-#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
-#define pblk_rb_ring_space(rb, head, tail, size) \
-                                       (CIRC_SPACE(head, tail, size))
-
-/*
- * Buffer space is calculated with respect to the back pointer signaling
- * synchronized entries to the media.
- */
-static unsigned int pblk_rb_space(struct pblk_rb *rb)
-{
-       unsigned int mem = READ_ONCE(rb->mem);
-       unsigned int sync = READ_ONCE(rb->sync);
-
-       return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
-}
-
-unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
-                             unsigned int nr_entries)
-{
-       return (p + nr_entries) & (rb->nr_entries - 1);
-}
-
-/*
- * Buffer count is calculated with respect to the submission entry signaling the
- * entries that are available to send to the media
- */
-unsigned int pblk_rb_read_count(struct pblk_rb *rb)
-{
-       unsigned int mem = READ_ONCE(rb->mem);
-       unsigned int subm = READ_ONCE(rb->subm);
-
-       return pblk_rb_ring_count(mem, subm, rb->nr_entries);
-}
-
-unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
-{
-       unsigned int mem = READ_ONCE(rb->mem);
-       unsigned int sync = READ_ONCE(rb->sync);
-
-       return pblk_rb_ring_count(mem, sync, rb->nr_entries);
-}
-
-unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
-{
-       unsigned int subm;
-
-       subm = READ_ONCE(rb->subm);
-       /* Commit read means updating submission pointer */
-       smp_store_release(&rb->subm, pblk_rb_ptr_wrap(rb, subm, nr_entries));
-
-       return subm;
-}
-
-static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_line *line;
-       struct pblk_rb_entry *entry;
-       struct pblk_w_ctx *w_ctx;
-       unsigned int user_io = 0, gc_io = 0;
-       unsigned int i;
-       int flags;
-
-       for (i = 0; i < to_update; i++) {
-               entry = &rb->entries[rb->l2p_update];
-               w_ctx = &entry->w_ctx;
-
-               flags = READ_ONCE(entry->w_ctx.flags);
-               if (flags & PBLK_IOTYPE_USER)
-                       user_io++;
-               else if (flags & PBLK_IOTYPE_GC)
-                       gc_io++;
-               else
-                       WARN(1, "pblk: unknown IO type\n");
-
-               pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
-                                                       entry->cacheline);
-
-               line = pblk_ppa_to_line(pblk, w_ctx->ppa);
-               atomic_dec(&line->sec_to_update);
-               kref_put(&line->ref, pblk_line_put);
-               clean_wctx(w_ctx);
-               rb->l2p_update = pblk_rb_ptr_wrap(rb, rb->l2p_update, 1);
-       }
-
-       pblk_rl_out(&pblk->rl, user_io, gc_io);
-
-       return 0;
-}
-
-/*
- * When we move the l2p_update pointer, we update the l2p table - lookups will
- * point to the physical address instead of to the cacheline in the write buffer
- * from this moment on.
- */
-static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
-                             unsigned int mem, unsigned int sync)
-{
-       unsigned int space, count;
-       int ret = 0;
-
-       lockdep_assert_held(&rb->w_lock);
-
-       /* Update l2p only as buffer entries are being overwritten */
-       space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
-       if (space > nr_entries)
-               goto out;
-
-       count = nr_entries - space;
-       /* l2p_update used exclusively under rb->w_lock */
-       ret = __pblk_rb_update_l2p(rb, count);
-
-out:
-       return ret;
-}
-
-/*
- * Update the l2p entry for all sectors stored on the write buffer. This means
- * that all future lookups to the l2p table will point to a device address, not
- * to the cacheline in the write buffer.
- */
-void pblk_rb_sync_l2p(struct pblk_rb *rb)
-{
-       unsigned int sync;
-       unsigned int to_update;
-
-       spin_lock(&rb->w_lock);
-
-       /* Protect from reads and writes */
-       sync = smp_load_acquire(&rb->sync);
-
-       to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
-       __pblk_rb_update_l2p(rb, to_update);
-
-       spin_unlock(&rb->w_lock);
-}
-
-/*
- * Write @nr_entries to ring buffer from @data buffer if there is enough space.
- * Typically, 4KB data chunks coming from a bio will be copied to the ring
- * buffer, thus the write will fail if not all incoming data can be copied.
- *
- */
-static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
-                                 struct pblk_w_ctx w_ctx,
-                                 struct pblk_rb_entry *entry)
-{
-       memcpy(entry->data, data, rb->seg_size);
-
-       entry->w_ctx.lba = w_ctx.lba;
-       entry->w_ctx.ppa = w_ctx.ppa;
-}
-
-void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
-                             struct pblk_w_ctx w_ctx, unsigned int ring_pos)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_rb_entry *entry;
-       int flags;
-
-       entry = &rb->entries[ring_pos];
-       flags = READ_ONCE(entry->w_ctx.flags);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Caller must guarantee that the entry is free */
-       BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
-#endif
-
-       __pblk_rb_write_entry(rb, data, w_ctx, entry);
-
-       pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
-       flags = w_ctx.flags | PBLK_WRITTEN_DATA;
-
-       /* Release flags on write context. Protect from writes */
-       smp_store_release(&entry->w_ctx.flags, flags);
-}
-
-void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
-                           struct pblk_w_ctx w_ctx, struct pblk_line *line,
-                           u64 paddr, unsigned int ring_pos)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_rb_entry *entry;
-       int flags;
-
-       entry = &rb->entries[ring_pos];
-       flags = READ_ONCE(entry->w_ctx.flags);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Caller must guarantee that the entry is free */
-       BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
-#endif
-
-       __pblk_rb_write_entry(rb, data, w_ctx, entry);
-
-       if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
-               entry->w_ctx.lba = ADDR_EMPTY;
-
-       flags = w_ctx.flags | PBLK_WRITTEN_DATA;
-
-       /* Release flags on write context. Protect from writes */
-       smp_store_release(&entry->w_ctx.flags, flags);
-}
-
-static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
-                                  unsigned int pos)
-{
-       struct pblk_rb_entry *entry;
-       unsigned int sync, flush_point;
-
-       pblk_rb_sync_init(rb, NULL);
-       sync = READ_ONCE(rb->sync);
-
-       if (pos == sync) {
-               pblk_rb_sync_end(rb, NULL);
-               return 0;
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_inc(&rb->inflight_flush_point);
-#endif
-
-       flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
-       entry = &rb->entries[flush_point];
-
-       /* Protect flush points */
-       smp_store_release(&rb->flush_point, flush_point);
-
-       if (bio)
-               bio_list_add(&entry->w_ctx.bios, bio);
-
-       pblk_rb_sync_end(rb, NULL);
-
-       return bio ? 1 : 0;
-}
-
-static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
-                              unsigned int *pos)
-{
-       unsigned int mem;
-       unsigned int sync;
-       unsigned int threshold;
-
-       sync = READ_ONCE(rb->sync);
-       mem = READ_ONCE(rb->mem);
-
-       threshold = nr_entries + rb->back_thres;
-
-       if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < threshold)
-               return 0;
-
-       if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
-               return 0;
-
-       *pos = mem;
-
-       return 1;
-}
-
-static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
-                            unsigned int *pos)
-{
-       if (!__pblk_rb_may_write(rb, nr_entries, pos))
-               return 0;
-
-       /* Protect from read count */
-       smp_store_release(&rb->mem, pblk_rb_ptr_wrap(rb, *pos, nr_entries));
-       return 1;
-}
-
-void pblk_rb_flush(struct pblk_rb *rb)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       unsigned int mem = READ_ONCE(rb->mem);
-
-       if (pblk_rb_flush_point_set(rb, NULL, mem))
-               return;
-
-       pblk_write_kick(pblk);
-}
-
-static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
-                                  unsigned int *pos, struct bio *bio,
-                                  int *io_ret)
-{
-       unsigned int mem;
-
-       if (!__pblk_rb_may_write(rb, nr_entries, pos))
-               return 0;
-
-       mem = pblk_rb_ptr_wrap(rb, *pos, nr_entries);
-       *io_ret = NVM_IO_DONE;
-
-       if (bio->bi_opf & REQ_PREFLUSH) {
-               struct pblk *pblk = container_of(rb, struct pblk, rwb);
-
-               atomic64_inc(&pblk->nr_flush);
-               if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
-                       *io_ret = NVM_IO_OK;
-       }
-
-       /* Protect from read count */
-       smp_store_release(&rb->mem, mem);
-
-       return 1;
-}
-
-/*
- * Atomically check that (i) there is space on the write buffer for the
- * incoming I/O, and (ii) the current I/O type has enough budget in the write
- * buffer (rate-limiter).
- */
-int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
-                          unsigned int nr_entries, unsigned int *pos)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       int io_ret;
-
-       spin_lock(&rb->w_lock);
-       io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
-       if (io_ret) {
-               spin_unlock(&rb->w_lock);
-               return io_ret;
-       }
-
-       if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
-               spin_unlock(&rb->w_lock);
-               return NVM_IO_REQUEUE;
-       }
-
-       pblk_rl_user_in(&pblk->rl, nr_entries);
-       spin_unlock(&rb->w_lock);
-
-       return io_ret;
-}
-
-/*
- * Look at pblk_rb_may_write_user comment
- */
-int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
-                        unsigned int *pos)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-
-       spin_lock(&rb->w_lock);
-       if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
-               spin_unlock(&rb->w_lock);
-               return 0;
-       }
-
-       if (!pblk_rb_may_write(rb, nr_entries, pos)) {
-               spin_unlock(&rb->w_lock);
-               return 0;
-       }
-
-       pblk_rl_gc_in(&pblk->rl, nr_entries);
-       spin_unlock(&rb->w_lock);
-
-       return 1;
-}
-
-/*
- * Read available entries on rb and add them to the given bio. To avoid a memory
- * copy, a page reference to the write buffer is added to the bio instead.
- *
- * This function is used by the write thread to form the write bio that will
- * persist data on the write buffer to the media.
- */
-unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
-                                unsigned int pos, unsigned int nr_entries,
-                                unsigned int count)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct request_queue *q = pblk->dev->q;
-       struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-       struct bio *bio = rqd->bio;
-       struct pblk_rb_entry *entry;
-       struct page *page;
-       unsigned int pad = 0, to_read = nr_entries;
-       unsigned int i;
-       int flags;
-
-       if (count < nr_entries) {
-               pad = nr_entries - count;
-               to_read = count;
-       }
-
-       /* Add space for packed metadata if in use */
-       pad += (pblk->min_write_pgs - pblk->min_write_pgs_data);
-
-       c_ctx->sentry = pos;
-       c_ctx->nr_valid = to_read;
-       c_ctx->nr_padded = pad;
-
-       for (i = 0; i < to_read; i++) {
-               entry = &rb->entries[pos];
-
-               /* A write has been allowed into the buffer, but data is still
-                * being copied to it. It is ok to busy wait.
-                */
-try:
-               flags = READ_ONCE(entry->w_ctx.flags);
-               if (!(flags & PBLK_WRITTEN_DATA)) {
-                       io_schedule();
-                       goto try;
-               }
-
-               page = virt_to_page(entry->data);
-               if (!page) {
-                       pblk_err(pblk, "could not allocate write bio page\n");
-                       flags &= ~PBLK_WRITTEN_DATA;
-                       flags |= PBLK_SUBMITTED_ENTRY;
-                       /* Release flags on context. Protect from writes */
-                       smp_store_release(&entry->w_ctx.flags, flags);
-                       return NVM_IO_ERR;
-               }
-
-               if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
-                                                               rb->seg_size) {
-                       pblk_err(pblk, "could not add page to write bio\n");
-                       flags &= ~PBLK_WRITTEN_DATA;
-                       flags |= PBLK_SUBMITTED_ENTRY;
-                       /* Release flags on context. Protect from writes */
-                       smp_store_release(&entry->w_ctx.flags, flags);
-                       return NVM_IO_ERR;
-               }
-
-               flags &= ~PBLK_WRITTEN_DATA;
-               flags |= PBLK_SUBMITTED_ENTRY;
-
-               /* Release flags on context. Protect from writes */
-               smp_store_release(&entry->w_ctx.flags, flags);
-
-               pos = pblk_rb_ptr_wrap(rb, pos, 1);
-       }
-
-       if (pad) {
-               if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
-                       pblk_err(pblk, "could not pad page in write bio\n");
-                       return NVM_IO_ERR;
-               }
-
-               if (pad < pblk->min_write_pgs)
-                       atomic64_inc(&pblk->pad_dist[pad - 1]);
-               else
-                       pblk_warn(pblk, "padding more than min. sectors\n");
-
-               atomic64_add(pad, &pblk->pad_wa);
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(pad, &pblk->padded_writes);
-#endif
-
-       return NVM_IO_OK;
-}
-
-/*
- * Copy to bio only if the lba matches the one on the given cache entry.
- * Otherwise, it means that the entry has been overwritten, and the bio should
- * be directed to disk.
- */
-int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
-                       struct ppa_addr ppa)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_rb_entry *entry;
-       struct pblk_w_ctx *w_ctx;
-       struct ppa_addr l2p_ppa;
-       u64 pos = pblk_addr_to_cacheline(ppa);
-       void *data;
-       int flags;
-       int ret = 1;
-
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Caller must ensure that the access will not cause an overflow */
-       BUG_ON(pos >= rb->nr_entries);
-#endif
-       entry = &rb->entries[pos];
-       w_ctx = &entry->w_ctx;
-       flags = READ_ONCE(w_ctx->flags);
-
-       spin_lock(&rb->w_lock);
-       spin_lock(&pblk->trans_lock);
-       l2p_ppa = pblk_trans_map_get(pblk, lba);
-       spin_unlock(&pblk->trans_lock);
-
-       /* Check if the entry has been overwritten or is scheduled to be */
-       if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
-                                               flags & PBLK_WRITABLE_ENTRY) {
-               ret = 0;
-               goto out;
-       }
-       data = bio_data(bio);
-       memcpy(data, entry->data, rb->seg_size);
-
-out:
-       spin_unlock(&rb->w_lock);
-       return ret;
-}
-
-struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
-{
-       unsigned int entry = pblk_rb_ptr_wrap(rb, pos, 0);
-
-       return &rb->entries[entry].w_ctx;
-}
-
-unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
-       __acquires(&rb->s_lock)
-{
-       if (flags)
-               spin_lock_irqsave(&rb->s_lock, *flags);
-       else
-               spin_lock_irq(&rb->s_lock);
-
-       return rb->sync;
-}
-
-void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
-       __releases(&rb->s_lock)
-{
-       lockdep_assert_held(&rb->s_lock);
-
-       if (flags)
-               spin_unlock_irqrestore(&rb->s_lock, *flags);
-       else
-               spin_unlock_irq(&rb->s_lock);
-}
-
-unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
-{
-       unsigned int sync, flush_point;
-       lockdep_assert_held(&rb->s_lock);
-
-       sync = READ_ONCE(rb->sync);
-       flush_point = READ_ONCE(rb->flush_point);
-
-       if (flush_point != EMPTY_ENTRY) {
-               unsigned int secs_to_flush;
-
-               secs_to_flush = pblk_rb_ring_count(flush_point, sync,
-                                       rb->nr_entries);
-               if (secs_to_flush < nr_entries) {
-                       /* Protect flush points */
-                       smp_store_release(&rb->flush_point, EMPTY_ENTRY);
-               }
-       }
-
-       sync = pblk_rb_ptr_wrap(rb, sync, nr_entries);
-
-       /* Protect from counts */
-       smp_store_release(&rb->sync, sync);
-
-       return sync;
-}
-
-/* Calculate how many sectors to submit up to the current flush point. */
-unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
-{
-       unsigned int subm, sync, flush_point;
-       unsigned int submitted, to_flush;
-
-       /* Protect flush points */
-       flush_point = smp_load_acquire(&rb->flush_point);
-       if (flush_point == EMPTY_ENTRY)
-               return 0;
-
-       /* Protect syncs */
-       sync = smp_load_acquire(&rb->sync);
-
-       subm = READ_ONCE(rb->subm);
-       submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);
-
-       /* The sync point itself counts as a sector to sync */
-       to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;
-
-       return (submitted < to_flush) ? (to_flush - submitted) : 0;
-}
-
-int pblk_rb_tear_down_check(struct pblk_rb *rb)
-{
-       struct pblk_rb_entry *entry;
-       int i;
-       int ret = 0;
-
-       spin_lock(&rb->w_lock);
-       spin_lock_irq(&rb->s_lock);
-
-       if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
-                               (rb->sync == rb->l2p_update) &&
-                               (rb->flush_point == EMPTY_ENTRY)) {
-               goto out;
-       }
-
-       if (!rb->entries) {
-               ret = 1;
-               goto out;
-       }
-
-       for (i = 0; i < rb->nr_entries; i++) {
-               entry = &rb->entries[i];
-
-               if (!entry->data) {
-                       ret = 1;
-                       goto out;
-               }
-       }
-
-out:
-       spin_unlock_irq(&rb->s_lock);
-       spin_unlock(&rb->w_lock);
-
-       return ret;
-}
-
-unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
-{
-       return (pos & (rb->nr_entries - 1));
-}
-
-int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
-{
-       return (pos >= rb->nr_entries);
-}
-
-ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_c_ctx *c;
-       ssize_t offset;
-       int queued_entries = 0;
-
-       spin_lock_irq(&rb->s_lock);
-       list_for_each_entry(c, &pblk->compl_list, list)
-               queued_entries++;
-       spin_unlock_irq(&rb->s_lock);
-
-       if (rb->flush_point != EMPTY_ENTRY)
-               offset = scnprintf(buf, PAGE_SIZE,
-                       "%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
-                       rb->nr_entries,
-                       rb->mem,
-                       rb->subm,
-                       rb->sync,
-                       rb->l2p_update,
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       atomic_read(&rb->inflight_flush_point),
-#else
-                       0,
-#endif
-                       rb->flush_point,
-                       pblk_rb_read_count(rb),
-                       pblk_rb_space(rb),
-                       pblk_rb_flush_point_count(rb),
-                       queued_entries);
-       else
-               offset = scnprintf(buf, PAGE_SIZE,
-                       "%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
-                       rb->nr_entries,
-                       rb->mem,
-                       rb->subm,
-                       rb->sync,
-                       rb->l2p_update,
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       atomic_read(&rb->inflight_flush_point),
-#else
-                       0,
-#endif
-                       pblk_rb_read_count(rb),
-                       pblk_rb_space(rb),
-                       pblk_rb_flush_point_count(rb),
-                       queued_entries);
-
-       return offset;
-}
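
The write buffer removed above sizes itself to a power of two so that pointer wrap-around and occupancy checks reduce to mask arithmetic, in the style of the CIRC_CNT/CIRC_SPACE helpers from linux/circ_buf.h. A minimal user-space sketch of that arithmetic, with a fixed illustrative size (not the pblk API):

/*
 * Illustrative only: power-of-two ring pointer arithmetic analogous to
 * pblk_rb_ptr_wrap() and the CIRC_CNT/CIRC_SPACE macros used above.
 */
#include <stdio.h>

#define RING_SIZE 8 /* must be a power of two */

static unsigned int ring_count(unsigned int head, unsigned int tail)
{
        return (head - tail) & (RING_SIZE - 1);     /* like CIRC_CNT   */
}

static unsigned int ring_space(unsigned int head, unsigned int tail)
{
        return ring_count(tail, head + 1);          /* like CIRC_SPACE */
}

static unsigned int ring_wrap(unsigned int p, unsigned int n)
{
        return (p + n) & (RING_SIZE - 1);           /* like pblk_rb_ptr_wrap */
}

int main(void)
{
        unsigned int mem = 6, sync = 3;

        printf("entries between sync and mem: %u\n", ring_count(mem, sync));
        printf("free space left: %u\n", ring_space(mem, sync));
        printf("mem advanced by 4 wraps to: %u\n", ring_wrap(mem, 4));
        return 0;
}
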
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
deleted file mode 100644 (file)
index c28537a..0000000
+++ /dev/null
@@ -1,474 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-read.c - pblk's read path
- */
-
-#include "pblk.h"
-
-/*
- * There is no guarantee that the value read from cache has not been updated and
- * resides at another location in the cache. We guarantee though that if the
- * value is read from the cache, it belongs to the mapped lba. To guarantee
- * ordering between writes and reads, a flush must be
- * issued.
- */
-static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
-                               sector_t lba, struct ppa_addr ppa)
-{
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Callers must ensure that the ppa points to a cache address */
-       BUG_ON(pblk_ppa_empty(ppa));
-       BUG_ON(!pblk_addr_in_cache(ppa));
-#endif
-
-       return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
-}
-
-static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                                struct bio *bio, sector_t blba,
-                                bool *from_cache)
-{
-       void *meta_list = rqd->meta_list;
-       int nr_secs, i;
-
-retry:
-       nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
-                                       from_cache);
-
-       if (!*from_cache)
-               goto end;
-
-       for (i = 0; i < nr_secs; i++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
-               sector_t lba = blba + i;
-
-               if (pblk_ppa_empty(rqd->ppa_list[i])) {
-                       __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
-                       meta->lba = addr_empty;
-               } else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
-                       /*
-                        * Try to read from write buffer. The address is later
-                        * checked on the write buffer to prevent retrieving
-                        * overwritten data.
-                        */
-                       if (!pblk_read_from_cache(pblk, bio, lba,
-                                                       rqd->ppa_list[i])) {
-                               if (i == 0) {
-                                       /*
-                                        * We didn't call bio_advance()
-                                        * yet, so we can just retry.
-                                        */
-                                       goto retry;
-                               } else {
-                                       /*
-                                        * We already called bio_advance(),
-                                        * so we cannot retry. Return from
-                                        * this function to allow the
-                                        * caller to handle the bio
-                                        * splitting at the current
-                                        * sector position.
-                                        */
-                                       nr_secs = i;
-                                       goto end;
-                               }
-                       }
-                       meta->lba = cpu_to_le64(lba);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       atomic_long_inc(&pblk->cache_reads);
-#endif
-               }
-               bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
-       }
-
-end:
-       if (pblk_io_aligned(pblk, nr_secs))
-               rqd->is_seq = 1;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(nr_secs, &pblk->inflight_reads);
-#endif
-
-       return nr_secs;
-}
-
-
-static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
-                               sector_t blba)
-{
-       void *meta_list = rqd->meta_list;
-       int nr_lbas = rqd->nr_ppas;
-       int i;
-
-       if (!pblk_is_oob_meta_supported(pblk))
-               return;
-
-       for (i = 0; i < nr_lbas; i++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
-               u64 lba = le64_to_cpu(meta->lba);
-
-               if (lba == ADDR_EMPTY)
-                       continue;
-
-               if (lba != blba + i) {
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-                       print_ppa(pblk, &ppa_list[i], "seq", i);
-#endif
-                       pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
-                                                       lba, (u64)blba + i);
-                       WARN_ON(1);
-               }
-       }
-}
-
-/*
- * There can be holes in the lba list.
- */
-static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
-                                u64 *lba_list, int nr_lbas)
-{
-       void *meta_lba_list = rqd->meta_list;
-       int i, j;
-
-       if (!pblk_is_oob_meta_supported(pblk))
-               return;
-
-       for (i = 0, j = 0; i < nr_lbas; i++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk,
-                                                          meta_lba_list, j);
-               u64 lba = lba_list[i];
-               u64 meta_lba;
-
-               if (lba == ADDR_EMPTY)
-                       continue;
-
-               meta_lba = le64_to_cpu(meta->lba);
-
-               if (lba != meta_lba) {
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-                       print_ppa(pblk, &ppa_list[j], "rnd", j);
-#endif
-                       pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
-                                                       meta_lba, lba);
-                       WARN_ON(1);
-               }
-
-               j++;
-       }
-
-       WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
-}
-
-static void pblk_end_user_read(struct bio *bio, int error)
-{
-       if (error && error != NVM_RSP_WARN_HIGHECC)
-               bio_io_error(bio);
-       else
-               bio_endio(bio);
-}
-
-static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
-                              bool put_line)
-{
-       struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
-       struct bio *int_bio = rqd->bio;
-       unsigned long start_time = r_ctx->start_time;
-
-       bio_end_io_acct(int_bio, start_time);
-
-       if (rqd->error)
-               pblk_log_read_err(pblk, rqd);
-
-       pblk_read_check_seq(pblk, rqd, r_ctx->lba);
-       bio_put(int_bio);
-
-       if (put_line)
-               pblk_rq_to_line_put(pblk, rqd);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
-       atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
-#endif
-
-       pblk_free_rqd(pblk, rqd, PBLK_READ);
-       atomic_dec(&pblk->inflight_io);
-}
-
-static void pblk_end_io_read(struct nvm_rq *rqd)
-{
-       struct pblk *pblk = rqd->private;
-       struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
-       struct bio *bio = (struct bio *)r_ctx->private;
-
-       pblk_end_user_read(bio, rqd->error);
-       __pblk_end_io_read(pblk, rqd, true);
-}
-
-static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
-                        sector_t lba, bool *from_cache)
-{
-       struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
-       struct ppa_addr ppa;
-
-       pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_inc(&pblk->inflight_reads);
-#endif
-
-retry:
-       if (pblk_ppa_empty(ppa)) {
-               __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
-               meta->lba = addr_empty;
-               return;
-       }
-
-       /* Try to read from write buffer. The address is later checked on the
-        * write buffer to prevent retrieving overwritten data.
-        */
-       if (pblk_addr_in_cache(ppa)) {
-               if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
-                       pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
-                       goto retry;
-               }
-
-               meta->lba = cpu_to_le64(lba);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-               atomic_long_inc(&pblk->cache_reads);
-#endif
-       } else {
-               rqd->ppa_addr = ppa;
-       }
-}
-
-void pblk_submit_read(struct pblk *pblk, struct bio *bio)
-{
-       sector_t blba = pblk_get_lba(bio);
-       unsigned int nr_secs = pblk_get_secs(bio);
-       bool from_cache;
-       struct pblk_g_ctx *r_ctx;
-       struct nvm_rq *rqd;
-       struct bio *int_bio, *split_bio;
-       unsigned long start_time;
-
-       start_time = bio_start_io_acct(bio);
-
-       rqd = pblk_alloc_rqd(pblk, PBLK_READ);
-
-       rqd->opcode = NVM_OP_PREAD;
-       rqd->nr_ppas = nr_secs;
-       rqd->private = pblk;
-       rqd->end_io = pblk_end_io_read;
-
-       r_ctx = nvm_rq_to_pdu(rqd);
-       r_ctx->start_time = start_time;
-       r_ctx->lba = blba;
-
-       if (pblk_alloc_rqd_meta(pblk, rqd)) {
-               bio_io_error(bio);
-               pblk_free_rqd(pblk, rqd, PBLK_READ);
-               return;
-       }
-
-       /* Clone read bio to deal internally with:
-        * -read errors when reading from drive
-        * -bio_advance() calls during cache reads
-        */
-       int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
-
-       if (nr_secs > 1)
-               nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
-                                               &from_cache);
-       else
-               pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);
-
-split_retry:
-       r_ctx->private = bio; /* original bio */
-       rqd->bio = int_bio; /* internal bio */
-
-       if (from_cache && nr_secs == rqd->nr_ppas) {
-               /* All data was read from cache, we can complete the IO. */
-               pblk_end_user_read(bio, 0);
-               atomic_inc(&pblk->inflight_io);
-               __pblk_end_io_read(pblk, rqd, false);
-       } else if (nr_secs != rqd->nr_ppas) {
-               /* The read bio request could be partially filled by the write
-                * buffer, but there are some holes that need to be read from
-                * buffer, but there are some holes that need to be read from
-                * the drive. To handle this, we use the block layer
-                * mechanism to split this request into smaller ones and
-                * chain them together.
-               split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
-                                       &pblk_bio_set);
-               bio_chain(split_bio, bio);
-               submit_bio_noacct(bio);
-
-               /* The new bio contains the first N sectors of the previous one,
-                * so we can continue to use the existing rqd, but we need to
-                * shrink the number of PPAs in it. The new bio is also
-                * guaranteed to contain data either only from the cache or only
-                * from the drive, never a mix of the two.
-                */
-               bio = split_bio;
-               rqd->nr_ppas = nr_secs;
-               if (rqd->nr_ppas == 1)
-                       rqd->ppa_addr = rqd->ppa_list[0];
-
-               /* Recreate int_bio - the existing one might already have some
-                * internal fields modified.
-                */
-               bio_put(int_bio);
-               int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
-               goto split_retry;
-       } else if (pblk_submit_io(pblk, rqd, NULL)) {
-               /* Submitting IO to drive failed, let's report an error */
-               rqd->error = -ENODEV;
-               pblk_end_io_read(rqd);
-       }
-}
-
-static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
-                             struct pblk_line *line, u64 *lba_list,
-                             u64 *paddr_list_gc, unsigned int nr_secs)
-{
-       struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
-       struct ppa_addr ppa_gc;
-       int valid_secs = 0;
-       int i;
-
-       pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);
-
-       for (i = 0; i < nr_secs; i++) {
-               if (lba_list[i] == ADDR_EMPTY)
-                       continue;
-
-               ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
-               if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
-                       paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
-                       continue;
-               }
-
-               rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(valid_secs, &pblk->inflight_reads);
-#endif
-
-       return valid_secs;
-}
-
-static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
-                     struct pblk_line *line, sector_t lba,
-                     u64 paddr_gc)
-{
-       struct ppa_addr ppa_l2p, ppa_gc;
-       int valid_secs = 0;
-
-       if (lba == ADDR_EMPTY)
-               goto out;
-
-       /* logic error: lba out-of-bounds */
-       if (lba >= pblk->capacity) {
-               WARN(1, "pblk: read lba out of bounds\n");
-               goto out;
-       }
-
-       spin_lock(&pblk->trans_lock);
-       ppa_l2p = pblk_trans_map_get(pblk, lba);
-       spin_unlock(&pblk->trans_lock);
-
-       ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
-       if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
-               goto out;
-
-       rqd->ppa_addr = ppa_l2p;
-       valid_secs = 1;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_inc(&pblk->inflight_reads);
-#endif
-
-out:
-       return valid_secs;
-}
-
-int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
-{
-       struct nvm_rq rqd;
-       int ret = NVM_IO_OK;
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       ret = pblk_alloc_rqd_meta(pblk, &rqd);
-       if (ret)
-               return ret;
-
-       if (gc_rq->nr_secs > 1) {
-               gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
-                                                       gc_rq->lba_list,
-                                                       gc_rq->paddr_list,
-                                                       gc_rq->nr_secs);
-               if (gc_rq->secs_to_gc == 1)
-                       rqd.ppa_addr = rqd.ppa_list[0];
-       } else {
-               gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
-                                                       gc_rq->lba_list[0],
-                                                       gc_rq->paddr_list[0]);
-       }
-
-       if (!(gc_rq->secs_to_gc))
-               goto out;
-
-       rqd.opcode = NVM_OP_PREAD;
-       rqd.nr_ppas = gc_rq->secs_to_gc;
-
-       if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
-               ret = -EIO;
-               goto err_free_dma;
-       }
-
-       pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);
-
-       atomic_dec(&pblk->inflight_io);
-
-       if (rqd.error) {
-               atomic_long_inc(&pblk->read_failed_gc);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-               pblk_print_failed_rqd(pblk, &rqd, rqd.error);
-#endif
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
-       atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
-       atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
-#endif
-
-out:
-       pblk_free_rqd_meta(pblk, &rqd);
-       return ret;
-
-err_free_dma:
-       pblk_free_rqd_meta(pblk, &rqd);
-       return ret;
-}
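
The read path removed above resolves each LBA through the L2P table and then takes one of three routes: an unmapped sector returns empty data, a cache-resident sector is copied out of the write buffer, and anything else becomes a device read. A minimal sketch of that three-way decision, assuming a simplified L2P entry layout (all names hypothetical, not the pblk API):

/*
 * Illustrative only: the per-sector classification pblk's read path
 * performs (empty mapping, cache-resident, or device-resident).
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

enum src { SRC_EMPTY, SRC_CACHE, SRC_DEVICE };

struct addr {
        bool     mapped;
        bool     in_cache;  /* true: write-buffer entry, false: media */
        uint64_t where;     /* cacheline index or device PPA          */
};

static enum src classify(struct addr a)
{
        if (!a.mapped)
                return SRC_EMPTY;   /* never written: return empty data */
        if (a.in_cache)
                return SRC_CACHE;   /* copy from the write buffer       */
        return SRC_DEVICE;          /* issue a device read              */
}

int main(void)
{
        struct addr l2p[3] = {
                { .mapped = false },
                { .mapped = true, .in_cache = true,  .where = 42 },
                { .mapped = true, .in_cache = false, .where = 4096 },
        };
        const char *name[] = { "empty", "cache", "device" };

        for (int i = 0; i < 3; i++)
                printf("lba %d -> %s\n", i, name[classify(l2p[i])]);
        return 0;
}
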
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
deleted file mode 100644 (file)
index 0e6f0c7..0000000
+++ /dev/null
@@ -1,874 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial: Javier Gonzalez <javier@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-recovery.c - pblk's recovery path
- *
- * The L2P recovery path is single threaded as the L2P table is updated in order
- * following the line sequence ID.
- */
-
-#include "pblk.h"
-#include "pblk-trace.h"
-
-int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
-{
-       u32 crc;
-
-       crc = pblk_calc_emeta_crc(pblk, emeta_buf);
-       if (le32_to_cpu(emeta_buf->crc) != crc)
-               return 1;
-
-       if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
-               return 1;
-
-       return 0;
-}
-
-static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_emeta *emeta = line->emeta;
-       struct line_emeta *emeta_buf = emeta->buf;
-       __le64 *lba_list;
-       u64 data_start, data_end;
-       u64 nr_valid_lbas, nr_lbas = 0;
-       u64 i;
-
-       lba_list = emeta_to_lbas(pblk, emeta_buf);
-       if (!lba_list)
-               return 1;
-
-       data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
-       data_end = line->emeta_ssec;
-       nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);
-
-       for (i = data_start; i < data_end; i++) {
-               struct ppa_addr ppa;
-               int pos;
-
-               ppa = addr_to_gen_ppa(pblk, i, line->id);
-               pos = pblk_ppa_to_pos(geo, ppa);
-
-               /* Do not update bad blocks */
-               if (test_bit(pos, line->blk_bitmap))
-                       continue;
-
-               if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
-                       spin_lock(&line->lock);
-                       if (test_and_set_bit(i, line->invalid_bitmap))
-                               WARN_ONCE(1, "pblk: rec. double invalidate:\n");
-                       else
-                               le32_add_cpu(line->vsc, -1);
-                       spin_unlock(&line->lock);
-
-                       continue;
-               }
-
-               pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
-               nr_lbas++;
-       }
-
-       if (nr_valid_lbas != nr_lbas)
-               pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
-                               line->id, nr_valid_lbas, nr_lbas);
-
-       line->left_msecs = 0;
-
-       return 0;
-}
-
-static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
-                               u64 written_secs)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       int i;
-
-       for (i = 0; i < written_secs; i += pblk->min_write_pgs)
-               __pblk_alloc_page(pblk, line, pblk->min_write_pgs);
-
-       spin_lock(&l_mg->free_lock);
-       if (written_secs > line->left_msecs) {
-               /*
-                * We have all data sectors written
-                * and some emeta sectors written too.
-                */
-               line->left_msecs = 0;
-       } else {
-               /* We have only some data sectors written. */
-               line->left_msecs -= written_secs;
-       }
-       spin_unlock(&l_mg->free_lock);
-}
-
-static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
-       u64 written_secs = 0;
-       int valid_chunks = 0;
-       int i;
-
-       for (i = 0; i < lm->blk_per_line; i++) {
-               struct nvm_chk_meta *chunk = &line->chks[i];
-
-               if (chunk->state & NVM_CHK_ST_OFFLINE)
-                       continue;
-
-               written_secs += chunk->wp;
-               valid_chunks++;
-       }
-
-       if (lm->blk_per_line - nr_bb != valid_chunks)
-               pblk_err(pblk, "recovery line %d is bad\n", line->id);
-
-       pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);
-
-       return written_secs;
-}
-
-struct pblk_recov_alloc {
-       struct ppa_addr *ppa_list;
-       void *meta_list;
-       struct nvm_rq *rqd;
-       void *data;
-       dma_addr_t dma_ppa_list;
-       dma_addr_t dma_meta_list;
-};
-
-static void pblk_recov_complete(struct kref *ref)
-{
-       struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);
-
-       complete(&pad_rq->wait);
-}
-
-static void pblk_end_io_recov(struct nvm_rq *rqd)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       struct pblk_pad_rq *pad_rq = rqd->private;
-       struct pblk *pblk = pad_rq->pblk;
-
-       pblk_up_chunk(pblk, ppa_list[0]);
-
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-
-       atomic_dec(&pblk->inflight_io);
-       kref_put(&pad_rq->ref, pblk_recov_complete);
-}
-
-/* pad line using line bitmap.  */
-static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
-                              int left_ppas)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       void *meta_list;
-       struct pblk_pad_rq *pad_rq;
-       struct nvm_rq *rqd;
-       struct ppa_addr *ppa_list;
-       void *data;
-       __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
-       u64 w_ptr = line->cur_sec;
-       int left_line_ppas, rq_ppas;
-       int i, j;
-       int ret = 0;
-
-       spin_lock(&line->lock);
-       left_line_ppas = line->left_msecs;
-       spin_unlock(&line->lock);
-
-       pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
-       if (!pad_rq)
-               return -ENOMEM;
-
-       data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
-       if (!data) {
-               ret = -ENOMEM;
-               goto free_rq;
-       }
-
-       pad_rq->pblk = pblk;
-       init_completion(&pad_rq->wait);
-       kref_init(&pad_rq->ref);
-
-next_pad_rq:
-       rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
-       if (rq_ppas < pblk->min_write_pgs) {
-               pblk_err(pblk, "corrupted pad line %d\n", line->id);
-               goto fail_complete;
-       }
-
-       rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
-
-       ret = pblk_alloc_rqd_meta(pblk, rqd);
-       if (ret) {
-               pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-               goto fail_complete;
-       }
-
-       rqd->bio = NULL;
-       rqd->opcode = NVM_OP_PWRITE;
-       rqd->is_seq = 1;
-       rqd->nr_ppas = rq_ppas;
-       rqd->end_io = pblk_end_io_recov;
-       rqd->private = pad_rq;
-
-       ppa_list = nvm_rq_to_ppa_list(rqd);
-       meta_list = rqd->meta_list;
-
-       for (i = 0; i < rqd->nr_ppas; ) {
-               struct ppa_addr ppa;
-               int pos;
-
-               w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
-               ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
-               pos = pblk_ppa_to_pos(geo, ppa);
-
-               while (test_bit(pos, line->blk_bitmap)) {
-                       w_ptr += pblk->min_write_pgs;
-                       ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
-                       pos = pblk_ppa_to_pos(geo, ppa);
-               }
-
-               for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
-                       struct ppa_addr dev_ppa;
-                       struct pblk_sec_meta *meta;
-                       __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
-                       dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
-
-                       pblk_map_invalidate(pblk, dev_ppa);
-                       lba_list[w_ptr] = addr_empty;
-                       meta = pblk_get_meta(pblk, meta_list, i);
-                       meta->lba = addr_empty;
-                       ppa_list[i] = dev_ppa;
-               }
-       }
-
-       kref_get(&pad_rq->ref);
-       pblk_down_chunk(pblk, ppa_list[0]);
-
-       ret = pblk_submit_io(pblk, rqd, data);
-       if (ret) {
-               pblk_err(pblk, "I/O submission failed: %d\n", ret);
-               pblk_up_chunk(pblk, ppa_list[0]);
-               kref_put(&pad_rq->ref, pblk_recov_complete);
-               pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-               goto fail_complete;
-       }
-
-       left_line_ppas -= rq_ppas;
-       left_ppas -= rq_ppas;
-       if (left_ppas && left_line_ppas)
-               goto next_pad_rq;
-
-fail_complete:
-       kref_put(&pad_rq->ref, pblk_recov_complete);
-       wait_for_completion(&pad_rq->wait);
-
-       if (!pblk_line_is_full(line))
-               pblk_err(pblk, "corrupted padded line: %d\n", line->id);
-
-       vfree(data);
-free_rq:
-       kfree(pad_rq);
-       return ret;
-}
-
-static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;
-
-       return (distance > line->left_msecs) ? line->left_msecs : distance;
-}
-
-/* Return a chunk belonging to a line by stripe (write order) index */
-static struct nvm_chk_meta *pblk_get_stripe_chunk(struct pblk *pblk,
-                                                 struct pblk_line *line,
-                                                 int index)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_lun *rlun;
-       struct ppa_addr ppa;
-       int pos;
-
-       rlun = &pblk->luns[index];
-       ppa = rlun->bppa;
-       pos = pblk_ppa_to_pos(geo, ppa);
-
-       return &line->chks[pos];
-}
-
-static int pblk_line_wps_are_unbalanced(struct pblk *pblk,
-                                     struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       int blk_in_line = lm->blk_per_line;
-       struct nvm_chk_meta *chunk;
-       u64 max_wp, min_wp;
-       int i;
-
-       i = find_first_zero_bit(line->blk_bitmap, blk_in_line);
-
-       /* If the line has at most one good chunk,
-        * the write pointers can't be unbalanced.
-        */
-       if (i >= (blk_in_line - 1))
-               return 0;
-
-       chunk = pblk_get_stripe_chunk(pblk, line, i);
-       max_wp = chunk->wp;
-       if (max_wp > pblk->max_write_pgs)
-               min_wp = max_wp - pblk->max_write_pgs;
-       else
-               min_wp = 0;
-
-       i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
-       while (i < blk_in_line) {
-               chunk = pblk_get_stripe_chunk(pblk, line, i);
-               if (chunk->wp > max_wp || chunk->wp < min_wp)
-                       return 1;
-
-               i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
-       }
-
-       return 0;
-}
-
-static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
-                              struct pblk_recov_alloc p)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr *ppa_list;
-       void *meta_list;
-       struct nvm_rq *rqd;
-       void *data;
-       dma_addr_t dma_ppa_list, dma_meta_list;
-       __le64 *lba_list;
-       u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
-       bool padded = false;
-       int rq_ppas;
-       int i, j;
-       int ret;
-       u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;
-
-       if (pblk_line_wps_are_unbalanced(pblk, line))
-               pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);
-
-       ppa_list = p.ppa_list;
-       meta_list = p.meta_list;
-       rqd = p.rqd;
-       data = p.data;
-       dma_ppa_list = p.dma_ppa_list;
-       dma_meta_list = p.dma_meta_list;
-
-       lba_list = emeta_to_lbas(pblk, line->emeta->buf);
-
-next_rq:
-       memset(rqd, 0, pblk_g_rq_size);
-
-       rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
-       if (!rq_ppas)
-               rq_ppas = pblk->min_write_pgs;
-
-retry_rq:
-       rqd->bio = NULL;
-       rqd->opcode = NVM_OP_PREAD;
-       rqd->meta_list = meta_list;
-       rqd->nr_ppas = rq_ppas;
-       rqd->ppa_list = ppa_list;
-       rqd->dma_ppa_list = dma_ppa_list;
-       rqd->dma_meta_list = dma_meta_list;
-       ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       if (pblk_io_aligned(pblk, rq_ppas))
-               rqd->is_seq = 1;
-
-       for (i = 0; i < rqd->nr_ppas; ) {
-               struct ppa_addr ppa;
-               int pos;
-
-               ppa = addr_to_gen_ppa(pblk, paddr, line->id);
-               pos = pblk_ppa_to_pos(geo, ppa);
-
-               while (test_bit(pos, line->blk_bitmap)) {
-                       paddr += pblk->min_write_pgs;
-                       ppa = addr_to_gen_ppa(pblk, paddr, line->id);
-                       pos = pblk_ppa_to_pos(geo, ppa);
-               }
-
-               for (j = 0; j < pblk->min_write_pgs; j++, i++)
-                       ppa_list[i] =
-                               addr_to_gen_ppa(pblk, paddr + j, line->id);
-       }
-
-       ret = pblk_submit_io_sync(pblk, rqd, data);
-       if (ret) {
-               pblk_err(pblk, "I/O submission failed: %d\n", ret);
-               return ret;
-       }
-
-       atomic_dec(&pblk->inflight_io);
-
-       /* If a read fails, do a best effort by padding the line and retrying */
-       if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
-               int pad_distance, ret;
-
-               if (padded) {
-                       pblk_log_read_err(pblk, rqd);
-                       return -EINTR;
-               }
-
-               pad_distance = pblk_pad_distance(pblk, line);
-               ret = pblk_recov_pad_line(pblk, line, pad_distance);
-               if (ret) {
-                       return ret;
-               }
-
-               padded = true;
-               goto retry_rq;
-       }
-
-       pblk_get_packed_meta(pblk, rqd);
-
-       for (i = 0; i < rqd->nr_ppas; i++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
-               u64 lba = le64_to_cpu(meta->lba);
-
-               lba_list[paddr++] = cpu_to_le64(lba);
-
-               if (lba == ADDR_EMPTY || lba >= pblk->capacity)
-                       continue;
-
-               line->nr_valid_lbas++;
-               pblk_update_map(pblk, lba, ppa_list[i]);
-       }
-
-       left_ppas -= rq_ppas;
-       if (left_ppas > 0)
-               goto next_rq;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       WARN_ON(padded && !pblk_line_is_full(line));
-#endif
-
-       return 0;
-}
-
-/* Scan line for lbas on the out-of-band area */
-static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct nvm_rq *rqd;
-       struct ppa_addr *ppa_list;
-       void *meta_list;
-       struct pblk_recov_alloc p;
-       void *data;
-       dma_addr_t dma_ppa_list, dma_meta_list;
-       int ret = 0;
-
-       meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
-       if (!meta_list)
-               return -ENOMEM;
-
-       ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
-       dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
-
-       data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
-       if (!data) {
-               ret = -ENOMEM;
-               goto free_meta_list;
-       }
-
-       rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
-       memset(rqd, 0, pblk_g_rq_size);
-
-       p.ppa_list = ppa_list;
-       p.meta_list = meta_list;
-       p.rqd = rqd;
-       p.data = data;
-       p.dma_ppa_list = dma_ppa_list;
-       p.dma_meta_list = dma_meta_list;
-
-       ret = pblk_recov_scan_oob(pblk, line, p);
-       if (ret) {
-               pblk_err(pblk, "could not recover L2P from OOB\n");
-               goto out;
-       }
-
-       if (pblk_line_is_full(line))
-               pblk_line_recov_close(pblk, line);
-
-out:
-       mempool_free(rqd, &pblk->r_rq_pool);
-       kfree(data);
-free_meta_list:
-       nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
-
-       return ret;
-}
-
-/* Insert lines ordered by sequence number (seq_nr) on list */
-static void pblk_recov_line_add_ordered(struct list_head *head,
-                                       struct pblk_line *line)
-{
-       struct pblk_line *t = NULL;
-
-       list_for_each_entry(t, head, list)
-               if (t->seq_nr > line->seq_nr)
-                       break;
-
-       __list_add(&line->list, t->list.prev, &t->list);
-}
-
-static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       unsigned int emeta_secs;
-       u64 emeta_start;
-       struct ppa_addr ppa;
-       int pos;
-
-       emeta_secs = lm->emeta_sec[0];
-       emeta_start = lm->sec_per_line;
-
-       while (emeta_secs) {
-               emeta_start--;
-               ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
-               pos = pblk_ppa_to_pos(geo, ppa);
-               if (!test_bit(pos, line->blk_bitmap))
-                       emeta_secs--;
-       }
-
-       return emeta_start;
-}
-
-static int pblk_recov_check_line_version(struct pblk *pblk,
-                                        struct line_emeta *emeta)
-{
-       struct line_header *header = &emeta->header;
-
-       if (header->version_major != EMETA_VERSION_MAJOR) {
-               pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
-                        header->version_major, EMETA_VERSION_MAJOR);
-               return 1;
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       if (header->version_minor > EMETA_VERSION_MINOR)
-               pblk_info(pblk, "newer line minor version found: %d\n",
-                               header->version_minor);
-#endif
-
-       return 0;
-}
-
-static void pblk_recov_wa_counters(struct pblk *pblk,
-                                  struct line_emeta *emeta)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct line_header *header = &emeta->header;
-       struct wa_counters *wa = emeta_to_wa(lm, emeta);
-
-       /* WA counters were introduced in emeta version 0.2 */
-       if (header->version_major > 0 || header->version_minor >= 2) {
-               u64 user = le64_to_cpu(wa->user);
-               u64 pad = le64_to_cpu(wa->pad);
-               u64 gc = le64_to_cpu(wa->gc);
-
-               atomic64_set(&pblk->user_wa, user);
-               atomic64_set(&pblk->pad_wa, pad);
-               atomic64_set(&pblk->gc_wa, gc);
-
-               pblk->user_rst_wa = user;
-               pblk->pad_rst_wa = pad;
-               pblk->gc_rst_wa = gc;
-       }
-}
-
-static int pblk_line_was_written(struct pblk_line *line,
-                                struct pblk *pblk)
-{
-
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct nvm_chk_meta *chunk;
-       struct ppa_addr bppa;
-       int smeta_blk;
-
-       if (line->state == PBLK_LINESTATE_BAD)
-               return 0;
-
-       smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
-       if (smeta_blk >= lm->blk_per_line)
-               return 0;
-
-       bppa = pblk->luns[smeta_blk].bppa;
-       chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];
-
-       if (chunk->state & NVM_CHK_ST_CLOSED ||
-           (chunk->state & NVM_CHK_ST_OPEN
-            && chunk->wp >= lm->smeta_sec))
-               return 1;
-
-       return 0;
-}
-
-static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       int i;
-
-       for (i = 0; i < lm->blk_per_line; i++)
-               if (line->chks[i].state & NVM_CHK_ST_OPEN)
-                       return true;
-
-       return false;
-}
-
-struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *line, *tline, *data_line = NULL;
-       struct pblk_smeta *smeta;
-       struct pblk_emeta *emeta;
-       struct line_smeta *smeta_buf;
-       int found_lines = 0, recovered_lines = 0, open_lines = 0;
-       int is_next = 0;
-       int meta_line;
-       int i, valid_uuid = 0;
-       LIST_HEAD(recov_list);
-
-       /* TODO: Implement FTL snapshot */
-
-       /* Scan recovery - takes place when FTL snapshot fails */
-       spin_lock(&l_mg->free_lock);
-       meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
-       set_bit(meta_line, &l_mg->meta_bitmap);
-       smeta = l_mg->sline_meta[meta_line];
-       emeta = l_mg->eline_meta[meta_line];
-       smeta_buf = (struct line_smeta *)smeta;
-       spin_unlock(&l_mg->free_lock);
-
-       /* Order data lines using their sequence number */
-       for (i = 0; i < l_mg->nr_lines; i++) {
-               u32 crc;
-
-               line = &pblk->lines[i];
-
-               memset(smeta, 0, lm->smeta_len);
-               line->smeta = smeta;
-               line->lun_bitmap = ((void *)(smeta_buf)) +
-                                               sizeof(struct line_smeta);
-
-               if (!pblk_line_was_written(line, pblk))
-                       continue;
-
-               /* Lines that cannot be read are treated here as not written */
-               if (pblk_line_smeta_read(pblk, line))
-                       continue;
-
-               crc = pblk_calc_smeta_crc(pblk, smeta_buf);
-               if (le32_to_cpu(smeta_buf->crc) != crc)
-                       continue;
-
-               if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
-                       continue;
-
-               if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
-                       pblk_err(pblk, "found incompatible line version %u\n",
-                                       smeta_buf->header.version_major);
-                       return ERR_PTR(-EINVAL);
-               }
-
-               /* The first valid instance uuid is used for initialization */
-               if (!valid_uuid) {
-                       import_guid(&pblk->instance_uuid, smeta_buf->header.uuid);
-                       valid_uuid = 1;
-               }
-
-               if (!guid_equal(&pblk->instance_uuid,
-                               (guid_t *)&smeta_buf->header.uuid)) {
-                       pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
-                                       i);
-                       continue;
-               }
-
-               /* Update line metadata */
-               spin_lock(&line->lock);
-               line->id = le32_to_cpu(smeta_buf->header.id);
-               line->type = le16_to_cpu(smeta_buf->header.type);
-               line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
-               spin_unlock(&line->lock);
-
-               /* Update general metadata */
-               spin_lock(&l_mg->free_lock);
-               if (line->seq_nr >= l_mg->d_seq_nr)
-                       l_mg->d_seq_nr = line->seq_nr + 1;
-               l_mg->nr_free_lines--;
-               spin_unlock(&l_mg->free_lock);
-
-               if (pblk_line_recov_alloc(pblk, line))
-                       goto out;
-
-               pblk_recov_line_add_ordered(&recov_list, line);
-               found_lines++;
-               pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
-                                               line->id, smeta_buf->seq_nr);
-       }
-
-       if (!found_lines) {
-               guid_gen(&pblk->instance_uuid);
-
-               spin_lock(&l_mg->free_lock);
-               WARN_ON_ONCE(!test_and_clear_bit(meta_line,
-                                                       &l_mg->meta_bitmap));
-               spin_unlock(&l_mg->free_lock);
-
-               goto out;
-       }
-
-       /* Verify closed blocks and recover this portion of the L2P table */
-       list_for_each_entry_safe(line, tline, &recov_list, list) {
-               recovered_lines++;
-
-               line->emeta_ssec = pblk_line_emeta_start(pblk, line);
-               line->emeta = emeta;
-               memset(line->emeta->buf, 0, lm->emeta_len[0]);
-
-               if (pblk_line_is_open(pblk, line)) {
-                       pblk_recov_l2p_from_oob(pblk, line);
-                       goto next;
-               }
-
-               if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
-                       pblk_recov_l2p_from_oob(pblk, line);
-                       goto next;
-               }
-
-               if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
-                       pblk_recov_l2p_from_oob(pblk, line);
-                       goto next;
-               }
-
-               if (pblk_recov_check_line_version(pblk, line->emeta->buf))
-                       return ERR_PTR(-EINVAL);
-
-               pblk_recov_wa_counters(pblk, line->emeta->buf);
-
-               if (pblk_recov_l2p_from_emeta(pblk, line))
-                       pblk_recov_l2p_from_oob(pblk, line);
-
-next:
-               if (pblk_line_is_full(line)) {
-                       struct list_head *move_list;
-
-                       spin_lock(&line->lock);
-                       line->state = PBLK_LINESTATE_CLOSED;
-                       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-                       move_list = pblk_line_gc_list(pblk, line);
-                       spin_unlock(&line->lock);
-
-                       spin_lock(&l_mg->gc_lock);
-                       list_move_tail(&line->list, move_list);
-                       spin_unlock(&l_mg->gc_lock);
-
-                       mempool_free(line->map_bitmap, l_mg->bitmap_pool);
-                       line->map_bitmap = NULL;
-                       line->smeta = NULL;
-                       line->emeta = NULL;
-               } else {
-                       spin_lock(&line->lock);
-                       line->state = PBLK_LINESTATE_OPEN;
-                       spin_unlock(&line->lock);
-
-                       line->emeta->mem = 0;
-                       atomic_set(&line->emeta->sync, 0);
-
-                       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-
-                       data_line = line;
-                       line->meta_line = meta_line;
-
-                       open_lines++;
-               }
-       }
-
-       if (!open_lines) {
-               spin_lock(&l_mg->free_lock);
-               WARN_ON_ONCE(!test_and_clear_bit(meta_line,
-                                                       &l_mg->meta_bitmap));
-               spin_unlock(&l_mg->free_lock);
-       } else {
-               spin_lock(&l_mg->free_lock);
-               l_mg->data_line = data_line;
-               /* Allocate next line for preparation */
-               l_mg->data_next = pblk_line_get(pblk);
-               if (l_mg->data_next) {
-                       l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
-                       l_mg->data_next->type = PBLK_LINETYPE_DATA;
-                       is_next = 1;
-               }
-               spin_unlock(&l_mg->free_lock);
-       }
-
-       if (is_next)
-               pblk_line_erase(pblk, l_mg->data_next);
-
-out:
-       if (found_lines != recovered_lines)
-               pblk_err(pblk, "failed to recover all found lines %d/%d\n",
-                                               found_lines, recovered_lines);
-
-       return data_line;
-}
-
-/*
- * Pad current line
- */
-int pblk_recov_pad(struct pblk *pblk)
-{
-       struct pblk_line *line;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       int left_msecs;
-       int ret = 0;
-
-       spin_lock(&l_mg->free_lock);
-       line = l_mg->data_line;
-       left_msecs = line->left_msecs;
-       spin_unlock(&l_mg->free_lock);
-
-       ret = pblk_recov_pad_line(pblk, line, left_msecs);
-       if (ret) {
-               pblk_err(pblk, "tear down padding failed (%d)\n", ret);
-               return ret;
-       }
-
-       pblk_line_close_meta(pblk, line);
-       return ret;
-}
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
deleted file mode 100644 (file)
index a5f8bc2..0000000
+++ /dev/null
@@ -1,254 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-rl.c - pblk's rate limiter for user I/O
- *
- */
-
-#include "pblk.h"
-
-static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
-{
-       mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
-}
-
-int pblk_rl_is_limit(struct pblk_rl *rl)
-{
-       int rb_space;
-
-       rb_space = atomic_read(&rl->rb_space);
-
-       return (rb_space == 0);
-}
-
-int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
-{
-       int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
-       int rb_space = atomic_read(&rl->rb_space);
-
-       if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
-               return NVM_IO_ERR;
-
-       if (rb_user_cnt >= rl->rb_user_max)
-               return NVM_IO_REQUEUE;
-
-       return NVM_IO_OK;
-}
-
-void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
-{
-       int rb_space = atomic_read(&rl->rb_space);
-
-       if (unlikely(rb_space >= 0))
-               atomic_sub(nr_entries, &rl->rb_space);
-}
-
-int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
-{
-       int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
-       int rb_user_active;
-
-       /* If there is no user I/O let GC take over space on the write buffer */
-       rb_user_active = READ_ONCE(rl->rb_user_active);
-       return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
-}
-
-void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
-{
-       atomic_add(nr_entries, &rl->rb_user_cnt);
-
-       /* Release user I/O state. Protect from GC */
-       smp_store_release(&rl->rb_user_active, 1);
-       pblk_rl_kick_u_timer(rl);
-}
-
-void pblk_rl_werr_line_in(struct pblk_rl *rl)
-{
-       atomic_inc(&rl->werr_lines);
-}
-
-void pblk_rl_werr_line_out(struct pblk_rl *rl)
-{
-       atomic_dec(&rl->werr_lines);
-}
-
-void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
-{
-       atomic_add(nr_entries, &rl->rb_gc_cnt);
-}
-
-void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
-{
-       atomic_sub(nr_user, &rl->rb_user_cnt);
-       atomic_sub(nr_gc, &rl->rb_gc_cnt);
-}
-
-unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
-{
-       return atomic_read(&rl->free_blocks);
-}
-
-unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
-{
-       return atomic_read(&rl->free_user_blocks);
-}
-
-static void __pblk_rl_update_rates(struct pblk_rl *rl,
-                                  unsigned long free_blocks)
-{
-       struct pblk *pblk = container_of(rl, struct pblk, rl);
-       int max = rl->rb_budget;
-       int werr_gc_needed = atomic_read(&rl->werr_lines);
-
-       if (free_blocks >= rl->high) {
-               if (werr_gc_needed) {
-                       /* Allocate a small budget for recovering
-                        * lines with write errors
-                        */
-                       rl->rb_gc_max = 1 << rl->rb_windows_pw;
-                       rl->rb_user_max = max - rl->rb_gc_max;
-                       rl->rb_state = PBLK_RL_WERR;
-               } else {
-                       rl->rb_user_max = max;
-                       rl->rb_gc_max = 0;
-                       rl->rb_state = PBLK_RL_OFF;
-               }
-       } else if (free_blocks < rl->high) {
-               int shift = rl->high_pw - rl->rb_windows_pw;
-               int user_windows = free_blocks >> shift;
-               int user_max = user_windows << ilog2(NVM_MAX_VLBA);
-
-               rl->rb_user_max = user_max;
-               rl->rb_gc_max = max - user_max;
-
-               if (free_blocks <= rl->rsv_blocks) {
-                       rl->rb_user_max = 0;
-                       rl->rb_gc_max = max;
-               }
-
-               /* In the worst case, we will need to GC lines in the low list
-                * (high valid sector count). If there are lines to GC on high
-                * or mid lists, these will be prioritized
-                */
-               rl->rb_state = PBLK_RL_LOW;
-       }
-
-       if (rl->rb_state != PBLK_RL_OFF)
-               pblk_gc_should_start(pblk);
-       else
-               pblk_gc_should_stop(pblk);
-}
-
-void pblk_rl_update_rates(struct pblk_rl *rl)
-{
-       __pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
-}
-
-void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
-{
-       int blk_in_line = atomic_read(&line->blk_in_line);
-       int free_blocks;
-
-       atomic_add(blk_in_line, &rl->free_blocks);
-       free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
-
-       __pblk_rl_update_rates(rl, free_blocks);
-}
-
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
-                           bool used)
-{
-       int blk_in_line = atomic_read(&line->blk_in_line);
-       int free_blocks;
-
-       atomic_sub(blk_in_line, &rl->free_blocks);
-
-       if (used)
-               free_blocks = atomic_sub_return(blk_in_line,
-                                                       &rl->free_user_blocks);
-       else
-               free_blocks = atomic_read(&rl->free_user_blocks);
-
-       __pblk_rl_update_rates(rl, free_blocks);
-}
-
-int pblk_rl_high_thrs(struct pblk_rl *rl)
-{
-       return rl->high;
-}
-
-int pblk_rl_max_io(struct pblk_rl *rl)
-{
-       return rl->rb_max_io;
-}
-
-static void pblk_rl_u_timer(struct timer_list *t)
-{
-       struct pblk_rl *rl = from_timer(rl, t, u_timer);
-
-       /* Release user I/O state. Protect from GC */
-       smp_store_release(&rl->rb_user_active, 0);
-}
-
-void pblk_rl_free(struct pblk_rl *rl)
-{
-       del_timer(&rl->u_timer);
-}
-
-void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold)
-{
-       struct pblk *pblk = container_of(rl, struct pblk, rl);
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       int sec_meta, blk_meta;
-       unsigned int rb_windows;
-
-       /* Consider sectors used for metadata */
-       sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
-       blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
-
-       rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
-       rl->high_pw = get_count_order(rl->high);
-
-       rl->rsv_blocks = pblk_get_min_chks(pblk);
-
-       /* This will always be a power-of-2 */
-       rb_windows = budget / NVM_MAX_VLBA;
-       rl->rb_windows_pw = get_count_order(rb_windows);
-
-       /* To start with, all buffer is available to user I/O writers */
-       rl->rb_budget = budget;
-       rl->rb_user_max = budget;
-       rl->rb_gc_max = 0;
-       rl->rb_state = PBLK_RL_HIGH;
-
-       /* Maximize I/O size and ensure that the back threshold is respected */
-       if (threshold)
-               rl->rb_max_io = budget - pblk->min_write_pgs_data - threshold;
-       else
-               rl->rb_max_io = budget - pblk->min_write_pgs_data - 1;
-
-       atomic_set(&rl->rb_user_cnt, 0);
-       atomic_set(&rl->rb_gc_cnt, 0);
-       atomic_set(&rl->rb_space, -1);
-       atomic_set(&rl->werr_lines, 0);
-
-       timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);
-
-       rl->rb_user_active = 0;
-       rl->rb_gc_active = 0;
-}
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
deleted file mode 100644 (file)
index 6387302..0000000
+++ /dev/null
@@ -1,728 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * Implementation of a physical block-device target for Open-channel SSDs.
- *
- * pblk-sysfs.c - pblk's sysfs
- *
- */
-
-#include "pblk.h"
-
-static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_lun *rlun;
-       ssize_t sz = 0;
-       int i;
-
-       for (i = 0; i < geo->all_luns; i++) {
-               int active = 1;
-
-               rlun = &pblk->luns[i];
-               if (!down_trylock(&rlun->wr_sem)) {
-                       active = 0;
-                       up(&rlun->wr_sem);
-               }
-               sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                               "pblk: pos:%d, ch:%d, lun:%d - %d\n",
-                                       i,
-                                       rlun->bppa.a.ch,
-                                       rlun->bppa.a.lun,
-                                       active);
-       }
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
-{
-       int free_blocks, free_user_blocks, total_blocks;
-       int rb_user_max, rb_user_cnt;
-       int rb_gc_max, rb_gc_cnt, rb_budget, rb_state;
-
-       free_blocks = pblk_rl_nr_free_blks(&pblk->rl);
-       free_user_blocks = pblk_rl_nr_user_free_blks(&pblk->rl);
-       rb_user_max = pblk->rl.rb_user_max;
-       rb_user_cnt = atomic_read(&pblk->rl.rb_user_cnt);
-       rb_gc_max = pblk->rl.rb_gc_max;
-       rb_gc_cnt = atomic_read(&pblk->rl.rb_gc_cnt);
-       rb_budget = pblk->rl.rb_budget;
-       rb_state = pblk->rl.rb_state;
-
-       total_blocks = pblk->rl.total_blocks;
-
-       return snprintf(page, PAGE_SIZE,
-               "u:%u/%u,gc:%u/%u(%u)(stop:<%u,full:>%u,free:%d/%d/%d)-%d\n",
-                               rb_user_cnt,
-                               rb_user_max,
-                               rb_gc_cnt,
-                               rb_gc_max,
-                               rb_state,
-                               rb_budget,
-                               pblk->rl.high,
-                               free_blocks,
-                               free_user_blocks,
-                               total_blocks,
-                               READ_ONCE(pblk->rl.rb_user_active));
-}
-
-static ssize_t pblk_sysfs_gc_state_show(struct pblk *pblk, char *page)
-{
-       int gc_enabled, gc_active;
-
-       pblk_gc_sysfs_state_show(pblk, &gc_enabled, &gc_active);
-       return snprintf(page, PAGE_SIZE, "gc_enabled=%d, gc_active=%d\n",
-                                       gc_enabled, gc_active);
-}
-
-static ssize_t pblk_sysfs_stats(struct pblk *pblk, char *page)
-{
-       ssize_t sz;
-
-       sz = snprintf(page, PAGE_SIZE,
-                       "read_failed=%lu, read_high_ecc=%lu, read_empty=%lu, read_failed_gc=%lu, write_failed=%lu, erase_failed=%lu\n",
-                       atomic_long_read(&pblk->read_failed),
-                       atomic_long_read(&pblk->read_high_ecc),
-                       atomic_long_read(&pblk->read_empty),
-                       atomic_long_read(&pblk->read_failed_gc),
-                       atomic_long_read(&pblk->write_failed),
-                       atomic_long_read(&pblk->erase_failed));
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_write_buffer(struct pblk *pblk, char *page)
-{
-       return pblk_rb_sysfs(&pblk->rwb, page);
-}
-
-static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       ssize_t sz = 0;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
-               struct nvm_addrf_12 *gppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
-               sz = scnprintf(page, PAGE_SIZE,
-                       "g:(b:%d)blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n",
-                       pblk->addrf_len,
-                       ppaf->blk_offset, ppaf->blk_len,
-                       ppaf->pg_offset, ppaf->pg_len,
-                       ppaf->lun_offset, ppaf->lun_len,
-                       ppaf->ch_offset, ppaf->ch_len,
-                       ppaf->pln_offset, ppaf->pln_len,
-                       ppaf->sec_offset, ppaf->sec_len);
-
-               sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                       "d:blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n",
-                       gppaf->blk_offset, gppaf->blk_len,
-                       gppaf->pg_offset, gppaf->pg_len,
-                       gppaf->lun_offset, gppaf->lun_len,
-                       gppaf->ch_offset, gppaf->ch_len,
-                       gppaf->pln_offset, gppaf->pln_len,
-                       gppaf->sec_offset, gppaf->sec_len);
-       } else {
-               struct nvm_addrf *ppaf = &pblk->addrf;
-               struct nvm_addrf *gppaf = &geo->addrf;
-
-               sz = scnprintf(page, PAGE_SIZE,
-                       "pblk:(s:%d)ch:%d/%d,lun:%d/%d,chk:%d/%d/sec:%d/%d\n",
-                       pblk->addrf_len,
-                       ppaf->ch_offset, ppaf->ch_len,
-                       ppaf->lun_offset, ppaf->lun_len,
-                       ppaf->chk_offset, ppaf->chk_len,
-                       ppaf->sec_offset, ppaf->sec_len);
-
-               sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                       "device:ch:%d/%d,lun:%d/%d,chk:%d/%d,sec:%d/%d\n",
-                       gppaf->ch_offset, gppaf->ch_len,
-                       gppaf->lun_offset, gppaf->lun_len,
-                       gppaf->chk_offset, gppaf->chk_len,
-                       gppaf->sec_offset, gppaf->sec_len);
-       }
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *line;
-       ssize_t sz = 0;
-       int nr_free_lines;
-       int cur_data, cur_log;
-       int free_line_cnt = 0, closed_line_cnt = 0, emeta_line_cnt = 0;
-       int d_line_cnt = 0, l_line_cnt = 0;
-       int gc_full = 0, gc_high = 0, gc_mid = 0, gc_low = 0, gc_empty = 0;
-       int gc_werr = 0;
-
-       int bad = 0, cor = 0;
-       int msecs = 0, cur_sec = 0, vsc = 0, sec_in_line = 0;
-       int map_weight = 0, meta_weight = 0;
-
-       spin_lock(&l_mg->free_lock);
-       cur_data = (l_mg->data_line) ? l_mg->data_line->id : -1;
-       cur_log = (l_mg->log_line) ? l_mg->log_line->id : -1;
-       nr_free_lines = l_mg->nr_free_lines;
-
-       list_for_each_entry(line, &l_mg->free_list, list)
-               free_line_cnt++;
-       spin_unlock(&l_mg->free_lock);
-
-       spin_lock(&l_mg->close_lock);
-       list_for_each_entry(line, &l_mg->emeta_list, list)
-               emeta_line_cnt++;
-       spin_unlock(&l_mg->close_lock);
-
-       spin_lock(&l_mg->gc_lock);
-       list_for_each_entry(line, &l_mg->gc_full_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_full++;
-       }
-
-       list_for_each_entry(line, &l_mg->gc_high_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_high++;
-       }
-
-       list_for_each_entry(line, &l_mg->gc_mid_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_mid++;
-       }
-
-       list_for_each_entry(line, &l_mg->gc_low_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_low++;
-       }
-
-       list_for_each_entry(line, &l_mg->gc_empty_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_empty++;
-       }
-
-       list_for_each_entry(line, &l_mg->gc_werr_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_werr++;
-       }
-
-       list_for_each_entry(line, &l_mg->bad_list, list)
-               bad++;
-       list_for_each_entry(line, &l_mg->corrupt_list, list)
-               cor++;
-       spin_unlock(&l_mg->gc_lock);
-
-       spin_lock(&l_mg->free_lock);
-       if (l_mg->data_line) {
-               cur_sec = l_mg->data_line->cur_sec;
-               msecs = l_mg->data_line->left_msecs;
-               vsc = le32_to_cpu(*l_mg->data_line->vsc);
-               sec_in_line = l_mg->data_line->sec_in_line;
-               meta_weight = bitmap_weight(&l_mg->meta_bitmap,
-                                                       PBLK_DATA_LINES);
-
-               spin_lock(&l_mg->data_line->lock);
-               if (l_mg->data_line->map_bitmap)
-                       map_weight = bitmap_weight(l_mg->data_line->map_bitmap,
-                                                       lm->sec_per_line);
-               else
-                       map_weight = 0;
-               spin_unlock(&l_mg->data_line->lock);
-       }
-       spin_unlock(&l_mg->free_lock);
-
-       if (nr_free_lines != free_line_cnt)
-               pblk_err(pblk, "corrupted free line list:%d/%d\n",
-                                               nr_free_lines, free_line_cnt);
-
-       sz = scnprintf(page, PAGE_SIZE - sz,
-               "line: nluns:%d, nblks:%d, nsecs:%d\n",
-               geo->all_luns, lm->blk_per_line, lm->sec_per_line);
-
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-               "lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n",
-                                       cur_data, cur_log,
-                                       nr_free_lines,
-                                       emeta_line_cnt, meta_weight,
-                                       closed_line_cnt,
-                                       bad, cor,
-                                       d_line_cnt, l_line_cnt,
-                                       l_mg->nr_lines);
-
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-               "GC: full:%d, high:%d, mid:%d, low:%d, empty:%d, werr: %d, queue:%d\n",
-                       gc_full, gc_high, gc_mid, gc_low, gc_empty, gc_werr,
-                       atomic_read(&pblk->gc.read_inflight_gc));
-
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-               "data (%d) cur:%d, left:%d, vsc:%d, s:%d, map:%d/%d (%d)\n",
-                       cur_data, cur_sec, msecs, vsc, sec_in_line,
-                       map_weight, lm->sec_per_line,
-                       atomic_read(&pblk->inflight_io));
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       ssize_t sz = 0;
-
-       sz = scnprintf(page, PAGE_SIZE - sz,
-                               "smeta - len:%d, secs:%d\n",
-                                       lm->smeta_len, lm->smeta_sec);
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                               "emeta - len:%d, sec:%d, bb_start:%d\n",
-                                       lm->emeta_len[0], lm->emeta_sec[0],
-                                       lm->emeta_bb);
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                               "bitmap lengths: sec:%d, blk:%d, lun:%d\n",
-                                       lm->sec_bitmap_len,
-                                       lm->blk_bitmap_len,
-                                       lm->lun_bitmap_len);
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                               "blk_line:%d, sec_line:%d, sec_blk:%d\n",
-                                       lm->blk_per_line,
-                                       lm->sec_per_line,
-                                       geo->clba);
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_get_sec_per_write(struct pblk *pblk, char *page)
-{
-       return snprintf(page, PAGE_SIZE, "%d\n", pblk->sec_per_write);
-}
-
-static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
-                                 char *page)
-{
-       int sz;
-
-       sz = scnprintf(page, PAGE_SIZE,
-                       "user:%lld gc:%lld pad:%lld WA:",
-                       user, gc, pad);
-
-       if (!user) {
-               sz += scnprintf(page + sz, PAGE_SIZE - sz, "NaN\n");
-       } else {
-               u64 wa_int;
-               u32 wa_frac;
-
-               wa_int = (user + gc + pad) * 100000;
-               wa_int = div64_u64(wa_int, user);
-               wa_int = div_u64_rem(wa_int, 100000, &wa_frac);
-
-               sz += scnprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n",
-                                                       wa_int, wa_frac);
-       }
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_get_write_amp_mileage(struct pblk *pblk, char *page)
-{
-       return pblk_get_write_amp(atomic64_read(&pblk->user_wa),
-               atomic64_read(&pblk->gc_wa), atomic64_read(&pblk->pad_wa),
-               page);
-}
-
-static ssize_t pblk_sysfs_get_write_amp_trip(struct pblk *pblk, char *page)
-{
-       return pblk_get_write_amp(
-               atomic64_read(&pblk->user_wa) - pblk->user_rst_wa,
-               atomic64_read(&pblk->gc_wa) - pblk->gc_rst_wa,
-               atomic64_read(&pblk->pad_wa) - pblk->pad_rst_wa, page);
-}
-
-static long long bucket_percentage(unsigned long long bucket,
-                                  unsigned long long total)
-{
-       int p = bucket * 100;
-
-       p = div_u64(p, total);
-
-       return p;
-}
-
-static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page)
-{
-       int sz = 0;
-       unsigned long long total;
-       unsigned long long total_buckets = 0;
-       int buckets = pblk->min_write_pgs - 1;
-       int i;
-
-       total = atomic64_read(&pblk->nr_flush) - pblk->nr_flush_rst;
-       if (!total) {
-               for (i = 0; i < (buckets + 1); i++)
-                       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                               "%d:0 ", i);
-               sz += scnprintf(page + sz, PAGE_SIZE - sz, "\n");
-
-               return sz;
-       }
-
-       for (i = 0; i < buckets; i++)
-               total_buckets += atomic64_read(&pblk->pad_dist[i]);
-
-       sz += scnprintf(page + sz, PAGE_SIZE - sz, "0:%lld%% ",
-               bucket_percentage(total - total_buckets, total));
-
-       for (i = 0; i < buckets; i++) {
-               unsigned long long p;
-
-               p = bucket_percentage(atomic64_read(&pblk->pad_dist[i]),
-                                         total);
-               sz += scnprintf(page + sz, PAGE_SIZE - sz, "%d:%lld%% ",
-                               i + 1, p);
-       }
-       sz += scnprintf(page + sz, PAGE_SIZE - sz, "\n");
-
-       return sz;
-}
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-static ssize_t pblk_sysfs_stats_debug(struct pblk *pblk, char *page)
-{
-       return snprintf(page, PAGE_SIZE,
-               "%lu\t%lu\t%ld\t%llu\t%ld\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\n",
-                       atomic_long_read(&pblk->inflight_writes),
-                       atomic_long_read(&pblk->inflight_reads),
-                       atomic_long_read(&pblk->req_writes),
-                       (u64)atomic64_read(&pblk->nr_flush),
-                       atomic_long_read(&pblk->padded_writes),
-                       atomic_long_read(&pblk->padded_wb),
-                       atomic_long_read(&pblk->sub_writes),
-                       atomic_long_read(&pblk->sync_writes),
-                       atomic_long_read(&pblk->recov_writes),
-                       atomic_long_read(&pblk->recov_gc_writes),
-                       atomic_long_read(&pblk->recov_gc_reads),
-                       atomic_long_read(&pblk->cache_reads),
-                       atomic_long_read(&pblk->sync_reads));
-}
-#endif
-
-static ssize_t pblk_sysfs_gc_force(struct pblk *pblk, const char *page,
-                                  size_t len)
-{
-       size_t c_len;
-       int force;
-
-       c_len = strcspn(page, "\n");
-       if (c_len >= len)
-               return -EINVAL;
-
-       if (kstrtouint(page, 0, &force))
-               return -EINVAL;
-
-       pblk_gc_sysfs_force(pblk, force);
-
-       return len;
-}
-
-static ssize_t pblk_sysfs_set_sec_per_write(struct pblk *pblk,
-                                            const char *page, size_t len)
-{
-       size_t c_len;
-       int sec_per_write;
-
-       c_len = strcspn(page, "\n");
-       if (c_len >= len)
-               return -EINVAL;
-
-       if (kstrtouint(page, 0, &sec_per_write))
-               return -EINVAL;
-
-       if (!pblk_is_oob_meta_supported(pblk)) {
-               /* For packed metadata case it is
-                * not allowed to change sec_per_write.
-                */
-               return -EINVAL;
-       }
-
-       if (sec_per_write < pblk->min_write_pgs
-                               || sec_per_write > pblk->max_write_pgs
-                               || sec_per_write % pblk->min_write_pgs != 0)
-               return -EINVAL;
-
-       pblk_set_sec_per_write(pblk, sec_per_write);
-
-       return len;
-}
-
-static ssize_t pblk_sysfs_set_write_amp_trip(struct pblk *pblk,
-                       const char *page, size_t len)
-{
-       size_t c_len;
-       int reset_value;
-
-       c_len = strcspn(page, "\n");
-       if (c_len >= len)
-               return -EINVAL;
-
-       if (kstrtouint(page, 0, &reset_value))
-               return -EINVAL;
-
-       if (reset_value !=  0)
-               return -EINVAL;
-
-       pblk->user_rst_wa = atomic64_read(&pblk->user_wa);
-       pblk->pad_rst_wa = atomic64_read(&pblk->pad_wa);
-       pblk->gc_rst_wa = atomic64_read(&pblk->gc_wa);
-
-       return len;
-}
-
-
-static ssize_t pblk_sysfs_set_padding_dist(struct pblk *pblk,
-                       const char *page, size_t len)
-{
-       size_t c_len;
-       int reset_value;
-       int buckets = pblk->min_write_pgs - 1;
-       int i;
-
-       c_len = strcspn(page, "\n");
-       if (c_len >= len)
-               return -EINVAL;
-
-       if (kstrtouint(page, 0, &reset_value))
-               return -EINVAL;
-
-       if (reset_value !=  0)
-               return -EINVAL;
-
-       for (i = 0; i < buckets; i++)
-               atomic64_set(&pblk->pad_dist[i], 0);
-
-       pblk->nr_flush_rst = atomic64_read(&pblk->nr_flush);
-
-       return len;
-}
-
-static struct attribute sys_write_luns = {
-       .name = "write_luns",
-       .mode = 0444,
-};
-
-static struct attribute sys_rate_limiter_attr = {
-       .name = "rate_limiter",
-       .mode = 0444,
-};
-
-static struct attribute sys_gc_state = {
-       .name = "gc_state",
-       .mode = 0444,
-};
-
-static struct attribute sys_errors_attr = {
-       .name = "errors",
-       .mode = 0444,
-};
-
-static struct attribute sys_rb_attr = {
-       .name = "write_buffer",
-       .mode = 0444,
-};
-
-static struct attribute sys_stats_ppaf_attr = {
-       .name = "ppa_format",
-       .mode = 0444,
-};
-
-static struct attribute sys_lines_attr = {
-       .name = "lines",
-       .mode = 0444,
-};
-
-static struct attribute sys_lines_info_attr = {
-       .name = "lines_info",
-       .mode = 0444,
-};
-
-static struct attribute sys_gc_force = {
-       .name = "gc_force",
-       .mode = 0200,
-};
-
-static struct attribute sys_max_sec_per_write = {
-       .name = "max_sec_per_write",
-       .mode = 0644,
-};
-
-static struct attribute sys_write_amp_mileage = {
-       .name = "write_amp_mileage",
-       .mode = 0444,
-};
-
-static struct attribute sys_write_amp_trip = {
-       .name = "write_amp_trip",
-       .mode = 0644,
-};
-
-static struct attribute sys_padding_dist = {
-       .name = "padding_dist",
-       .mode = 0644,
-};
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-static struct attribute sys_stats_debug_attr = {
-       .name = "stats",
-       .mode = 0444,
-};
-#endif
-
-static struct attribute *pblk_attrs[] = {
-       &sys_write_luns,
-       &sys_rate_limiter_attr,
-       &sys_errors_attr,
-       &sys_gc_state,
-       &sys_gc_force,
-       &sys_max_sec_per_write,
-       &sys_rb_attr,
-       &sys_stats_ppaf_attr,
-       &sys_lines_attr,
-       &sys_lines_info_attr,
-       &sys_write_amp_mileage,
-       &sys_write_amp_trip,
-       &sys_padding_dist,
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       &sys_stats_debug_attr,
-#endif
-       NULL,
-};
-
-static ssize_t pblk_sysfs_show(struct kobject *kobj, struct attribute *attr,
-                              char *buf)
-{
-       struct pblk *pblk = container_of(kobj, struct pblk, kobj);
-
-       if (strcmp(attr->name, "rate_limiter") == 0)
-               return pblk_sysfs_rate_limiter(pblk, buf);
-       else if (strcmp(attr->name, "write_luns") == 0)
-               return pblk_sysfs_luns_show(pblk, buf);
-       else if (strcmp(attr->name, "gc_state") == 0)
-               return pblk_sysfs_gc_state_show(pblk, buf);
-       else if (strcmp(attr->name, "errors") == 0)
-               return pblk_sysfs_stats(pblk, buf);
-       else if (strcmp(attr->name, "write_buffer") == 0)
-               return pblk_sysfs_write_buffer(pblk, buf);
-       else if (strcmp(attr->name, "ppa_format") == 0)
-               return pblk_sysfs_ppaf(pblk, buf);
-       else if (strcmp(attr->name, "lines") == 0)
-               return pblk_sysfs_lines(pblk, buf);
-       else if (strcmp(attr->name, "lines_info") == 0)
-               return pblk_sysfs_lines_info(pblk, buf);
-       else if (strcmp(attr->name, "max_sec_per_write") == 0)
-               return pblk_sysfs_get_sec_per_write(pblk, buf);
-       else if (strcmp(attr->name, "write_amp_mileage") == 0)
-               return pblk_sysfs_get_write_amp_mileage(pblk, buf);
-       else if (strcmp(attr->name, "write_amp_trip") == 0)
-               return pblk_sysfs_get_write_amp_trip(pblk, buf);
-       else if (strcmp(attr->name, "padding_dist") == 0)
-               return pblk_sysfs_get_padding_dist(pblk, buf);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       else if (strcmp(attr->name, "stats") == 0)
-               return pblk_sysfs_stats_debug(pblk, buf);
-#endif
-       return 0;
-}
-
-static ssize_t pblk_sysfs_store(struct kobject *kobj, struct attribute *attr,
-                               const char *buf, size_t len)
-{
-       struct pblk *pblk = container_of(kobj, struct pblk, kobj);
-
-       if (strcmp(attr->name, "gc_force") == 0)
-               return pblk_sysfs_gc_force(pblk, buf, len);
-       else if (strcmp(attr->name, "max_sec_per_write") == 0)
-               return pblk_sysfs_set_sec_per_write(pblk, buf, len);
-       else if (strcmp(attr->name, "write_amp_trip") == 0)
-               return pblk_sysfs_set_write_amp_trip(pblk, buf, len);
-       else if (strcmp(attr->name, "padding_dist") == 0)
-               return pblk_sysfs_set_padding_dist(pblk, buf, len);
-       return 0;
-}
-
-static const struct sysfs_ops pblk_sysfs_ops = {
-       .show = pblk_sysfs_show,
-       .store = pblk_sysfs_store,
-};
-
-static struct kobj_type pblk_ktype = {
-       .sysfs_ops      = &pblk_sysfs_ops,
-       .default_attrs  = pblk_attrs,
-};
-
-int pblk_sysfs_init(struct gendisk *tdisk)
-{
-       struct pblk *pblk = tdisk->private_data;
-       struct device *parent_dev = disk_to_dev(pblk->disk);
-       int ret;
-
-       ret = kobject_init_and_add(&pblk->kobj, &pblk_ktype,
-                                       kobject_get(&parent_dev->kobj),
-                                       "%s", "pblk");
-       if (ret) {
-               pblk_err(pblk, "could not register\n");
-               return ret;
-       }
-
-       kobject_uevent(&pblk->kobj, KOBJ_ADD);
-       return 0;
-}
-
-void pblk_sysfs_exit(struct gendisk *tdisk)
-{
-       struct pblk *pblk = tdisk->private_data;
-
-       kobject_uevent(&pblk->kobj, KOBJ_REMOVE);
-       kobject_del(&pblk->kobj);
-       kobject_put(&pblk->kobj);
-}
diff --git a/drivers/lightnvm/pblk-trace.h b/drivers/lightnvm/pblk-trace.h
deleted file mode 100644 (file)
index 47b67c6..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM pblk
-
-#if !defined(_TRACE_PBLK_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_PBLK_H
-
-#include <linux/tracepoint.h>
-
-struct ppa_addr;
-
-#define show_chunk_flags(state) __print_flags(state, "",       \
-       { NVM_CHK_ST_FREE,              "FREE",         },      \
-       { NVM_CHK_ST_CLOSED,            "CLOSED",       },      \
-       { NVM_CHK_ST_OPEN,              "OPEN",         },      \
-       { NVM_CHK_ST_OFFLINE,           "OFFLINE",      })
-
-#define show_line_state(state) __print_symbolic(state,         \
-       { PBLK_LINESTATE_NEW,           "NEW",          },      \
-       { PBLK_LINESTATE_FREE,          "FREE",         },      \
-       { PBLK_LINESTATE_OPEN,          "OPEN",         },      \
-       { PBLK_LINESTATE_CLOSED,        "CLOSED",       },      \
-       { PBLK_LINESTATE_GC,            "GC",           },      \
-       { PBLK_LINESTATE_BAD,           "BAD",          },      \
-       { PBLK_LINESTATE_CORRUPT,       "CORRUPT"       })
-
-
-#define show_pblk_state(state) __print_symbolic(state,         \
-       { PBLK_STATE_RUNNING,           "RUNNING",      },      \
-       { PBLK_STATE_STOPPING,          "STOPPING",     },      \
-       { PBLK_STATE_RECOVERING,        "RECOVERING",   },      \
-       { PBLK_STATE_STOPPED,           "STOPPED"       })
-
-#define show_chunk_erase_state(state) __print_symbolic(state,  \
-       { PBLK_CHUNK_RESET_START,       "START",        },      \
-       { PBLK_CHUNK_RESET_DONE,        "OK",           },      \
-       { PBLK_CHUNK_RESET_FAILED,      "FAILED"        })
-
-
-TRACE_EVENT(pblk_chunk_reset,
-
-       TP_PROTO(const char *name, struct ppa_addr *ppa, int state),
-
-       TP_ARGS(name, ppa, state),
-
-       TP_STRUCT__entry(
-               __string(name, name)
-               __field(u64, ppa)
-               __field(int, state)
-       ),
-
-       TP_fast_assign(
-               __assign_str(name, name);
-               __entry->ppa = ppa->ppa;
-               __entry->state = state;
-       ),
-
-       TP_printk("dev=%s grp=%llu pu=%llu chk=%llu state=%s", __get_str(name),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.grp),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.pu),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.chk),
-                       show_chunk_erase_state((int)__entry->state))
-
-);
-
-TRACE_EVENT(pblk_chunk_state,
-
-       TP_PROTO(const char *name, struct ppa_addr *ppa, int state),
-
-       TP_ARGS(name, ppa, state),
-
-       TP_STRUCT__entry(
-               __string(name, name)
-               __field(u64, ppa)
-               __field(int, state)
-       ),
-
-       TP_fast_assign(
-               __assign_str(name, name);
-               __entry->ppa = ppa->ppa;
-               __entry->state = state;
-       ),
-
-       TP_printk("dev=%s grp=%llu pu=%llu chk=%llu state=%s", __get_str(name),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.grp),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.pu),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.chk),
-                       show_chunk_flags((int)__entry->state))
-
-);
-
-TRACE_EVENT(pblk_line_state,
-
-       TP_PROTO(const char *name, int line, int state),
-
-       TP_ARGS(name, line, state),
-
-       TP_STRUCT__entry(
-               __string(name, name)
-               __field(int, line)
-               __field(int, state)
-       ),
-
-       TP_fast_assign(
-               __assign_str(name, name);
-               __entry->line = line;
-               __entry->state = state;
-       ),
-
-       TP_printk("dev=%s line=%d state=%s", __get_str(name),
-                       (int)__entry->line,
-                       show_line_state((int)__entry->state))
-
-);
-
-TRACE_EVENT(pblk_state,
-
-       TP_PROTO(const char *name, int state),
-
-       TP_ARGS(name, state),
-
-       TP_STRUCT__entry(
-               __string(name, name)
-               __field(int, state)
-       ),
-
-       TP_fast_assign(
-               __assign_str(name, name);
-               __entry->state = state;
-       ),
-
-       TP_printk("dev=%s state=%s", __get_str(name),
-                       show_pblk_state((int)__entry->state))
-
-);
-
-#endif /* !defined(_TRACE_PBLK_H) || defined(TRACE_HEADER_MULTI_READ) */
-
-/* This part must be outside protection */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/lightnvm
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE pblk-trace
-#include <trace/define_trace.h>
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
deleted file mode 100644 (file)
index b9a2aeb..0000000
+++ /dev/null
@@ -1,665 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-write.c - pblk's write path from write buffer to media
- */
-
-#include "pblk.h"
-#include "pblk-trace.h"
-
-static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
-                                   struct pblk_c_ctx *c_ctx)
-{
-       struct bio *original_bio;
-       struct pblk_rb *rwb = &pblk->rwb;
-       unsigned long ret;
-       int i;
-
-       for (i = 0; i < c_ctx->nr_valid; i++) {
-               struct pblk_w_ctx *w_ctx;
-               int pos = c_ctx->sentry + i;
-               int flags;
-
-               w_ctx = pblk_rb_w_ctx(rwb, pos);
-               flags = READ_ONCE(w_ctx->flags);
-
-               if (flags & PBLK_FLUSH_ENTRY) {
-                       flags &= ~PBLK_FLUSH_ENTRY;
-                       /* Release flags on context. Protect from writes */
-                       smp_store_release(&w_ctx->flags, flags);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       atomic_dec(&rwb->inflight_flush_point);
-#endif
-               }
-
-               while ((original_bio = bio_list_pop(&w_ctx->bios)))
-                       bio_endio(original_bio);
-       }
-
-       if (c_ctx->nr_padded)
-               pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
-                                                       c_ctx->nr_padded);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
-#endif
-
-       ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);
-
-       bio_put(rqd->bio);
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE);
-
-       return ret;
-}
-
-static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
-                                          struct nvm_rq *rqd,
-                                          struct pblk_c_ctx *c_ctx)
-{
-       list_del(&c_ctx->list);
-       return pblk_end_w_bio(pblk, rqd, c_ctx);
-}
-
-static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
-                               struct pblk_c_ctx *c_ctx)
-{
-       struct pblk_c_ctx *c, *r;
-       unsigned long flags;
-       unsigned long pos;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
-#endif
-       pblk_up_rq(pblk, c_ctx->lun_bitmap);
-
-       pos = pblk_rb_sync_init(&pblk->rwb, &flags);
-       if (pos == c_ctx->sentry) {
-               pos = pblk_end_w_bio(pblk, rqd, c_ctx);
-
-retry:
-               list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
-                       rqd = nvm_rq_from_c_ctx(c);
-                       if (c->sentry == pos) {
-                               pos = pblk_end_queued_w_bio(pblk, rqd, c);
-                               goto retry;
-                       }
-               }
-       } else {
-               WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
-               list_add_tail(&c_ctx->list, &pblk->compl_list);
-       }
-       pblk_rb_sync_end(&pblk->rwb, &flags);
-}
-
-/* Map remaining sectors in chunk, starting from ppa */
-static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
-               int rqd_ppas)
-{
-       struct pblk_line *line;
-       struct ppa_addr map_ppa = *ppa;
-       __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-       __le64 *lba_list;
-       u64 paddr;
-       int done = 0;
-       int n = 0;
-
-       line = pblk_ppa_to_line(pblk, *ppa);
-       lba_list = emeta_to_lbas(pblk, line->emeta->buf);
-
-       spin_lock(&line->lock);
-
-       while (!done)  {
-               paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);
-
-               if (!test_and_set_bit(paddr, line->map_bitmap))
-                       line->left_msecs--;
-
-               if (n < rqd_ppas && lba_list[paddr] != addr_empty)
-                       line->nr_valid_lbas--;
-
-               lba_list[paddr] = addr_empty;
-
-               if (!test_and_set_bit(paddr, line->invalid_bitmap))
-                       le32_add_cpu(line->vsc, -1);
-
-               done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);
-
-               n++;
-       }
-
-       line->w_err_gc->has_write_err = 1;
-       spin_unlock(&line->lock);
-}
-
-static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
-                                 unsigned int nr_entries)
-{
-       struct pblk_rb *rb = &pblk->rwb;
-       struct pblk_rb_entry *entry;
-       struct pblk_line *line;
-       struct pblk_w_ctx *w_ctx;
-       struct ppa_addr ppa_l2p;
-       int flags;
-       unsigned int i;
-
-       spin_lock(&pblk->trans_lock);
-       for (i = 0; i < nr_entries; i++) {
-               entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
-               w_ctx = &entry->w_ctx;
-
-               /* Check if the lba has been overwritten */
-               if (w_ctx->lba != ADDR_EMPTY) {
-                       ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
-                       if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
-                               w_ctx->lba = ADDR_EMPTY;
-               }
-
-               /* Mark up the entry as submittable again */
-               flags = READ_ONCE(w_ctx->flags);
-               flags |= PBLK_WRITTEN_DATA;
-               /* Release flags on write context. Protect from writes */
-               smp_store_release(&w_ctx->flags, flags);
-
-               /* Decrease the reference count to the line as we will
-                * re-map these entries
-                */
-               line = pblk_ppa_to_line(pblk, w_ctx->ppa);
-               atomic_dec(&line->sec_to_update);
-               kref_put(&line->ref, pblk_line_put);
-       }
-       spin_unlock(&pblk->trans_lock);
-}
-
-static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
-{
-       struct pblk_c_ctx *r_ctx;
-
-       r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
-       if (!r_ctx)
-               return;
-
-       r_ctx->lun_bitmap = NULL;
-       r_ctx->sentry = c_ctx->sentry;
-       r_ctx->nr_valid = c_ctx->nr_valid;
-       r_ctx->nr_padded = c_ctx->nr_padded;
-
-       spin_lock(&pblk->resubmit_lock);
-       list_add_tail(&r_ctx->list, &pblk->resubmit_list);
-       spin_unlock(&pblk->resubmit_lock);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
-#endif
-}
-
-static void pblk_submit_rec(struct work_struct *work)
-{
-       struct pblk_rec_ctx *recovery =
-                       container_of(work, struct pblk_rec_ctx, ws_rec);
-       struct pblk *pblk = recovery->pblk;
-       struct nvm_rq *rqd = recovery->rqd;
-       struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       pblk_log_write_err(pblk, rqd);
-
-       pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
-       pblk_queue_resubmit(pblk, c_ctx);
-
-       pblk_up_rq(pblk, c_ctx->lun_bitmap);
-       if (c_ctx->nr_padded)
-               pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
-                                                       c_ctx->nr_padded);
-       bio_put(rqd->bio);
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE);
-       mempool_free(recovery, &pblk->rec_pool);
-
-       atomic_dec(&pblk->inflight_io);
-       pblk_write_kick(pblk);
-}
-
-
-static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct pblk_rec_ctx *recovery;
-
-       recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
-       if (!recovery) {
-               pblk_err(pblk, "could not allocate recovery work\n");
-               return;
-       }
-
-       recovery->pblk = pblk;
-       recovery->rqd = rqd;
-
-       INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
-       queue_work(pblk->close_wq, &recovery->ws_rec);
-}
-
-static void pblk_end_io_write(struct nvm_rq *rqd)
-{
-       struct pblk *pblk = rqd->private;
-       struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-
-       if (rqd->error) {
-               pblk_end_w_fail(pblk, rqd);
-               return;
-       } else {
-               if (trace_pblk_chunk_state_enabled())
-                       pblk_check_chunk_state_update(pblk, rqd);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-               WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
-#endif
-       }
-
-       pblk_complete_write(pblk, rqd, c_ctx);
-       atomic_dec(&pblk->inflight_io);
-}
-
-static void pblk_end_io_write_meta(struct nvm_rq *rqd)
-{
-       struct pblk *pblk = rqd->private;
-       struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
-       struct pblk_line *line = m_ctx->private;
-       struct pblk_emeta *emeta = line->emeta;
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       int sync;
-
-       pblk_up_chunk(pblk, ppa_list[0]);
-
-       if (rqd->error) {
-               pblk_log_write_err(pblk, rqd);
-               pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
-               line->w_err_gc->has_write_err = 1;
-       } else {
-               if (trace_pblk_chunk_state_enabled())
-                       pblk_check_chunk_state_update(pblk, rqd);
-       }
-
-       sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
-       if (sync == emeta->nr_entries)
-               pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
-                                               GFP_ATOMIC, pblk->close_wq);
-
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-
-       atomic_dec(&pblk->inflight_io);
-}
-
-static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                          unsigned int nr_secs, nvm_end_io_fn(*end_io))
-{
-       /* Setup write request */
-       rqd->opcode = NVM_OP_PWRITE;
-       rqd->nr_ppas = nr_secs;
-       rqd->is_seq = 1;
-       rqd->private = pblk;
-       rqd->end_io = end_io;
-
-       return pblk_alloc_rqd_meta(pblk, rqd);
-}
-
-static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                          struct ppa_addr *erase_ppa)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line *e_line = pblk_line_get_erase(pblk);
-       struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-       unsigned int valid = c_ctx->nr_valid;
-       unsigned int padded = c_ctx->nr_padded;
-       unsigned int nr_secs = valid + padded;
-       unsigned long *lun_bitmap;
-       int ret;
-
-       lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
-       if (!lun_bitmap)
-               return -ENOMEM;
-       c_ctx->lun_bitmap = lun_bitmap;
-
-       ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
-       if (ret) {
-               kfree(lun_bitmap);
-               return ret;
-       }
-
-       if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
-               ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
-                                                       valid, 0);
-       else
-               ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
-                                                       valid, erase_ppa);
-
-       return ret;
-}
-
-static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
-                                 unsigned int secs_to_flush)
-{
-       int secs_to_sync;
-
-       secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       if ((!secs_to_sync && secs_to_flush)
-                       || (secs_to_sync < 0)
-                       || (secs_to_sync > secs_avail && !secs_to_flush)) {
-               pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
-                               secs_avail, secs_to_sync, secs_to_flush);
-       }
-#endif
-
-       return secs_to_sync;
-}
-
-int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_emeta *emeta = meta_line->emeta;
-       struct ppa_addr *ppa_list;
-       struct pblk_g_ctx *m_ctx;
-       struct nvm_rq *rqd;
-       void *data;
-       u64 paddr;
-       int rq_ppas = pblk->min_write_pgs;
-       int id = meta_line->id;
-       int rq_len;
-       int i, j;
-       int ret;
-
-       rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
-
-       m_ctx = nvm_rq_to_pdu(rqd);
-       m_ctx->private = meta_line;
-
-       rq_len = rq_ppas * geo->csecs;
-       data = ((void *)emeta->buf) + emeta->mem;
-
-       ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
-       if (ret)
-               goto fail_free_rqd;
-
-       ppa_list = nvm_rq_to_ppa_list(rqd);
-       for (i = 0; i < rqd->nr_ppas; ) {
-               spin_lock(&meta_line->lock);
-               paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
-               spin_unlock(&meta_line->lock);
-               for (j = 0; j < rq_ppas; j++, i++, paddr++)
-                       ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
-       }
-
-       spin_lock(&l_mg->close_lock);
-       emeta->mem += rq_len;
-       if (emeta->mem >= lm->emeta_len[0])
-               list_del(&meta_line->list);
-       spin_unlock(&l_mg->close_lock);
-
-       pblk_down_chunk(pblk, ppa_list[0]);
-
-       ret = pblk_submit_io(pblk, rqd, data);
-       if (ret) {
-               pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
-               goto fail_rollback;
-       }
-
-       return NVM_IO_OK;
-
-fail_rollback:
-       pblk_up_chunk(pblk, ppa_list[0]);
-       spin_lock(&l_mg->close_lock);
-       pblk_dealloc_page(pblk, meta_line, rq_ppas);
-       list_add(&meta_line->list, &meta_line->list);
-       spin_unlock(&l_mg->close_lock);
-fail_free_rqd:
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-       return ret;
-}
-
-static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
-                                      struct pblk_line *meta_line,
-                                      struct nvm_rq *data_rqd)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
-       struct pblk_line *data_line = pblk_line_get_data(pblk);
-       struct ppa_addr ppa, ppa_opt;
-       u64 paddr;
-       int pos_opt;
-
-       /* Schedule a metadata I/O that is half the distance from the data I/O
-        * with regards to the number of LUNs forming the pblk instance. This
-        * balances LUN conflicts across every I/O.
-        *
-        * When the LUN configuration changes (e.g., due to GC), this distance
-        * can align, which would result in metadata and data I/Os colliding. In
-        * this case, modify the distance to not be optimal, but move the
-        * optimal in the right direction.
-        */
-       paddr = pblk_lookup_page(pblk, meta_line);
-       ppa = addr_to_gen_ppa(pblk, paddr, 0);
-       ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
-       pos_opt = pblk_ppa_to_pos(geo, ppa_opt);
-
-       if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
-                               test_bit(pos_opt, data_line->blk_bitmap))
-               return true;
-
-       if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
-               data_line->meta_distance--;
-
-       return false;
-}
-
-static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
-                                                   struct nvm_rq *data_rqd)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *meta_line;
-
-       spin_lock(&l_mg->close_lock);
-       if (list_empty(&l_mg->emeta_list)) {
-               spin_unlock(&l_mg->close_lock);
-               return NULL;
-       }
-       meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
-       if (meta_line->emeta->mem >= lm->emeta_len[0]) {
-               spin_unlock(&l_mg->close_lock);
-               return NULL;
-       }
-       spin_unlock(&l_mg->close_lock);
-
-       if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
-               return NULL;
-
-       return meta_line;
-}
-
-static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct ppa_addr erase_ppa;
-       struct pblk_line *meta_line;
-       int err;
-
-       pblk_ppa_set_empty(&erase_ppa);
-
-       /* Assign lbas to ppas and populate request structure */
-       err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
-       if (err) {
-               pblk_err(pblk, "could not setup write request: %d\n", err);
-               return NVM_IO_ERR;
-       }
-
-       meta_line = pblk_should_submit_meta_io(pblk, rqd);
-
-       /* Submit data write for current data line */
-       err = pblk_submit_io(pblk, rqd, NULL);
-       if (err) {
-               pblk_err(pblk, "data I/O submission failed: %d\n", err);
-               return NVM_IO_ERR;
-       }
-
-       if (!pblk_ppa_empty(erase_ppa)) {
-               /* Submit erase for next data line */
-               if (pblk_blk_erase_async(pblk, erase_ppa)) {
-                       struct pblk_line *e_line = pblk_line_get_erase(pblk);
-                       struct nvm_tgt_dev *dev = pblk->dev;
-                       struct nvm_geo *geo = &dev->geo;
-                       int bit;
-
-                       atomic_inc(&e_line->left_eblks);
-                       bit = pblk_ppa_to_pos(geo, erase_ppa);
-                       WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
-               }
-       }
-
-       if (meta_line) {
-               /* Submit metadata write for previous data line */
-               err = pblk_submit_meta_io(pblk, meta_line);
-               if (err) {
-                       pblk_err(pblk, "metadata I/O submission failed: %d",
-                                       err);
-                       return NVM_IO_ERR;
-               }
-       }
-
-       return NVM_IO_OK;
-}
-
-static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-       struct bio *bio = rqd->bio;
-
-       if (c_ctx->nr_padded)
-               pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
-                                                       c_ctx->nr_padded);
-}
-
-static int pblk_submit_write(struct pblk *pblk, int *secs_left)
-{
-       struct bio *bio;
-       struct nvm_rq *rqd;
-       unsigned int secs_avail, secs_to_sync, secs_to_com;
-       unsigned int secs_to_flush, packed_meta_pgs;
-       unsigned long pos;
-       unsigned int resubmit;
-
-       *secs_left = 0;
-
-       spin_lock(&pblk->resubmit_lock);
-       resubmit = !list_empty(&pblk->resubmit_list);
-       spin_unlock(&pblk->resubmit_lock);
-
-       /* Resubmit failed writes first */
-       if (resubmit) {
-               struct pblk_c_ctx *r_ctx;
-
-               spin_lock(&pblk->resubmit_lock);
-               r_ctx = list_first_entry(&pblk->resubmit_list,
-                                       struct pblk_c_ctx, list);
-               list_del(&r_ctx->list);
-               spin_unlock(&pblk->resubmit_lock);
-
-               secs_avail = r_ctx->nr_valid;
-               pos = r_ctx->sentry;
-
-               pblk_prepare_resubmit(pblk, pos, secs_avail);
-               secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
-                               secs_avail);
-
-               kfree(r_ctx);
-       } else {
-               /* If there are no sectors in the cache,
-                * flushes (bios without data) will be cleared on
-                * the cache threads
-                */
-               secs_avail = pblk_rb_read_count(&pblk->rwb);
-               if (!secs_avail)
-                       return 0;
-
-               secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
-               if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
-                       return 0;
-
-               secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
-                                       secs_to_flush);
-               if (secs_to_sync > pblk->max_write_pgs) {
-                       pblk_err(pblk, "bad buffer sync calculation\n");
-                       return 0;
-               }
-
-               secs_to_com = (secs_to_sync > secs_avail) ?
-                       secs_avail : secs_to_sync;
-               pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
-       }
-
-       packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
-       bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);
-
-       bio->bi_iter.bi_sector = 0; /* internal bio */
-       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
-       rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
-       rqd->bio = bio;
-
-       if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
-                                                               secs_avail)) {
-               pblk_err(pblk, "corrupted write bio\n");
-               goto fail_put_bio;
-       }
-
-       if (pblk_submit_io_set(pblk, rqd))
-               goto fail_free_bio;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(secs_to_sync, &pblk->sub_writes);
-#endif
-
-       *secs_left = 1;
-       return 0;
-
-fail_free_bio:
-       pblk_free_write_rqd(pblk, rqd);
-fail_put_bio:
-       bio_put(bio);
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE);
-
-       return -EINTR;
-}
-
-int pblk_write_ts(void *data)
-{
-       struct pblk *pblk = data;
-       int secs_left;
-       int write_failure = 0;
-
-       while (!kthread_should_stop()) {
-               if (!write_failure) {
-                       write_failure = pblk_submit_write(pblk, &secs_left);
-
-                       if (secs_left)
-                               continue;
-               }
-               set_current_state(TASK_INTERRUPTIBLE);
-               io_schedule();
-       }
-
-       return 0;
-}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
deleted file mode 100644 (file)
index 86ffa87..0000000
+++ /dev/null
@@ -1,1358 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Matias Bjorling <matias@cnexlabs.com>
- * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * Implementation of a Physical Block-device target for Open-channel SSDs.
- *
- */
-
-#ifndef PBLK_H_
-#define PBLK_H_
-
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/vmalloc.h>
-#include <linux/crc32.h>
-#include <linux/uuid.h>
-
-#include <linux/lightnvm.h>
-
-/* Run only GC if less than 1/X blocks are free */
-#define GC_LIMIT_INVERSE 5
-#define GC_TIME_MSECS 1000
-
-#define PBLK_SECTOR (512)
-#define PBLK_EXPOSED_PAGE_SIZE (4096)
-
-#define PBLK_NR_CLOSE_JOBS (4)
-
-#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
-
-/* Max 512 LUNs per device */
-#define PBLK_MAX_LUNS_BITMAP (4)
-
-#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
-
-/* Static pool sizes */
-#define PBLK_GEN_WS_POOL_SIZE (2)
-
-#define PBLK_DEFAULT_OP (11)
-
-enum {
-       PBLK_READ               = READ,
-       PBLK_WRITE              = WRITE,/* Write from write buffer */
-       PBLK_WRITE_INT,                 /* Internal write - no write buffer */
-       PBLK_READ_RECOV,                /* Recovery read - errors allowed */
-       PBLK_ERASE,
-};
-
-enum {
-       /* IO Types */
-       PBLK_IOTYPE_USER        = 1 << 0,
-       PBLK_IOTYPE_GC          = 1 << 1,
-
-       /* Write buffer flags */
-       PBLK_FLUSH_ENTRY        = 1 << 2,
-       PBLK_WRITTEN_DATA       = 1 << 3,
-       PBLK_SUBMITTED_ENTRY    = 1 << 4,
-       PBLK_WRITABLE_ENTRY     = 1 << 5,
-};
-
-enum {
-       PBLK_BLK_ST_OPEN =      0x1,
-       PBLK_BLK_ST_CLOSED =    0x2,
-};
-
-enum {
-       PBLK_CHUNK_RESET_START,
-       PBLK_CHUNK_RESET_DONE,
-       PBLK_CHUNK_RESET_FAILED,
-};
-
-struct pblk_sec_meta {
-       u64 reserved;
-       __le64 lba;
-};
-
-/* The number of GC lists and the rate-limiter states go together. This way the
- * rate-limiter can dictate how much GC is needed based on resource utilization.
- */
-#define PBLK_GC_NR_LISTS 4
-
-enum {
-       PBLK_RL_OFF = 0,
-       PBLK_RL_WERR = 1,
-       PBLK_RL_HIGH = 2,
-       PBLK_RL_MID = 3,
-       PBLK_RL_LOW = 4
-};
-
-#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)
-
-/* write buffer completion context */
-struct pblk_c_ctx {
-       struct list_head list;          /* Head for out-of-order completion */
-
-       unsigned long *lun_bitmap;      /* Luns used on current request */
-       unsigned int sentry;
-       unsigned int nr_valid;
-       unsigned int nr_padded;
-};
-
-/* read context */
-struct pblk_g_ctx {
-       void *private;
-       unsigned long start_time;
-       u64 lba;
-};
-
-/* Pad context */
-struct pblk_pad_rq {
-       struct pblk *pblk;
-       struct completion wait;
-       struct kref ref;
-};
-
-/* Recovery context */
-struct pblk_rec_ctx {
-       struct pblk *pblk;
-       struct nvm_rq *rqd;
-       struct work_struct ws_rec;
-};
-
-/* Write context */
-struct pblk_w_ctx {
-       struct bio_list bios;           /* Original bios - used for completion
-                                        * in REQ_FUA, REQ_FLUSH case
-                                        */
-       u64 lba;                        /* Logical addr. associated with entry */
-       struct ppa_addr ppa;            /* Physical addr. associated with entry */
-       int flags;                      /* Write context flags */
-};
-
-struct pblk_rb_entry {
-       struct ppa_addr cacheline;      /* Cacheline for this entry */
-       void *data;                     /* Pointer to data on this entry */
-       struct pblk_w_ctx w_ctx;        /* Context for this entry */
-       struct list_head index;         /* List head to enable indexes */
-};
-
-#define EMPTY_ENTRY (~0U)
-
-struct pblk_rb_pages {
-       struct page *pages;
-       int order;
-       struct list_head list;
-};
-
-struct pblk_rb {
-       struct pblk_rb_entry *entries;  /* Ring buffer entries */
-       unsigned int mem;               /* Write offset - points to next
-                                        * writable entry in memory
-                                        */
-       unsigned int subm;              /* Read offset - points to last entry
-                                        * that has been submitted to the media
-                                        * to be persisted
-                                        */
-       unsigned int sync;              /* Synced - backpointer that signals
-                                        * the last submitted entry that has
-                                        * been successfully persisted to media
-                                        */
-       unsigned int flush_point;       /* Sync point - last entry that must be
-                                        * flushed to the media. Used with
-                                        * REQ_FLUSH and REQ_FUA
-                                        */
-       unsigned int l2p_update;        /* l2p update point - next entry for
-                                        * which l2p mapping will be updated to
-                                        * contain a device ppa address (instead
-                                        * of a cacheline)
-                                        */
-       unsigned int nr_entries;        /* Number of entries in write buffer -
-                                        * must be a power of two
-                                        */
-       unsigned int seg_size;          /* Size of the data segments being
-                                        * stored on each entry. Typically this
-                                        * will be 4KB
-                                        */
-
-       unsigned int back_thres;        /* Threshold that shall be maintained by
-                                        * the backpointer in order to respect
-                                        * geo->mw_cunits on a per chunk basis
-                                        */
-
-       struct list_head pages;         /* List of data pages */
-
-       spinlock_t w_lock;              /* Write lock */
-       spinlock_t s_lock;              /* Sync lock */
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_t inflight_flush_point;  /* Not served REQ_FLUSH | REQ_FUA */
-#endif
-};
-
-#define PBLK_RECOVERY_SECTORS 16
-
-struct pblk_lun {
-       struct ppa_addr bppa;
-       struct semaphore wr_sem;
-};
-
-struct pblk_gc_rq {
-       struct pblk_line *line;
-       void *data;
-       u64 paddr_list[NVM_MAX_VLBA];
-       u64 lba_list[NVM_MAX_VLBA];
-       int nr_secs;
-       int secs_to_gc;
-       struct list_head list;
-};
-
-struct pblk_gc {
-       /* These states are not protected by a lock since (i) they are in the
-        * fast path, and (ii) they are not critical.
-        */
-       int gc_active;
-       int gc_enabled;
-       int gc_forced;
-
-       struct task_struct *gc_ts;
-       struct task_struct *gc_writer_ts;
-       struct task_struct *gc_reader_ts;
-
-       struct workqueue_struct *gc_line_reader_wq;
-       struct workqueue_struct *gc_reader_wq;
-
-       struct timer_list gc_timer;
-
-       struct semaphore gc_sem;
-       atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
-       atomic_t pipeline_gc;      /* Number of lines in the GC pipeline -
-                                   * started reads to finished writes
-                                   */
-       int w_entries;
-
-       struct list_head w_list;
-       struct list_head r_list;
-
-       spinlock_t lock;
-       spinlock_t w_lock;
-       spinlock_t r_lock;
-};
-
-struct pblk_rl {
-       unsigned int high;      /* Upper threshold for rate limiter (free run -
-                                * user I/O rate limiter
-                                */
-       unsigned int high_pw;   /* High rounded up as a power of 2 */
-
-#define PBLK_USER_HIGH_THRS 8  /* Begin write limit at 12% available blks */
-#define PBLK_USER_LOW_THRS 10  /* Aggressive GC at 10% available blocks */
-
-       int rb_windows_pw;      /* Number of rate windows in the write buffer
-                                * given as a power-of-2. This guarantees that
-                                * when user I/O is being rate limited, there
-                                * will be enough space reserved for the GC to
-                                * place its payload. A window is of
-                                * pblk->max_write_pgs size, which in NVMe is
-                                * 64, i.e., 256kb.
-                                */
-       int rb_budget;          /* Total number of entries available for I/O */
-       int rb_user_max;        /* Max buffer entries available for user I/O */
-       int rb_gc_max;          /* Max buffer entries available for GC I/O */
-       int rb_gc_rsv;          /* Reserved buffer entries for GC I/O */
-       int rb_state;           /* Rate-limiter current state */
-       int rb_max_io;          /* Maximum size for an I/O giving the config */
-
-       atomic_t rb_user_cnt;   /* User I/O buffer counter */
-       atomic_t rb_gc_cnt;     /* GC I/O buffer counter */
-       atomic_t rb_space;      /* Space limit in case of reaching capacity */
-
-       int rsv_blocks;         /* Reserved blocks for GC */
-
-       int rb_user_active;
-       int rb_gc_active;
-
-       atomic_t werr_lines;    /* Number of write error lines that need gc */
-
-       struct timer_list u_timer;
-
-       unsigned long total_blocks;
-
-       atomic_t free_blocks;           /* Total number of free blocks (+ OP) */
-       atomic_t free_user_blocks;      /* Number of user free blocks (no OP) */
-};
-
-#define PBLK_LINE_EMPTY (~0U)
-
-enum {
-       /* Line Types */
-       PBLK_LINETYPE_FREE = 0,
-       PBLK_LINETYPE_LOG = 1,
-       PBLK_LINETYPE_DATA = 2,
-
-       /* Line state */
-       PBLK_LINESTATE_NEW = 9,
-       PBLK_LINESTATE_FREE = 10,
-       PBLK_LINESTATE_OPEN = 11,
-       PBLK_LINESTATE_CLOSED = 12,
-       PBLK_LINESTATE_GC = 13,
-       PBLK_LINESTATE_BAD = 14,
-       PBLK_LINESTATE_CORRUPT = 15,
-
-       /* GC group */
-       PBLK_LINEGC_NONE = 20,
-       PBLK_LINEGC_EMPTY = 21,
-       PBLK_LINEGC_LOW = 22,
-       PBLK_LINEGC_MID = 23,
-       PBLK_LINEGC_HIGH = 24,
-       PBLK_LINEGC_FULL = 25,
-       PBLK_LINEGC_WERR = 26
-};
-
-#define PBLK_MAGIC 0x70626c6b /*pblk*/
-
-/* emeta/smeta persistent storage format versions:
- * Changes in major version requires offline migration.
- * Changes in minor version are handled automatically during
- * recovery.
- */
-
-#define SMETA_VERSION_MAJOR (0)
-#define SMETA_VERSION_MINOR (1)
-
-#define EMETA_VERSION_MAJOR (0)
-#define EMETA_VERSION_MINOR (2)
-
-struct line_header {
-       __le32 crc;
-       __le32 identifier;      /* pblk identifier */
-       __u8 uuid[16];          /* instance uuid */
-       __le16 type;            /* line type */
-       __u8 version_major;     /* version major */
-       __u8 version_minor;     /* version minor */
-       __le32 id;              /* line id for current line */
-};
-
-struct line_smeta {
-       struct line_header header;
-
-       __le32 crc;             /* Full structure including struct crc */
-       /* Previous line metadata */
-       __le32 prev_id;         /* Line id for previous line */
-
-       /* Current line metadata */
-       __le64 seq_nr;          /* Sequence number for current line */
-
-       /* Active writers */
-       __le32 window_wr_lun;   /* Number of parallel LUNs to write */
-
-       __le32 rsvd[2];
-
-       __le64 lun_bitmap[];
-};
-
-
-/*
- * Metadata layout in media:
- *     First sector:
- *             1. struct line_emeta
- *             2. bad block bitmap (u64 * window_wr_lun)
- *             3. write amplification counters
- *     Mid sectors (start at lbas_sector):
- *             4. nr_lbas (u64) forming lba list
- *     Last sectors (start at vsc_sector):
- *             5. u32 valid sector count (vsc) for all lines (~0U: free line)
- */
-struct line_emeta {
-       struct line_header header;
-
-       __le32 crc;             /* Full structure including struct crc */
-
-       /* Previous line metadata */
-       __le32 prev_id;         /* Line id for prev line */
-
-       /* Current line metadata */
-       __le64 seq_nr;          /* Sequence number for current line */
-
-       /* Active writers */
-       __le32 window_wr_lun;   /* Number of parallel LUNs to write */
-
-       /* Bookkeeping for recovery */
-       __le32 next_id;         /* Line id for next line */
-       __le64 nr_lbas;         /* Number of lbas mapped in line */
-       __le64 nr_valid_lbas;   /* Number of valid lbas mapped in line */
-       __le64 bb_bitmap[];     /* Updated bad block bitmap for line */
-};
-
-
-/* Write amplification counters stored on media */
-struct wa_counters {
-       __le64 user;            /* Number of user written sectors */
-       __le64 gc;              /* Number of sectors written by GC */
-       __le64 pad;             /* Number of padded sectors */
-};
-
-struct pblk_emeta {
-       struct line_emeta *buf;         /* emeta buffer in media format */
-       int mem;                        /* Write offset - points to next
-                                        * writable entry in memory
-                                        */
-       atomic_t sync;                  /* Synced - backpointer that signals the
-                                        * last entry that has been successfully
-                                        * persisted to media
-                                        */
-       unsigned int nr_entries;        /* Number of emeta entries */
-};
-
-struct pblk_smeta {
-       struct line_smeta *buf;         /* smeta buffer in persistent format */
-};
-
-struct pblk_w_err_gc {
-       int has_write_err;
-       int has_gc_err;
-       __le64 *lba_list;
-};
-
-struct pblk_line {
-       struct pblk *pblk;
-       unsigned int id;                /* Line number corresponds to the
-                                        * block line
-                                        */
-       unsigned int seq_nr;            /* Unique line sequence number */
-
-       int state;                      /* PBLK_LINESTATE_X */
-       int type;                       /* PBLK_LINETYPE_X */
-       int gc_group;                   /* PBLK_LINEGC_X */
-       struct list_head list;          /* Free, GC lists */
-
-       unsigned long *lun_bitmap;      /* Bitmap for LUNs mapped in line */
-
-       struct nvm_chk_meta *chks;      /* Chunks forming line */
-
-       struct pblk_smeta *smeta;       /* Start metadata */
-       struct pblk_emeta *emeta;       /* End metadata */
-
-       int meta_line;                  /* Metadata line id */
-       int meta_distance;              /* Distance between data and metadata */
-
-       u64 emeta_ssec;                 /* Sector where emeta starts */
-
-       unsigned int sec_in_line;       /* Number of usable secs in line */
-
-       atomic_t blk_in_line;           /* Number of good blocks in line */
-       unsigned long *blk_bitmap;      /* Bitmap for valid/invalid blocks */
-       unsigned long *erase_bitmap;    /* Bitmap for erased blocks */
-
-       unsigned long *map_bitmap;      /* Bitmap for mapped sectors in line */
-       unsigned long *invalid_bitmap;  /* Bitmap for invalid sectors in line */
-
-       atomic_t left_eblks;            /* Blocks left for erasing */
-       atomic_t left_seblks;           /* Blocks left for sync erasing */
-
-       int left_msecs;                 /* Sectors left for mapping */
-       unsigned int cur_sec;           /* Sector map pointer */
-       unsigned int nr_valid_lbas;     /* Number of valid lbas in line */
-
-       __le32 *vsc;                    /* Valid sector count in line */
-
-       struct kref ref;                /* Write buffer L2P references */
-       atomic_t sec_to_update;         /* Outstanding L2P updates to ppa */
-
-       struct pblk_w_err_gc *w_err_gc; /* Write error gc recovery metadata */
-
-       spinlock_t lock;                /* Necessary for invalid_bitmap only */
-};
-
-#define PBLK_DATA_LINES 4
-
-enum {
-       PBLK_EMETA_TYPE_HEADER = 1,     /* struct line_emeta first sector */
-       PBLK_EMETA_TYPE_LLBA = 2,       /* lba list - type: __le64 */
-       PBLK_EMETA_TYPE_VSC = 3,        /* vsc list - type: __le32 */
-};
-
-struct pblk_line_mgmt {
-       int nr_lines;                   /* Total number of full lines */
-       int nr_free_lines;              /* Number of full lines in free list */
-
-       /* Free lists - use free_lock */
-       struct list_head free_list;     /* Full lines ready to use */
-       struct list_head corrupt_list;  /* Full lines corrupted */
-       struct list_head bad_list;      /* Full lines bad */
-
-       /* GC lists - use gc_lock */
-       struct list_head *gc_lists[PBLK_GC_NR_LISTS];
-       struct list_head gc_high_list;  /* Full lines ready to GC, high isc */
-       struct list_head gc_mid_list;   /* Full lines ready to GC, mid isc */
-       struct list_head gc_low_list;   /* Full lines ready to GC, low isc */
-
-       struct list_head gc_werr_list;  /* Write err recovery list */
-
-       struct list_head gc_full_list;  /* Full lines ready to GC, no valid */
-       struct list_head gc_empty_list; /* Full lines close, all valid */
-
-       struct pblk_line *log_line;     /* Current FTL log line */
-       struct pblk_line *data_line;    /* Current data line */
-       struct pblk_line *log_next;     /* Next FTL log line */
-       struct pblk_line *data_next;    /* Next data line */
-
-       struct list_head emeta_list;    /* Lines queued to schedule emeta */
-
-       __le32 *vsc_list;               /* Valid sector counts for all lines */
-
-       /* Pre-allocated metadata for data lines */
-       struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
-       struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
-       unsigned long meta_bitmap;
-
-       /* Cache and mempool for map/invalid bitmaps */
-       struct kmem_cache *bitmap_cache;
-       mempool_t *bitmap_pool;
-
-       /* Helpers for fast bitmap calculations */
-       unsigned long *bb_template;
-       unsigned long *bb_aux;
-
-       unsigned long d_seq_nr;         /* Data line unique sequence number */
-       unsigned long l_seq_nr;         /* Log line unique sequence number */
-
-       spinlock_t free_lock;
-       spinlock_t close_lock;
-       spinlock_t gc_lock;
-};
-
-struct pblk_line_meta {
-       unsigned int smeta_len;         /* Total length for smeta */
-       unsigned int smeta_sec;         /* Sectors needed for smeta */
-
-       unsigned int emeta_len[4];      /* Lengths for emeta:
-                                        *  [0]: Total
-                                        *  [1]: struct line_emeta +
-                                        *       bb_bitmap + struct wa_counters
-                                        *  [2]: L2P portion
-                                        *  [3]: vsc
-                                        */
-       unsigned int emeta_sec[4];      /* Sectors needed for emeta. Same layout
-                                        * as emeta_len
-                                        */
-
-       unsigned int emeta_bb;          /* Boundary for bb that affects emeta */
-
-       unsigned int vsc_list_len;      /* Length for vsc list */
-       unsigned int sec_bitmap_len;    /* Length for sector bitmap in line */
-       unsigned int blk_bitmap_len;    /* Length for block bitmap in line */
-       unsigned int lun_bitmap_len;    /* Length for lun bitmap in line */
-
-       unsigned int blk_per_line;      /* Number of blocks in a full line */
-       unsigned int sec_per_line;      /* Number of sectors in a line */
-       unsigned int dsec_per_line;     /* Number of data sectors in a line */
-       unsigned int min_blk_line;      /* Min. number of good blocks in line */
-
-       unsigned int mid_thrs;          /* Threshold for GC mid list */
-       unsigned int high_thrs;         /* Threshold for GC high list */
-
-       unsigned int meta_distance;     /* Distance between data and metadata */
-};
-
-enum {
-       PBLK_STATE_RUNNING = 0,
-       PBLK_STATE_STOPPING = 1,
-       PBLK_STATE_RECOVERING = 2,
-       PBLK_STATE_STOPPED = 3,
-};
-
-/* Internal format to support not power-of-2 device formats */
-struct pblk_addrf {
-       /* gen to dev */
-       int sec_stripe;
-       int ch_stripe;
-       int lun_stripe;
-
-       /* dev to gen */
-       int sec_lun_stripe;
-       int sec_ws_stripe;
-};
-
-struct pblk {
-       struct nvm_tgt_dev *dev;
-       struct gendisk *disk;
-
-       struct kobject kobj;
-
-       struct pblk_lun *luns;
-
-       struct pblk_line *lines;                /* Line array */
-       struct pblk_line_mgmt l_mg;             /* Line management */
-       struct pblk_line_meta lm;               /* Line metadata */
-
-       struct nvm_addrf addrf;         /* Aligned address format */
-       struct pblk_addrf uaddrf;       /* Unaligned address format */
-       int addrf_len;
-
-       struct pblk_rb rwb;
-
-       int state;                      /* pblk line state */
-
-       int min_write_pgs; /* Minimum amount of pages required by controller */
-       int min_write_pgs_data; /* Minimum amount of payload pages */
-       int max_write_pgs; /* Maximum amount of pages supported by controller */
-       int oob_meta_size; /* Size of OOB sector metadata */
-
-       sector_t capacity; /* Device capacity when bad blocks are subtracted */
-
-       int op;      /* Percentage of device used for over-provisioning */
-       int op_blks; /* Number of blocks used for over-provisioning */
-
-       /* pblk provisioning values. Used by rate limiter */
-       struct pblk_rl rl;
-
-       int sec_per_write;
-
-       guid_t instance_uuid;
-
-       /* Persistent write amplification counters, 4kb sector I/Os */
-       atomic64_t user_wa;             /* Sectors written by user */
-       atomic64_t gc_wa;               /* Sectors written by GC */
-       atomic64_t pad_wa;              /* Padded sectors written */
-
-       /* Reset values for delta write amplification measurements */
-       u64 user_rst_wa;
-       u64 gc_rst_wa;
-       u64 pad_rst_wa;
-
-       /* Counters used for calculating padding distribution */
-       atomic64_t *pad_dist;           /* Padding distribution buckets */
-       u64 nr_flush_rst;               /* Flushes reset value for pad dist.*/
-       atomic64_t nr_flush;            /* Number of flush/fua I/O */
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Non-persistent debug counters, 4kb sector I/Os */
-       atomic_long_t inflight_writes;  /* Inflight writes (user and gc) */
-       atomic_long_t padded_writes;    /* Sectors padded due to flush/fua */
-       atomic_long_t padded_wb;        /* Sectors padded in write buffer */
-       atomic_long_t req_writes;       /* Sectors stored on write buffer */
-       atomic_long_t sub_writes;       /* Sectors submitted from buffer */
-       atomic_long_t sync_writes;      /* Sectors synced to media */
-       atomic_long_t inflight_reads;   /* Inflight sector read requests */
-       atomic_long_t cache_reads;      /* Read requests that hit the cache */
-       atomic_long_t sync_reads;       /* Completed sector read requests */
-       atomic_long_t recov_writes;     /* Sectors submitted from recovery */
-       atomic_long_t recov_gc_writes;  /* Sectors submitted from write GC */
-       atomic_long_t recov_gc_reads;   /* Sectors submitted from read GC */
-#endif
-
-       spinlock_t lock;
-
-       atomic_long_t read_failed;
-       atomic_long_t read_empty;
-       atomic_long_t read_high_ecc;
-       atomic_long_t read_failed_gc;
-       atomic_long_t write_failed;
-       atomic_long_t erase_failed;
-
-       atomic_t inflight_io;           /* General inflight I/O counter */
-
-       struct task_struct *writer_ts;
-
-       /* Simple translation map of logical addresses to physical addresses.
-        * The logical addresses are known by the host system, while the physical
-        * addresses are used when writing to the disk block device.
-        */
-       unsigned char *trans_map;
-       spinlock_t trans_lock;
-
-       struct list_head compl_list;
-
-       spinlock_t resubmit_lock;        /* Resubmit list lock */
-       struct list_head resubmit_list; /* Resubmit list for failed writes*/
-
-       mempool_t page_bio_pool;
-       mempool_t gen_ws_pool;
-       mempool_t rec_pool;
-       mempool_t r_rq_pool;
-       mempool_t w_rq_pool;
-       mempool_t e_rq_pool;
-
-       struct workqueue_struct *close_wq;
-       struct workqueue_struct *bb_wq;
-       struct workqueue_struct *r_end_wq;
-
-       struct timer_list wtimer;
-
-       struct pblk_gc gc;
-};
-
-struct pblk_line_ws {
-       struct pblk *pblk;
-       struct pblk_line *line;
-       void *priv;
-       struct work_struct ws;
-};
-
-#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
-#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
-
-#define pblk_err(pblk, fmt, ...)                       \
-       pr_err("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
-#define pblk_info(pblk, fmt, ...)                      \
-       pr_info("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
-#define pblk_warn(pblk, fmt, ...)                      \
-       pr_warn("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
-#define pblk_debug(pblk, fmt, ...)                     \
-       pr_debug("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
-
-/*
- * pblk ring buffer operations
- */
-int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
-                unsigned int seg_sz);
-int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
-                          unsigned int nr_entries, unsigned int *pos);
-int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
-                        unsigned int *pos);
-void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
-                             struct pblk_w_ctx w_ctx, unsigned int pos);
-void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
-                           struct pblk_w_ctx w_ctx, struct pblk_line *line,
-                           u64 paddr, unsigned int pos);
-struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
-void pblk_rb_flush(struct pblk_rb *rb);
-
-void pblk_rb_sync_l2p(struct pblk_rb *rb);
-unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
-                                unsigned int pos, unsigned int nr_entries,
-                                unsigned int count);
-int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
-                       struct ppa_addr ppa);
-unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
-
-unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
-unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
-unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
-                             unsigned int nr_entries);
-void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
-unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
-
-unsigned int pblk_rb_read_count(struct pblk_rb *rb);
-unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
-unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
-
-int pblk_rb_tear_down_check(struct pblk_rb *rb);
-int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
-void pblk_rb_free(struct pblk_rb *rb);
-ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
-
-/*
- * pblk core
- */
-struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
-void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
-int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
-void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
-void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
-int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                       struct pblk_c_ctx *c_ctx);
-void pblk_discard(struct pblk *pblk, struct bio *bio);
-struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk);
-struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
-                                             struct nvm_chk_meta *lp,
-                                             struct ppa_addr ppa);
-void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
-void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
-int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
-int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
-int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
-void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
-struct pblk_line *pblk_line_get(struct pblk *pblk);
-struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
-struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
-void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa);
-void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd);
-int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
-void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
-struct pblk_line *pblk_line_get_data(struct pblk *pblk);
-struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
-int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
-int pblk_line_is_full(struct pblk_line *line);
-void pblk_line_free(struct pblk_line *line);
-void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
-void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
-void pblk_line_close_ws(struct work_struct *work);
-void pblk_pipeline_stop(struct pblk *pblk);
-void __pblk_pipeline_stop(struct pblk *pblk);
-void __pblk_pipeline_flush(struct pblk *pblk);
-void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
-                    void (*work)(struct work_struct *), gfp_t gfp_mask,
-                    struct workqueue_struct *wq);
-u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
-int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line);
-int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
-                        void *emeta_buf);
-int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
-void pblk_line_put(struct kref *ref);
-void pblk_line_put_wq(struct kref *ref);
-struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
-u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
-void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
-u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
-u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
-int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
-                  unsigned long secs_to_flush, bool skip_meta);
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
-                 unsigned long *lun_bitmap);
-void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
-void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa);
-void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap);
-int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
-                      int nr_pages);
-void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
-                        int nr_pages);
-void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
-void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
-                          u64 paddr);
-void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
-void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
-                          struct ppa_addr ppa);
-void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
-                        struct ppa_addr ppa, struct ppa_addr entry_line);
-int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
-                      struct pblk_line *gc_line, u64 paddr);
-void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
-                         u64 *lba_list, int nr_secs);
-int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
-                        sector_t blba, int nr_secs, bool *from_cache);
-void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
-void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
-
-/*
- * pblk user I/O write path
- */
-void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
-                       unsigned long flags);
-int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
-
-/*
- * pblk map
- */
-int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                      unsigned int sentry, unsigned long *lun_bitmap,
-                      unsigned int valid_secs, struct ppa_addr *erase_ppa);
-int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
-                unsigned long *lun_bitmap, unsigned int valid_secs,
-                unsigned int off);
-
-/*
- * pblk write thread
- */
-int pblk_write_ts(void *data);
-void pblk_write_timer_fn(struct timer_list *t);
-void pblk_write_should_kick(struct pblk *pblk);
-void pblk_write_kick(struct pblk *pblk);
-
-/*
- * pblk read path
- */
-extern struct bio_set pblk_bio_set;
-void pblk_submit_read(struct pblk *pblk, struct bio *bio);
-int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
-/*
- * pblk recovery
- */
-struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
-int pblk_recov_pad(struct pblk *pblk);
-int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
-
-/*
- * pblk gc
- */
-#define PBLK_GC_MAX_READERS 8  /* Max number of outstanding GC reader jobs */
-#define PBLK_GC_RQ_QD 128      /* Queue depth for inflight GC requests */
-#define PBLK_GC_L_QD 4         /* Queue depth for inflight GC lines */
-
-int pblk_gc_init(struct pblk *pblk);
-void pblk_gc_exit(struct pblk *pblk, bool graceful);
-void pblk_gc_should_start(struct pblk *pblk);
-void pblk_gc_should_stop(struct pblk *pblk);
-void pblk_gc_should_kick(struct pblk *pblk);
-void pblk_gc_free_full_lines(struct pblk *pblk);
-void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
-                             int *gc_active);
-int pblk_gc_sysfs_force(struct pblk *pblk, int force);
-void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line);
-
-/*
- * pblk rate limiter
- */
-void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold);
-void pblk_rl_free(struct pblk_rl *rl);
-void pblk_rl_update_rates(struct pblk_rl *rl);
-int pblk_rl_high_thrs(struct pblk_rl *rl);
-unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
-unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
-int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
-void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
-void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
-int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
-void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
-void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
-int pblk_rl_max_io(struct pblk_rl *rl);
-void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
-                           bool used);
-int pblk_rl_is_limit(struct pblk_rl *rl);
-
-void pblk_rl_werr_line_in(struct pblk_rl *rl);
-void pblk_rl_werr_line_out(struct pblk_rl *rl);
-
-/*
- * pblk sysfs
- */
-int pblk_sysfs_init(struct gendisk *tdisk);
-void pblk_sysfs_exit(struct gendisk *tdisk);
-
-static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
-{
-       return c_ctx - sizeof(struct nvm_rq);
-}
-
-static inline void *emeta_to_bb(struct line_emeta *emeta)
-{
-       return emeta->bb_bitmap;
-}
-
-static inline void *emeta_to_wa(struct pblk_line_meta *lm,
-                               struct line_emeta *emeta)
-{
-       return emeta->bb_bitmap + lm->blk_bitmap_len;
-}
-
-static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
-{
-       return ((void *)emeta + pblk->lm.emeta_len[1]);
-}
-
-static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
-{
-       return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
-}
-
-static inline int pblk_line_vsc(struct pblk_line *line)
-{
-       return le32_to_cpu(*line->vsc);
-}
-
-static inline int pblk_ppa_to_line_id(struct ppa_addr p)
-{
-       return p.a.blk;
-}
-
-static inline struct pblk_line *pblk_ppa_to_line(struct pblk *pblk,
-                                                struct ppa_addr p)
-{
-       return &pblk->lines[pblk_ppa_to_line_id(p)];
-}
-
-static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
-{
-       return p.a.lun * geo->num_ch + p.a.ch;
-}
-
-static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
-                                             u64 line_id)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr ppa;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
-
-               ppa.ppa = 0;
-               ppa.g.blk = line_id;
-               ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
-               ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset;
-               ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset;
-               ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
-               ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset;
-       } else {
-               struct pblk_addrf *uaddrf = &pblk->uaddrf;
-               int secs, chnls, luns;
-
-               ppa.ppa = 0;
-
-               ppa.m.chk = line_id;
-
-               paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs);
-               ppa.m.sec = secs;
-
-               paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls);
-               ppa.m.grp = chnls;
-
-               paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns);
-               ppa.m.pu = luns;
-
-               ppa.m.sec += uaddrf->sec_stripe * paddr;
-       }
-
-       return ppa;
-}
-
-static inline struct nvm_chk_meta *pblk_dev_ppa_to_chunk(struct pblk *pblk,
-                                                       struct ppa_addr p)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line *line = pblk_ppa_to_line(pblk, p);
-       int pos = pblk_ppa_to_pos(geo, p);
-
-       return &line->chks[pos];
-}
-
-static inline u64 pblk_dev_ppa_to_chunk_addr(struct pblk *pblk,
-                                                       struct ppa_addr p)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       return dev_to_chunk_addr(dev->parent, &pblk->addrf, p);
-}
-
-static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
-                                                       struct ppa_addr p)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       u64 paddr;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
-
-               paddr = (u64)p.g.ch << ppaf->ch_offset;
-               paddr |= (u64)p.g.lun << ppaf->lun_offset;
-               paddr |= (u64)p.g.pg << ppaf->pg_offset;
-               paddr |= (u64)p.g.pl << ppaf->pln_offset;
-               paddr |= (u64)p.g.sec << ppaf->sec_offset;
-       } else {
-               struct pblk_addrf *uaddrf = &pblk->uaddrf;
-               u64 secs = p.m.sec;
-               int sec_stripe;
-
-               paddr = (u64)p.m.grp * uaddrf->sec_stripe;
-               paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe;
-
-               secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe);
-               paddr += secs * uaddrf->sec_ws_stripe;
-               paddr += sec_stripe;
-       }
-
-       return paddr;
-}
-
-static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       return nvm_ppa32_to_ppa64(dev->parent, &pblk->addrf, ppa32);
-}
-
-static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       return nvm_ppa64_to_ppa32(dev->parent, &pblk->addrf, ppa64);
-}
-
-static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
-                                                               sector_t lba)
-{
-       struct ppa_addr ppa;
-
-       if (pblk->addrf_len < 32) {
-               u32 *map = (u32 *)pblk->trans_map;
-
-               ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
-       } else {
-               struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
-
-               ppa = map[lba];
-       }
-
-       return ppa;
-}
-
-static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
-                                               struct ppa_addr ppa)
-{
-       if (pblk->addrf_len < 32) {
-               u32 *map = (u32 *)pblk->trans_map;
-
-               map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
-       } else {
-               u64 *map = (u64 *)pblk->trans_map;
-
-               map[lba] = ppa.ppa;
-       }
-}
-
-static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
-{
-       return (ppa_addr.ppa == ADDR_EMPTY);
-}
-
-static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
-{
-       ppa_addr->ppa = ADDR_EMPTY;
-}
-
-static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
-{
-       return (lppa.ppa == rppa.ppa);
-}
-
-static inline int pblk_addr_in_cache(struct ppa_addr ppa)
-{
-       return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
-}
-
-static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
-{
-       return ppa.c.line;
-}
-
-static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
-{
-       struct ppa_addr p;
-
-       p.c.line = addr;
-       p.c.is_cached = 1;
-
-       return p;
-}
-
-static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
-                                           struct line_header *header)
-{
-       u32 crc = ~(u32)0;
-
-       crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
-                               sizeof(struct line_header) - sizeof(crc));
-
-       return crc;
-}
-
-static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
-                                     struct line_smeta *smeta)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       u32 crc = ~(u32)0;
-
-       crc = crc32_le(crc, (unsigned char *)smeta +
-                               sizeof(struct line_header) + sizeof(crc),
-                               lm->smeta_len -
-                               sizeof(struct line_header) - sizeof(crc));
-
-       return crc;
-}
-
-static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
-                                     struct line_emeta *emeta)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       u32 crc = ~(u32)0;
-
-       crc = crc32_le(crc, (unsigned char *)emeta +
-                               sizeof(struct line_header) + sizeof(crc),
-                               lm->emeta_len[0] -
-                               sizeof(struct line_header) - sizeof(crc));
-
-       return crc;
-}
-
-static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
-{
-       return !(nr_secs % pblk->min_write_pgs);
-}
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-static inline void print_ppa(struct pblk *pblk, struct ppa_addr *p,
-                            char *msg, int error)
-{
-       struct nvm_geo *geo = &pblk->dev->geo;
-
-       if (p->c.is_cached) {
-               pblk_err(pblk, "ppa: (%s: %x) cache line: %llu\n",
-                               msg, error, (u64)p->c.line);
-       } else if (geo->version == NVM_OCSSD_SPEC_12) {
-               pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
-                       msg, error,
-                       p->g.ch, p->g.lun, p->g.blk,
-                       p->g.pg, p->g.pl, p->g.sec);
-       } else {
-               pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
-                       msg, error,
-                       p->m.grp, p->m.pu, p->m.chk, p->m.sec);
-       }
-}
-
-static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
-                                        int error)
-{
-       int bit = -1;
-
-       if (rqd->nr_ppas ==  1) {
-               print_ppa(pblk, &rqd->ppa_addr, "rqd", error);
-               return;
-       }
-
-       while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
-                                               bit + 1)) < rqd->nr_ppas) {
-               print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error);
-       }
-
-       pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
-}
-
-static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
-                                      struct ppa_addr *ppas, int nr_ppas)
-{
-       struct nvm_geo *geo = &tgt_dev->geo;
-       struct ppa_addr *ppa;
-       int i;
-
-       for (i = 0; i < nr_ppas; i++) {
-               ppa = &ppas[i];
-
-               if (geo->version == NVM_OCSSD_SPEC_12) {
-                       if (!ppa->c.is_cached &&
-                                       ppa->g.ch < geo->num_ch &&
-                                       ppa->g.lun < geo->num_lun &&
-                                       ppa->g.pl < geo->num_pln &&
-                                       ppa->g.blk < geo->num_chk &&
-                                       ppa->g.pg < geo->num_pg &&
-                                       ppa->g.sec < geo->ws_min)
-                               continue;
-               } else {
-                       if (!ppa->c.is_cached &&
-                                       ppa->m.grp < geo->num_ch &&
-                                       ppa->m.pu < geo->num_lun &&
-                                       ppa->m.chk < geo->num_chk &&
-                                       ppa->m.sec < geo->clba)
-                               continue;
-               }
-
-               print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i);
-
-               return 1;
-       }
-       return 0;
-}
-
-static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
-               WARN_ON(1);
-               return -EINVAL;
-       }
-
-       if (rqd->opcode == NVM_OP_PWRITE) {
-               struct pblk_line *line;
-               int i;
-
-               for (i = 0; i < rqd->nr_ppas; i++) {
-                       line = pblk_ppa_to_line(pblk, ppa_list[i]);
-
-                       spin_lock(&line->lock);
-                       if (line->state != PBLK_LINESTATE_OPEN) {
-                               pblk_err(pblk, "bad ppa: line:%d,state:%d\n",
-                                                       line->id, line->state);
-                               WARN_ON(1);
-                               spin_unlock(&line->lock);
-                               return -EINVAL;
-                       }
-                       spin_unlock(&line->lock);
-               }
-       }
-
-       return 0;
-}
-#endif
-
-static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-
-       if (paddr > lm->sec_per_line)
-               return 1;
-
-       return 0;
-}
-
-static inline unsigned int pblk_get_bi_idx(struct bio *bio)
-{
-       return bio->bi_iter.bi_idx;
-}
-
-static inline sector_t pblk_get_lba(struct bio *bio)
-{
-       return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
-}
-
-static inline unsigned int pblk_get_secs(struct bio *bio)
-{
-       return  bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
-}
-
-static inline char *pblk_disk_name(struct pblk *pblk)
-{
-       struct gendisk *disk = pblk->disk;
-
-       return disk->disk_name;
-}
-
-static inline unsigned int pblk_get_min_chks(struct pblk *pblk)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       /* In a worst-case scenario every line will have OP invalid sectors.
-        * We will then need a minimum of 1/OP lines to free up a single line
-        */
-
-       return DIV_ROUND_UP(100, pblk->op) * lm->blk_per_line;
-}
-
-static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
-                                                        void *meta, int index)
-{
-       return meta +
-              max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
-              * index;
-}
-
-static inline int pblk_dma_meta_size(struct pblk *pblk)
-{
-       return max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
-              * NVM_MAX_VLBA;
-}
-
-static inline int pblk_is_oob_meta_supported(struct pblk *pblk)
-{
-       return pblk->oob_meta_size >= sizeof(struct pblk_sec_meta);
-}
-#endif /* PBLK_H_ */
index 0602e82..f45fb37 100644 (file)
@@ -15,6 +15,7 @@ if MD
 
 config BLK_DEV_MD
        tristate "RAID support"
+       select BLOCK_HOLDER_DEPRECATED if SYSFS
        help
          This driver lets you combine several hard disk partitions into one
          logical block device. This can be used to simply append one
@@ -201,6 +202,7 @@ config BLK_DEV_DM_BUILTIN
 
 config BLK_DEV_DM
        tristate "Device mapper support"
+       select BLOCK_HOLDER_DEPRECATED if SYSFS
        select BLK_DEV_DM_BUILTIN
        depends on DAX || DAX=n
        help
@@ -340,7 +342,7 @@ config DM_WRITECACHE
 
 config DM_EBS
        tristate "Emulated block size target (EXPERIMENTAL)"
-       depends on BLK_DEV_DM
+       depends on BLK_DEV_DM && !HIGHMEM
        select DM_BUFIO
        help
          dm-ebs emulates smaller logical block size on backing devices
index d1ca4d0..cf3e809 100644 (file)
@@ -2,6 +2,7 @@
 
 config BCACHE
        tristate "Block device as cache"
+       select BLOCK_HOLDER_DEPRECATED if SYSFS
        select CRC64
        help
        Allows a block device to be used as cache for other devices; uses
index 183a58c..0595559 100644 (file)
@@ -378,7 +378,7 @@ static void do_btree_node_write(struct btree *b)
                struct bvec_iter_all iter_all;
 
                bio_for_each_segment_all(bv, b->bio, iter_all) {
-                       memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
+                       memcpy(bvec_virt(bv), addr, PAGE_SIZE);
                        addr += PAGE_SIZE;
                }
 
index 185246a..f2874c7 100644 (file)
@@ -885,11 +885,6 @@ static void bcache_device_free(struct bcache_device *d)
                bcache_device_detach(d);
 
        if (disk) {
-               bool disk_added = (disk->flags & GENHD_FL_UP) != 0;
-
-               if (disk_added)
-                       del_gendisk(disk);
-
                blk_cleanup_disk(disk);
                ida_simple_remove(&bcache_device_idx,
                                  first_minor_to_idx(disk->first_minor));
@@ -931,20 +926,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
        n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
        d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
        if (!d->full_dirty_stripes)
-               return -ENOMEM;
+               goto out_free_stripe_sectors_dirty;
 
        idx = ida_simple_get(&bcache_device_idx, 0,
                                BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
        if (idx < 0)
-               return idx;
+               goto out_free_full_dirty_stripes;
 
        if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
                        BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
-               goto err;
+               goto out_ida_remove;
 
        d->disk = blk_alloc_disk(NUMA_NO_NODE);
        if (!d->disk)
-               goto err;
+               goto out_bioset_exit;
 
        set_capacity(d->disk, sectors);
        snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
@@ -987,8 +982,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
 
        return 0;
 
-err:
+out_bioset_exit:
+       bioset_exit(&d->bio_split);
+out_ida_remove:
        ida_simple_remove(&bcache_device_idx, idx);
+out_free_full_dirty_stripes:
+       kvfree(d->full_dirty_stripes);
+out_free_stripe_sectors_dirty:
+       kvfree(d->stripe_sectors_dirty);
        return -ENOMEM;
 
 }
@@ -1365,8 +1366,10 @@ static void cached_dev_free(struct closure *cl)
 
        mutex_lock(&bch_register_lock);
 
-       if (atomic_read(&dc->running))
+       if (atomic_read(&dc->running)) {
                bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+               del_gendisk(dc->disk.disk);
+       }
        bcache_device_free(&dc->disk);
        list_del(&dc->list);
 
@@ -1512,6 +1515,7 @@ static void flash_dev_free(struct closure *cl)
        mutex_lock(&bch_register_lock);
        atomic_long_sub(bcache_dev_sectors_dirty(d),
                        &d->c->flash_dev_dirty_sectors);
+       del_gendisk(d->disk);
        bcache_device_free(d);
        mutex_unlock(&bch_register_lock);
        kobject_put(&d->kobj);
index bca4a7c..b64460a 100644 (file)
@@ -15,8 +15,6 @@
 
 #include "closure.h"
 
-#define PAGE_SECTORS           (PAGE_SIZE / 512)
-
 struct closure;
 
 #ifdef CONFIG_BCACHE_DEBUG
index 71475a2..0c509da 100644 (file)
@@ -74,7 +74,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
        if (unlikely(!bv->bv_page || !bv_len))
                return -EIO;
 
-       pa = page_address(bv->bv_page) + bv->bv_offset;
+       pa = bvec_virt(bv);
 
        /* Handle overlapping page <-> blocks */
        while (bv_len) {
index 20f2510..a9ea361 100644 (file)
@@ -1819,7 +1819,7 @@ again:
                                unsigned this_len;
 
                                BUG_ON(PageHighMem(biv.bv_page));
-                               tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
+                               tag = bvec_virt(&biv);
                                this_len = min(biv.bv_len, data_to_process);
                                r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
                                                        this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
@@ -2006,7 +2006,7 @@ retry_kmap:
                                        unsigned tag_now = min(biv.bv_len, tag_todo);
                                        char *tag_addr;
                                        BUG_ON(PageHighMem(biv.bv_page));
-                                       tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
+                                       tag_addr = bvec_virt(&biv);
                                        if (likely(dio->op == REQ_OP_WRITE))
                                                memcpy(tag_ptr, tag_addr, tag_now);
                                        else
index 2209cbc..2575074 100644 (file)
@@ -1436,9 +1436,6 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
        }
 
        if (dm_get_md_type(md) == DM_TYPE_NONE) {
-               /* Initial table load: acquire type of table. */
-               dm_set_md_type(md, dm_table_get_type(t));
-
                /* setup md->queue to reflect md's type (may block) */
                r = dm_setup_md_queue(md, t);
                if (r) {
@@ -2187,7 +2184,6 @@ int __init dm_early_create(struct dm_ioctl *dmi,
        if (r)
                goto err_destroy_table;
 
-       md->type = dm_table_get_type(t);
        /* setup md->queue to reflect md's type (may block) */
        r = dm_setup_md_queue(md, t);
        if (r) {
index 0dbd48c..5b95eea 100644 (file)
@@ -559,7 +559,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
        err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (err)
                goto out_tag_set;
-       elevator_init_mq(md->queue);
        return 0;
 
 out_tag_set:
index 0543cdf..b03eabc 100644 (file)
@@ -2076,7 +2076,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
        }
 
        dm_update_keyslot_manager(q, t);
-       blk_queue_update_readahead(q);
+       disk_update_readahead(t->md->disk);
 
        return 0;
 }
index e21e29e..3d2cf81 100644 (file)
@@ -1214,14 +1214,13 @@ static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
 static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
 {
        void *buf;
-       unsigned long flags;
        unsigned size;
        int rw = bio_data_dir(bio);
        unsigned remaining_size = wc->block_size;
 
        do {
                struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
-               buf = bvec_kmap_irq(&bv, &flags);
+               buf = bvec_kmap_local(&bv);
                size = bv.bv_len;
                if (unlikely(size > remaining_size))
                        size = remaining_size;
@@ -1239,7 +1238,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
                        memcpy_flushcache_optimized(data, buf, size);
                }
 
-               bvec_kunmap_irq(buf, &flags);
+               kunmap_local(buf);
 
                data = (char *)data + size;
                remaining_size -= size;
index 2c5f9e5..7981b72 100644 (file)
@@ -1693,14 +1693,13 @@ static void cleanup_mapped_device(struct mapped_device *md)
                spin_lock(&_minor_lock);
                md->disk->private_data = NULL;
                spin_unlock(&_minor_lock);
-               del_gendisk(md->disk);
-       }
-
-       if (md->queue)
+               if (dm_get_md_type(md) != DM_TYPE_NONE) {
+                       dm_sysfs_exit(md);
+                       del_gendisk(md->disk);
+               }
                dm_queue_destroy_keyslot_manager(md->queue);
-
-       if (md->disk)
                blk_cleanup_disk(md->disk);
+       }
 
        cleanup_srcu_struct(&md->io_barrier);
 
@@ -1792,7 +1791,6 @@ static struct mapped_device *alloc_dev(int minor)
                        goto bad;
        }
 
-       add_disk_no_queue_reg(md->disk);
        format_dev_t(md->name, MKDEV(_major, minor));
 
        md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
@@ -1993,19 +1991,12 @@ static struct dm_table *__unbind(struct mapped_device *md)
  */
 int dm_create(int minor, struct mapped_device **result)
 {
-       int r;
        struct mapped_device *md;
 
        md = alloc_dev(minor);
        if (!md)
                return -ENXIO;
 
-       r = dm_sysfs_init(md);
-       if (r) {
-               free_dev(md);
-               return r;
-       }
-
        *result = md;
        return 0;
 }
@@ -2056,9 +2047,9 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
  */
 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 {
-       int r;
+       enum dm_queue_mode type = dm_table_get_type(t);
        struct queue_limits limits;
-       enum dm_queue_mode type = dm_get_md_type(md);
+       int r;
 
        switch (type) {
        case DM_TYPE_REQUEST_BASED:
@@ -2086,8 +2077,14 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
        if (r)
                return r;
 
-       blk_register_queue(md->disk);
+       add_disk(md->disk);
 
+       r = dm_sysfs_init(md);
+       if (r) {
+               del_gendisk(md->disk);
+               return r;
+       }
+       md->type = type;
        return 0;
 }
 
@@ -2193,7 +2190,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
                DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
                       dm_device_name(md), atomic_read(&md->holders));
 
-       dm_sysfs_exit(md);
        dm_table_destroy(__unbind(md));
        free_dev(md);
 }
index 832547c..4c96c36 100644 (file)
@@ -764,9 +764,7 @@ struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
 
 static inline bool is_mddev_broken(struct md_rdev *rdev, const char *md_type)
 {
-       int flags = rdev->bdev->bd_disk->flags;
-
-       if (!(flags & GENHD_FL_UP)) {
+       if (!disk_live(rdev->bdev->bd_disk)) {
                if (!test_and_set_bit(MD_BROKEN, &rdev->mddev->flags))
                        pr_warn("md: %s: %s array has a missing/failed member\n",
                                mdname(rdev->mddev), md_type);
index 51f2547..19598bd 100644 (file)
@@ -474,8 +474,6 @@ static void raid1_end_write_request(struct bio *bio)
                /*
                 * When the device is faulty, it is not necessary to
                 * handle write error.
-                * For failfast, this is the only remaining device,
-                * We need to retry the write without FailFast.
                 */
                if (!test_bit(Faulty, &rdev->flags))
                        set_bit(R1BIO_WriteError, &r1_bio->state);
@@ -1331,6 +1329,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
        struct raid1_plug_cb *plug = NULL;
        int first_clone;
        int max_sectors;
+       bool write_behind = false;
 
        if (mddev_is_clustered(mddev) &&
             md_cluster_ops->area_resyncing(mddev, WRITE,
@@ -1383,6 +1382,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
        max_sectors = r1_bio->sectors;
        for (i = 0;  i < disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+
+               /*
+                * The write-behind io is only attempted on drives marked as
+        * write-mostly, which means we may need to allocate a
+        * write-behind bio later.
+                */
+               if (rdev && test_bit(WriteMostly, &rdev->flags))
+                       write_behind = true;
+
                if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
                        atomic_inc(&rdev->nr_pending);
                        blocked_rdev = rdev;
@@ -1456,6 +1464,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                goto retry_write;
        }
 
+       /*
+        * When using a bitmap, we may call alloc_behind_master_bio below.
+        * alloc_behind_master_bio allocates a copy of the data payload a page
+        * at a time and thus needs a new bio that can fit the whole payload
+        * of this bio in page-sized chunks.
+        */
+       if (write_behind && bitmap)
+               max_sectors = min_t(int, max_sectors,
+                                   BIO_MAX_VECS * (PAGE_SIZE >> 9));
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              GFP_NOIO, &conf->bio_split);
index 16977e8..aa26365 100644 (file)
@@ -471,12 +471,12 @@ static void raid10_end_write_request(struct bio *bio)
                        /*
                         * When the device is faulty, it is not necessary to
                         * handle write error.
-                        * For failfast, this is the only remaining device,
-                        * We need to retry the write without FailFast.
                         */
                        if (!test_bit(Faulty, &rdev->flags))
                                set_bit(R10BIO_WriteError, &r10_bio->state);
                        else {
+                               /* Fail the request */
+                               set_bit(R10BIO_Degraded, &r10_bio->state);
                                r10_bio->devs[slot].bio = NULL;
                                to_put = bio;
                                dec_rdev = 1;
@@ -1712,6 +1712,11 @@ retry_discard:
        } else
                r10_bio->master_bio = (struct bio *)first_r10bio;
 
+       /*
+        * First select the target devices under rcu_read_lock() and
+        * increment the refcount on their rdev. Record them by setting
+        * bios[x] to bio.
+        */
        rcu_read_lock();
        for (disk = 0; disk < geo->raid_disks; disk++) {
                struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
@@ -1743,9 +1748,6 @@ retry_discard:
        for (disk = 0; disk < geo->raid_disks; disk++) {
                sector_t dev_start, dev_end;
                struct bio *mbio, *rbio = NULL;
-               struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
-               struct md_rdev *rrdev = rcu_dereference(
-                       conf->mirrors[disk].replacement);
 
                /*
                 * Now start to calculate the start and end address for each disk.
@@ -1775,9 +1777,12 @@ retry_discard:
 
                /*
                 * It only handles a discard bio whose size is >= stripe size, so
-                * dev_end > dev_start all the time
+                * dev_end > dev_start all the time.
+                * There is no need to take the rcu lock to get rdev here; we
+                * already incremented rdev->nr_pending in the first loop.
                 */
                if (r10_bio->devs[disk].bio) {
+                       struct md_rdev *rdev = conf->mirrors[disk].rdev;
                        mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
                        mbio->bi_end_io = raid10_end_discard_request;
                        mbio->bi_private = r10_bio;
@@ -1790,6 +1795,7 @@ retry_discard:
                        bio_endio(mbio);
                }
                if (r10_bio->devs[disk].repl_bio) {
+                       struct md_rdev *rrdev = conf->mirrors[disk].replacement;
                        rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
                        rbio->bi_end_io = raid10_end_discard_request;
                        rbio->bi_private = r10_bio;
index b8436e4..02ed53b 100644 (file)
@@ -2437,7 +2437,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
            conf->scribble_sectors >= new_sectors)
                return 0;
        mddev_suspend(conf->mddev);
-       get_online_cpus();
+       cpus_read_lock();
 
        for_each_present_cpu(cpu) {
                struct raid5_percpu *percpu;
@@ -2449,7 +2449,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
                        break;
        }
 
-       put_online_cpus();
+       cpus_read_unlock();
        mddev_resume(conf->mddev);
        if (!err) {
                conf->scribble_disks = new_disks;
index 02281d1..508ac29 100644 (file)
@@ -1573,6 +1573,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
                  struct media_request *req)
 {
        struct vb2_buffer *vb;
+       enum vb2_buffer_state orig_state;
        int ret;
 
        if (q->error) {
@@ -1673,6 +1674,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
         * Add to the queued buffers list, a buffer will stay on it until
         * dequeued in dqbuf.
         */
+       orig_state = vb->state;
        list_add_tail(&vb->queued_entry, &q->queued_list);
        q->queued_count++;
        q->waiting_for_buffers = false;
@@ -1703,8 +1705,17 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
        if (q->streaming && !q->start_streaming_called &&
            q->queued_count >= q->min_buffers_needed) {
                ret = vb2_start_streaming(q);
-               if (ret)
+               if (ret) {
+                       /*
+                        * Since vb2_core_qbuf will return with an error,
+                        * return the buffer to the DEQUEUED state, as the
+                        * error indicates that it wasn't queued.
+                        */
+                       list_del(&vb->queued_entry);
+                       q->queued_count--;
+                       vb->state = orig_state;
                        return ret;
+               }
        }
 
        dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
index 59a36f9..30d29b9 100644 (file)
@@ -226,7 +226,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
 err_free_swnodes:
        software_node_unregister_nodes(sensor->swnodes);
 err_put_adev:
-       acpi_dev_put(sensor->adev);
+       acpi_dev_put(adev);
        return ret;
 }
 
index 99b5121..dda2f27 100644 (file)
@@ -8,6 +8,7 @@ config VIDEO_ATMEL_ISC
        select VIDEOBUF2_DMA_CONTIG
        select REGMAP_MMIO
        select V4L2_FWNODE
+       select VIDEO_ATMEL_ISC_BASE
        help
           This module makes the ATMEL Image Sensor Controller available
           as a v4l2 device.
@@ -19,10 +20,17 @@ config VIDEO_ATMEL_XISC
        select VIDEOBUF2_DMA_CONTIG
        select REGMAP_MMIO
        select V4L2_FWNODE
+       select VIDEO_ATMEL_ISC_BASE
        help
           This module makes the ATMEL eXtended Image Sensor Controller
           available as a v4l2 device.
 
+config VIDEO_ATMEL_ISC_BASE
+       tristate
+       default n
+       help
+         ATMEL ISC and XISC common code base.
+
 config VIDEO_ATMEL_ISI
        tristate "ATMEL Image Sensor Interface (ISI) support"
        depends on VIDEO_V4L2 && OF
index c5c0155..46d264a 100644 (file)
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
-atmel-isc-objs = atmel-sama5d2-isc.o atmel-isc-base.o
-atmel-xisc-objs = atmel-sama7g5-isc.o atmel-isc-base.o
+atmel-isc-objs = atmel-sama5d2-isc.o
+atmel-xisc-objs = atmel-sama7g5-isc.o
 
 obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
+obj-$(CONFIG_VIDEO_ATMEL_ISC_BASE) += atmel-isc-base.o
 obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
 obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel-xisc.o
index 19daa49..136ab7c 100644 (file)
@@ -378,6 +378,7 @@ int isc_clk_init(struct isc_device *isc)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(isc_clk_init);
 
 void isc_clk_cleanup(struct isc_device *isc)
 {
@@ -392,6 +393,7 @@ void isc_clk_cleanup(struct isc_device *isc)
                        clk_unregister(isc_clk->clk);
        }
 }
+EXPORT_SYMBOL_GPL(isc_clk_cleanup);
 
 static int isc_queue_setup(struct vb2_queue *vq,
                            unsigned int *nbuffers, unsigned int *nplanes,
@@ -1578,6 +1580,7 @@ irqreturn_t isc_interrupt(int irq, void *dev_id)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(isc_interrupt);
 
 static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max)
 {
@@ -2212,6 +2215,7 @@ const struct v4l2_async_notifier_operations isc_async_ops = {
        .unbind = isc_async_unbind,
        .complete = isc_async_complete,
 };
+EXPORT_SYMBOL_GPL(isc_async_ops);
 
 void isc_subdev_cleanup(struct isc_device *isc)
 {
@@ -2224,6 +2228,7 @@ void isc_subdev_cleanup(struct isc_device *isc)
 
        INIT_LIST_HEAD(&isc->subdev_entities);
 }
+EXPORT_SYMBOL_GPL(isc_subdev_cleanup);
 
 int isc_pipeline_init(struct isc_device *isc)
 {
@@ -2264,6 +2269,7 @@ int isc_pipeline_init(struct isc_device *isc)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(isc_pipeline_init);
 
 /* regmap configuration */
 #define ATMEL_ISC_REG_MAX    0xd5c
@@ -2273,4 +2279,9 @@ const struct regmap_config isc_regmap_config = {
        .val_bits       = 32,
        .max_register   = ATMEL_ISC_REG_MAX,
 };
+EXPORT_SYMBOL_GPL(isc_regmap_config);
 
+MODULE_AUTHOR("Songjun Wu");
+MODULE_AUTHOR("Eugen Hristev");
+MODULE_DESCRIPTION("Atmel ISC common code base");
+MODULE_LICENSE("GPL v2");
index 8370573..795a012 100644 (file)
@@ -37,7 +37,16 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
        } else {
                /* read */
                requesttype = (USB_TYPE_VENDOR | USB_DIR_IN);
-               pipe = usb_rcvctrlpipe(d->udev, 0);
+
+               /*
+                * Zero-length transfers must use usb_sndctrlpipe() and
+                * rtl28xxu_identify_state() uses a zero-length i2c read
+                * command to determine the chip type.
+                */
+               if (req->size)
+                       pipe = usb_rcvctrlpipe(d->udev, 0);
+               else
+                       pipe = usb_sndctrlpipe(d->udev, 0);
        }
 
        ret = usb_control_msg(d->udev, pipe, 0, requesttype, req->value,
@@ -612,9 +621,8 @@ static int rtl28xxu_read_config(struct dvb_usb_device *d)
 static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name)
 {
        struct rtl28xxu_dev *dev = d_to_priv(d);
-       u8 buf[1];
        int ret;
-       struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 1, buf};
+       struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 0, NULL};
 
        dev_dbg(&d->intf->dev, "\n");
 
index 4a4573f..acf3667 100644 (file)
@@ -1105,7 +1105,7 @@ static u16 msb_get_free_block(struct msb_data *msb, int zone)
        dbg_verbose("result of the free blocks scan: pba %d", pba);
 
        if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
-               pr_err("BUG: cant get a free block");
+               pr_err("BUG: can't get a free block");
                msb->read_only = true;
                return MS_BLOCK_INVALID;
        }
index 615a837..e79a021 100644 (file)
@@ -293,7 +293,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev)
 
        /* TODO: hidden assumption about nents being always 1 */
        sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
-               PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+                             DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
        if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) {
                message("problem in dma_map_sg");
@@ -310,8 +310,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev)
        }
 
        dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
-               PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
-
+                    DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
        return dev->dma_error;
 }
@@ -877,7 +876,7 @@ static SIMPLE_DEV_PM_OPS(r592_pm_ops, r592_suspend, r592_resume);
 
 MODULE_DEVICE_TABLE(pci, r592_pci_id_tbl);
 
-static struct pci_driver r852_pci_driver = {
+static struct pci_driver r592_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = r592_pci_id_tbl,
        .probe          = r592_probe,
@@ -885,7 +884,7 @@ static struct pci_driver r852_pci_driver = {
        .driver.pm      = &r592_pm_ops,
 };
 
-module_pci_driver(r852_pci_driver);
+module_pci_driver(r592_pci_driver);
 
 module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO);
 MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)");
index 5714537..c272453 100644 (file)
@@ -279,8 +279,8 @@ static int tifm_ms_issue_cmd(struct tifm_ms *host)
        if (host->use_dma) {
                if (1 != tifm_map_sg(sock, &host->req->sg, 1,
                                     host->req->data_dir == READ
-                                    ? PCI_DMA_FROMDEVICE
-                                    : PCI_DMA_TODEVICE)) {
+                                    ? DMA_FROM_DEVICE
+                                    : DMA_TO_DEVICE)) {
                        host->req->error = -ENOMEM;
                        return host->req->error;
                }
@@ -350,8 +350,8 @@ static void tifm_ms_complete_cmd(struct tifm_ms *host)
        if (host->use_dma) {
                tifm_unmap_sg(sock, &host->req->sg, 1,
                              host->req->data_dir == READ
-                             ? PCI_DMA_FROMDEVICE
-                             : PCI_DMA_TODEVICE);
+                             ? DMA_FROM_DEVICE
+                             : DMA_TO_DEVICE);
        }
 
        writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
@@ -607,8 +607,8 @@ static void tifm_ms_remove(struct tifm_dev *sock)
                if (host->use_dma)
                        tifm_unmap_sg(sock, &host->req->sg, 1,
                                      host->req->data_dir == READ
-                                     ? PCI_DMA_TODEVICE
-                                     : PCI_DMA_FROMDEVICE);
+                                     ? DMA_TO_DEVICE
+                                     : DMA_FROM_DEVICE);
                host->req->error = -ETIME;
 
                do {
index 3bde7fd..287da20 100644 (file)
@@ -2364,7 +2364,7 @@ static bool read_mailbox_0(void)
 
                for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
                        if (ev & prcmu_irq_bit[n])
-                               generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
+                               generic_handle_domain_irq(db8500_irq_domain, n);
                }
                r = true;
                break;
index 5f6f0a8..37e5e02 100644 (file)
@@ -35,10 +35,10 @@ static void mx25_tsadc_irq_handler(struct irq_desc *desc)
        regmap_read(tsadc->regs, MX25_TSC_TGSR, &status);
 
        if (status & MX25_TGSR_GCQ_INT)
-               generic_handle_irq(irq_find_mapping(tsadc->domain, 1));
+               generic_handle_domain_irq(tsadc->domain, 1);
 
        if (status & MX25_TGSR_TCQ_INT)
-               generic_handle_irq(irq_find_mapping(tsadc->domain, 0));
+               generic_handle_domain_irq(tsadc->domain, 0);
 
        chained_irq_exit(chip, desc);
 }
index 99b9c11..5865683 100644 (file)
@@ -105,19 +105,15 @@ static void ioc3_irq_handler(struct irq_desc *desc)
        struct ioc3_priv_data *ipd = domain->host_data;
        struct ioc3 __iomem *regs = ipd->regs;
        u32 pending, mask;
-       unsigned int irq;
 
        pending = readl(&regs->sio_ir);
        mask = readl(&regs->sio_ies);
        pending &= mask; /* Mask off not enabled interrupts */
 
-       if (pending) {
-               irq = irq_find_mapping(domain, __ffs(pending));
-               if (irq)
-                       generic_handle_irq(irq);
-       } else  {
+       if (pending)
+               generic_handle_domain_irq(domain, __ffs(pending));
+       else
                spurious_interrupt();
-       }
 }
 
 /*
index acd172d..ec18a04 100644 (file)
@@ -122,7 +122,7 @@ bail:
 
 static int pm8xxx_irq_block_handler(struct pm_irq_chip *chip, int block)
 {
-       int pmirq, irq, i, ret = 0;
+       int pmirq, i, ret = 0;
        unsigned int bits;
 
        ret = pm8xxx_read_block_irq(chip, block, &bits);
@@ -139,8 +139,7 @@ static int pm8xxx_irq_block_handler(struct pm_irq_chip *chip, int block)
        for (i = 0; i < 8; i++) {
                if (bits & (1 << i)) {
                        pmirq = block * 8 + i;
-                       irq = irq_find_mapping(chip->irqdomain, pmirq);
-                       generic_handle_irq(irq);
+                       generic_handle_domain_irq(chip->irqdomain, pmirq);
                }
        }
        return 0;
@@ -199,7 +198,7 @@ static void pm8xxx_irq_handler(struct irq_desc *desc)
 static void pm8821_irq_block_handler(struct pm_irq_chip *chip,
                                     int master, int block)
 {
-       int pmirq, irq, i, ret;
+       int pmirq, i, ret;
        unsigned int bits;
 
        ret = regmap_read(chip->regmap,
@@ -216,8 +215,7 @@ static void pm8821_irq_block_handler(struct pm_irq_chip *chip,
        for (i = 0; i < 8; i++) {
                if (bits & BIT(i)) {
                        pmirq = block * 8 + i;
-                       irq = irq_find_mapping(chip->irqdomain, pmirq);
-                       generic_handle_irq(irq);
+                       generic_handle_domain_irq(chip->irqdomain, pmirq);
                }
        }
 }
index ae8b69a..6f25c34 100644 (file)
@@ -15,7 +15,7 @@ config PWRSEQ_EMMC
 
 config PWRSEQ_SD8787
        tristate "HW reset support for SD8787 BT + Wifi module"
-       depends on OF && (MWIFIEX || BT_MRVL_SDIO || LIBERTAS_SDIO)
+       depends on OF && (MWIFIEX || BT_MRVL_SDIO || LIBERTAS_SDIO || WILC1000_SDIO)
        help
          This selects hardware reset support for the SD8787 BT + Wifi
          module. By default this option is set to n.
index ce8aed5..431af5e 100644 (file)
@@ -98,6 +98,11 @@ static int max_devices;
 static DEFINE_IDA(mmc_blk_ida);
 static DEFINE_IDA(mmc_rpmb_ida);
 
+struct mmc_blk_busy_data {
+       struct mmc_card *card;
+       u32 status;
+};
+
 /*
  * There is one mmc_blk_data per slot.
  */
@@ -128,8 +133,6 @@ struct mmc_blk_data {
         * track of the current selected device partition.
         */
        unsigned int    part_curr;
-       struct device_attribute force_ro;
-       struct device_attribute power_ro_lock;
        int     area_type;
 
        /* debugfs files (only in main mmc_blk_data) */
@@ -281,6 +284,9 @@ out_put:
        return count;
 }
 
+static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
+               power_ro_lock_show, power_ro_lock_store);
+
 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
@@ -313,6 +319,44 @@ out:
        return ret;
 }
 
+static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);
+
+static struct attribute *mmc_disk_attrs[] = {
+       &dev_attr_force_ro.attr,
+       &dev_attr_ro_lock_until_next_power_on.attr,
+       NULL,
+};
+
+static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
+               struct attribute *a, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+       umode_t mode = a->mode;
+
+       if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
+           (md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+           md->queue.card->ext_csd.boot_ro_lockable) {
+               mode = S_IRUGO;
+               if (!(md->queue.card->ext_csd.boot_ro_lock &
+                               EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
+                       mode |= S_IWUSR;
+       }
+
+       mmc_blk_put(md);
+       return mode;
+}
+
+static const struct attribute_group mmc_disk_attr_group = {
+       .is_visible     = mmc_disk_attrs_is_visible,
+       .attrs          = mmc_disk_attrs,
+};
+
+static const struct attribute_group *mmc_disk_attr_groups[] = {
+       &mmc_disk_attr_group,
+       NULL,
+};
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -417,42 +461,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
        return 0;
 }
 
-static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
-                           u32 *resp_errs)
-{
-       unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
-       int err = 0;
-       u32 status;
-
-       do {
-               bool done = time_after(jiffies, timeout);
-
-               err = __mmc_send_status(card, &status, 5);
-               if (err) {
-                       dev_err(mmc_dev(card->host),
-                               "error %d requesting status\n", err);
-                       return err;
-               }
-
-               /* Accumulate any response error bits seen */
-               if (resp_errs)
-                       *resp_errs |= status;
-
-               /*
-                * Timeout if the device never becomes ready for data and never
-                * leaves the program state.
-                */
-               if (done) {
-                       dev_err(mmc_dev(card->host),
-                               "Card stuck in wrong state! %s status: %#x\n",
-                                __func__, status);
-                       return -ETIMEDOUT;
-               }
-       } while (!mmc_ready_for_data(status));
-
-       return err;
-}
-
 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
                               struct mmc_blk_ioc_data *idata)
 {
@@ -549,6 +557,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
                return mmc_sanitize(card, idata->ic.cmd_timeout_ms);
 
        mmc_wait_for_req(card->host, &mrq);
+       memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
 
        if (cmd.error) {
                dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
@@ -598,14 +607,13 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
        if (idata->ic.postsleep_min_us)
                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
 
-       memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
-
        if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
                /*
                 * Ensure RPMB/R1B command has completed by polling CMD13
                 * "Send Status".
                 */
-               err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL);
+               err = mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, false,
+                                       MMC_BUSY_IO);
        }
 
        return err;
@@ -792,6 +800,26 @@ static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
 }
 #endif
 
+static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
+                                         sector_t *sector)
+{
+       struct mmc_blk_data *md;
+       int ret;
+
+       md = mmc_blk_get(disk);
+       if (!md)
+               return -EINVAL;
+
+       if (md->queue.card)
+               ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);
+       else
+               ret = -ENODEV;
+
+       mmc_blk_put(md);
+
+       return ret;
+}
+
 static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
@@ -801,6 +829,7 @@ static const struct block_device_operations mmc_bdops = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl           = mmc_blk_compat_ioctl,
 #endif
+       .alternative_gpt_sector = mmc_blk_alternative_gpt_sector,
 };
 
 static int mmc_blk_part_switch_pre(struct mmc_card *card,
@@ -1636,7 +1665,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
 
        mmc_blk_send_stop(card, timeout);
 
-       err = card_busy_detect(card, timeout, NULL);
+       err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);
 
        mmc_retune_release(card->host);
 
@@ -1851,28 +1880,48 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
               brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
 }
 
+static int mmc_blk_busy_cb(void *cb_data, bool *busy)
+{
+       struct mmc_blk_busy_data *data = cb_data;
+       u32 status = 0;
+       int err;
+
+       err = mmc_send_status(data->card, &status);
+       if (err)
+               return err;
+
+       /* Accumulate response error bits. */
+       data->status |= status;
+
+       *busy = !mmc_ready_for_data(status);
+       return 0;
+}
+
 static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
 {
        struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
-       u32 status = 0;
+       struct mmc_blk_busy_data cb_data;
        int err;
 
        if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
                return 0;
 
-       err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
+       cb_data.card = card;
+       cb_data.status = 0;
+       err = __mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, &mmc_blk_busy_cb,
+                                 &cb_data);
 
        /*
         * Do not assume data transferred correctly if there are any error bits
         * set.
         */
-       if (status & mmc_blk_stop_err_bits(&mqrq->brq)) {
+       if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
                mqrq->brq.data.bytes_xfered = 0;
                err = err ? err : -EIO;
        }
 
        /* Copy the exception bit so it will be seen later on */
-       if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT)
+       if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT)
                mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
 
        return err;
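
Side note on the new busy-polling API used above: the block driver's local card_busy_detect() is gone, and callers now either pass one of the enum mmc_busy_cmd values to mmc_poll_for_busy() (as the MMC_BUSY_IO conversions do) or hand __mmc_poll_for_busy() their own callback plus context, as mmc_blk_card_busy() does with mmc_blk_busy_cb(). A minimal sketch of that second pattern, with hypothetical my_* names and assuming the v5.15 signatures shown in this series (illustrative only, not part of the diff):

    struct my_busy_data {
            struct mmc_card *card;
            u32 status;                     /* accumulated R1 status bits */
    };

    static int my_busy_cb(void *cb_data, bool *busy)
    {
            struct my_busy_data *data = cb_data;
            u32 status = 0;
            int err;

            err = mmc_send_status(data->card, &status);
            if (err)
                    return err;

            data->status |= status;                 /* remember error bits */
            *busy = !mmc_ready_for_data(status);    /* keep polling while busy */
            return 0;
    }

    /* caller, with the host claimed: */
    struct my_busy_data data = { .card = card };
    err = __mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, &my_busy_cb, &data);
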
@@ -2289,7 +2338,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                                              sector_t size,
                                              bool default_ro,
                                              const char *subname,
-                                             int area_type)
+                                             int area_type,
+                                             unsigned int part_type)
 {
        struct mmc_blk_data *md;
        int devidx, ret;
@@ -2336,6 +2386,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        kref_init(&md->kref);
 
        md->queue.blkdata = md;
+       md->part_type = part_type;
 
        md->disk->major = MMC_BLOCK_MAJOR;
        md->disk->minors = perdev_minors;
@@ -2388,6 +2439,10 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
                cap_str, md->read_only ? "(ro)" : "");
 
+       /* used in ->open, must be set before add_disk: */
+       if (area_type == MMC_BLK_DATA_AREA_MAIN)
+               dev_set_drvdata(&card->dev, md);
+       device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
        return md;
 
  err_kfree:
@@ -2417,7 +2472,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
        }
 
        return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
-                                       MMC_BLK_DATA_AREA_MAIN);
+                                       MMC_BLK_DATA_AREA_MAIN, 0);
 }
 
 static int mmc_blk_alloc_part(struct mmc_card *card,
@@ -2431,10 +2486,9 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
        struct mmc_blk_data *part_md;
 
        part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
-                                   subname, area_type);
+                                   subname, area_type, part_type);
        if (IS_ERR(part_md))
                return PTR_ERR(part_md);
-       part_md->part_type = part_type;
        list_add(&part_md->part, &md->part);
 
        return 0;
@@ -2635,27 +2689,13 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
 
 static void mmc_blk_remove_req(struct mmc_blk_data *md)
 {
-       struct mmc_card *card;
-
-       if (md) {
-               /*
-                * Flush remaining requests and free queues. It
-                * is freeing the queue that stops new requests
-                * from being accepted.
-                */
-               card = md->queue.card;
-               if (md->disk->flags & GENHD_FL_UP) {
-                       device_remove_file(disk_to_dev(md->disk), &md->force_ro);
-                       if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
-                                       card->ext_csd.boot_ro_lockable)
-                               device_remove_file(disk_to_dev(md->disk),
-                                       &md->power_ro_lock);
-
-                       del_gendisk(md->disk);
-               }
-               mmc_cleanup_queue(&md->queue);
-               mmc_blk_put(md);
-       }
+       /*
+        * Flush remaining requests and free queues. It is freeing the queue
+        * that stops new requests from being accepted.
+        */
+       del_gendisk(md->disk);
+       mmc_cleanup_queue(&md->queue);
+       mmc_blk_put(md);
 }
 
 static void mmc_blk_remove_parts(struct mmc_card *card,
@@ -2679,51 +2719,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
        }
 }
 
-static int mmc_add_disk(struct mmc_blk_data *md)
-{
-       int ret;
-       struct mmc_card *card = md->queue.card;
-
-       device_add_disk(md->parent, md->disk, NULL);
-       md->force_ro.show = force_ro_show;
-       md->force_ro.store = force_ro_store;
-       sysfs_attr_init(&md->force_ro.attr);
-       md->force_ro.attr.name = "force_ro";
-       md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
-       ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
-       if (ret)
-               goto force_ro_fail;
-
-       if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
-            card->ext_csd.boot_ro_lockable) {
-               umode_t mode;
-
-               if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
-                       mode = S_IRUGO;
-               else
-                       mode = S_IRUGO | S_IWUSR;
-
-               md->power_ro_lock.show = power_ro_lock_show;
-               md->power_ro_lock.store = power_ro_lock_store;
-               sysfs_attr_init(&md->power_ro_lock.attr);
-               md->power_ro_lock.attr.mode = mode;
-               md->power_ro_lock.attr.name =
-                                       "ro_lock_until_next_power_on";
-               ret = device_create_file(disk_to_dev(md->disk),
-                               &md->power_ro_lock);
-               if (ret)
-                       goto power_ro_lock_fail;
-       }
-       return ret;
-
-power_ro_lock_fail:
-       device_remove_file(disk_to_dev(md->disk), &md->force_ro);
-force_ro_fail:
-       del_gendisk(md->disk);
-
-       return ret;
-}
-
 #ifdef CONFIG_DEBUG_FS
 
 static int mmc_dbg_card_status_get(void *data, u64 *val)
@@ -2889,7 +2884,7 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card,
 
 static int mmc_blk_probe(struct mmc_card *card)
 {
-       struct mmc_blk_data *md, *part_md;
+       struct mmc_blk_data *md;
        int ret = 0;
 
        /*
@@ -2917,18 +2912,6 @@ static int mmc_blk_probe(struct mmc_card *card)
        if (ret)
                goto out;
 
-       dev_set_drvdata(&card->dev, md);
-
-       ret = mmc_add_disk(md);
-       if (ret)
-               goto out;
-
-       list_for_each_entry(part_md, &md->part, part) {
-               ret = mmc_add_disk(part_md);
-               if (ret)
-                       goto out;
-       }
-
        /* Add two debugfs entries */
        mmc_blk_add_debugfs(card, md);
 
index 95fedcf..240c5af 100644 (file)
@@ -936,15 +936,16 @@ int mmc_execute_tuning(struct mmc_card *card)
                opcode = MMC_SEND_TUNING_BLOCK;
 
        err = host->ops->execute_tuning(host, opcode);
+       if (!err) {
+               mmc_retune_clear(host);
+               mmc_retune_enable(host);
+               return 0;
+       }
 
-       if (err) {
+       /* Only print error when we don't check for card removal */
+       if (!host->detect_change)
                pr_err("%s: tuning execution failed: %d\n",
                        mmc_hostname(host), err);
-       } else {
-               host->retune_now = 0;
-               host->need_retune = 0;
-               mmc_retune_enable(host);
-       }
 
        return err;
 }
@@ -2149,6 +2150,41 @@ int mmc_detect_card_removed(struct mmc_host *host)
 }
 EXPORT_SYMBOL(mmc_detect_card_removed);
 
+int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector)
+{
+       unsigned int boot_sectors_num;
+
+       if ((!(card->host->caps2 & MMC_CAP2_ALT_GPT_TEGRA)))
+               return -EOPNOTSUPP;
+
+       /* filter out unrelated cards */
+       if (card->ext_csd.rev < 3 ||
+           !mmc_card_mmc(card) ||
+           !mmc_card_is_blockaddr(card) ||
+            mmc_card_is_removable(card->host))
+               return -ENOENT;
+
+       /*
+        * eMMC storage has two special boot partitions in addition to the
+        * main one.  NVIDIA's bootloader linearizes eMMC boot0->boot1->main
+        * accesses, which means that the partition table addresses are shifted
+        * by the size of the boot partitions.  In accordance with the eMMC
+        * specification, the boot partition size is calculated as follows:
+        *
+        *      boot partition size = 128K byte x BOOT_SIZE_MULT
+        *
+        * Calculate the number of sectors occupied by both boot partitions.
+        */
+       boot_sectors_num = card->ext_csd.raw_boot_mult * SZ_128K /
+                          SZ_512 * MMC_NUM_BOOT_PARTITION;
+
+       /* Defined by NVIDIA and used by Android devices. */
+       *gpt_sector = card->ext_csd.sectors - boot_sectors_num - 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(mmc_card_alternative_gpt_sector);
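
Illustrative numbers for the calculation above (example EXT_CSD values, not taken from this series): with BOOT_SIZE_MULT = 32, each boot partition is 32 x 128 KiB = 4 MiB, so

    sectors per boot partition = 32 * SZ_128K / SZ_512         = 8192
    boot_sectors_num           = 8192 * MMC_NUM_BOOT_PARTITION = 16384
    *gpt_sector                = card->ext_csd.sectors - 16384 - 1

which is the sector that the new ->alternative_gpt_sector() block-device hook above will hand to the partition scanner.
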
+
 void mmc_rescan(struct work_struct *work)
 {
        struct mmc_host *host =
index 0c4de20..7931a4f 100644 (file)
@@ -119,6 +119,8 @@ void mmc_release_host(struct mmc_host *host);
 void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx);
 void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx);
 
+int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *sector);
+
 /**
  *     mmc_claim_host - exclusively claim a host
  *     @host: mmc host to claim
index 419a368..6755780 100644 (file)
@@ -31,18 +31,11 @@ void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq)
        struct request *req = mmc_queue_req_to_req(mqrq);
        struct mmc_request *mrq = &mqrq->brq.mrq;
 
-       if (!req->crypt_keyslot)
+       if (!req->crypt_ctx)
                return;
 
-       mrq->crypto_enabled = true;
-       mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot);
-
-       /*
-        * For now we assume that all MMC drivers set max_dun_bytes_supported=4,
-        * which is the limit for CQHCI crypto.  So all DUNs should be 32-bit.
-        */
-       WARN_ON_ONCE(req->crypt_ctx->bc_dun[0] > U32_MAX);
-
-       mrq->data_unit_num = req->crypt_ctx->bc_dun[0];
+       mrq->crypto_ctx = req->crypt_ctx;
+       if (req->crypt_keyslot)
+               mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot);
 }
 EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req);
index 0475d96..d4683b1 100644 (file)
@@ -96,6 +96,10 @@ void mmc_unregister_host_class(void)
        class_unregister(&mmc_host_class);
 }
 
+/**
+ * mmc_retune_enable() - enter a transfer mode that requires retuning
+ * @host: host which should retune now
+ */
 void mmc_retune_enable(struct mmc_host *host)
 {
        host->can_retune = 1;
@@ -127,13 +131,18 @@ void mmc_retune_unpause(struct mmc_host *host)
 }
 EXPORT_SYMBOL(mmc_retune_unpause);
 
+/**
+ * mmc_retune_disable() - exit a transfer mode that requires retuning
+ * @host: host which should not retune anymore
+ *
+ * It is not meant for temporarily preventing retuning!
+ */
 void mmc_retune_disable(struct mmc_host *host)
 {
        mmc_retune_unpause(host);
        host->can_retune = 0;
        del_timer_sync(&host->retune_timer);
-       host->retune_now = 0;
-       host->need_retune = 0;
+       mmc_retune_clear(host);
 }
 
 void mmc_retune_timer_stop(struct mmc_host *host)
index ba40761..48c4952 100644 (file)
@@ -21,6 +21,12 @@ int mmc_retune(struct mmc_host *host);
 void mmc_retune_pause(struct mmc_host *host);
 void mmc_retune_unpause(struct mmc_host *host);
 
+static inline void mmc_retune_clear(struct mmc_host *host)
+{
+       host->retune_now = 0;
+       host->need_retune = 0;
+}
+
 static inline void mmc_retune_hold_now(struct mmc_host *host)
 {
        host->retune_now = 0;
index 838726b..29e58ff 100644 (file)
@@ -418,6 +418,8 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
                ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
        card->ext_csd.raw_hc_erase_grp_size =
                ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+       card->ext_csd.raw_boot_mult =
+               ext_csd[EXT_CSD_BOOT_MULT];
        if (card->ext_csd.rev >= 3) {
                u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
                card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
index 973756e..0c54858 100644 (file)
@@ -435,7 +435,7 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
        u32 status = 0;
        int err;
 
-       if (host->ops->card_busy) {
+       if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
                *busy = host->ops->card_busy(host);
                return 0;
        }
@@ -457,6 +457,7 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
                break;
        case MMC_BUSY_HPI:
        case MMC_BUSY_EXTR_SINGLE:
+       case MMC_BUSY_IO:
                break;
        default:
                err = -EINVAL;
@@ -509,6 +510,7 @@ int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
 
 int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
                      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
@@ -521,6 +523,7 @@ int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
 
        return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
 }
+EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
 
 bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
                          unsigned int timeout_ms)
@@ -956,8 +959,15 @@ void mmc_run_bkops(struct mmc_card *card)
         */
        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                         EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
-       if (err)
-               pr_warn("%s: Error %d starting bkops\n",
+       /*
+        * If the BKOPS timed out, the card is probably still busy in the
+        * R1_STATE_PRG. Rather than continue to wait, let's try to abort
+        * it with an HPI command to get back into R1_STATE_TRAN.
+        */
+       if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
+               pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
+       else if (err)
+               pr_warn("%s: Error %d running bkops\n",
                        mmc_hostname(card->host), err);
 
        mmc_retune_release(card->host);
index 41ab4f5..ae25ffc 100644 (file)
@@ -15,6 +15,7 @@ enum mmc_busy_cmd {
        MMC_BUSY_ERASE,
        MMC_BUSY_HPI,
        MMC_BUSY_EXTR_SINGLE,
+       MMC_BUSY_IO,
 };
 
 struct mmc_host;
index 68a826f..2e120ad 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -27,6 +28,7 @@ struct mmc_pwrseq_sd8787 {
        struct mmc_pwrseq pwrseq;
        struct gpio_desc *reset_gpio;
        struct gpio_desc *pwrdn_gpio;
+       u32 reset_pwrdwn_delay_ms;
 };
 
 #define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq)
@@ -37,7 +39,7 @@ static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host)
 
        gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
 
-       msleep(300);
+       msleep(pwrseq->reset_pwrdwn_delay_ms);
        gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
 }
 
@@ -54,8 +56,12 @@ static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = {
        .power_off = mmc_pwrseq_sd8787_power_off,
 };
 
+static const u32 sd8787_delay_ms = 300;
+static const u32 wilc1000_delay_ms = 5;
+
 static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = {
-       { .compatible = "mmc-pwrseq-sd8787",},
+       { .compatible = "mmc-pwrseq-sd8787", .data = &sd8787_delay_ms },
+       { .compatible = "mmc-pwrseq-wilc1000", .data = &wilc1000_delay_ms },
        {/* sentinel */},
 };
 MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match);
@@ -64,11 +70,15 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
 {
        struct mmc_pwrseq_sd8787 *pwrseq;
        struct device *dev = &pdev->dev;
+       const struct of_device_id *match;
 
        pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
        if (!pwrseq)
                return -ENOMEM;
 
+       match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node);
+       pwrseq->reset_pwrdwn_delay_ms = *(u32 *)match->data;
+
        pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW);
        if (IS_ERR(pwrseq->pwrdn_gpio))
                return PTR_ERR(pwrseq->pwrdn_gpio);
index cc32617..b15c034 100644 (file)
@@ -163,7 +163,7 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
        blk_mq_run_hw_queues(q, true);
 }
 
-static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
+static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
 {
        struct scatterlist *sg;
 
@@ -193,33 +193,29 @@ static void mmc_queue_setup_discard(struct request_queue *q,
                blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
 }
 
-static unsigned int mmc_get_max_segments(struct mmc_host *host)
+static unsigned short mmc_get_max_segments(struct mmc_host *host)
 {
        return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
                                         host->max_segs;
 }
 
-/**
- * mmc_init_request() - initialize the MMC-specific per-request data
- * @mq: the request queue
- * @req: the request
- * @gfp: memory allocation policy
- */
-static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
-                             gfp_t gfp)
+static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
+                              unsigned int hctx_idx, unsigned int numa_node)
 {
        struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
+       struct mmc_queue *mq = set->driver_data;
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;
 
-       mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
+       mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
        if (!mq_rq->sg)
                return -ENOMEM;
 
        return 0;
 }
 
-static void mmc_exit_request(struct request_queue *q, struct request *req)
+static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
+                               unsigned int hctx_idx)
 {
        struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
@@ -227,20 +223,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
        mq_rq->sg = NULL;
 }
 
-static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
-                              unsigned int hctx_idx, unsigned int numa_node)
-{
-       return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
-}
-
-static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
-                               unsigned int hctx_idx)
-{
-       struct mmc_queue *mq = set->driver_data;
-
-       mmc_exit_request(mq->queue, req);
-}
-
 static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
 {
index b237735..a705ba6 100644 (file)
@@ -330,13 +330,25 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
                        prev = &this->next;
 
                        if (ret == -ENOENT) {
+
                                if (time_after(jiffies, timeout))
                                        break;
-                               /* warn about unknown tuples */
-                               pr_warn_ratelimited("%s: queuing unknown"
-                                      " CIS tuple 0x%02x (%u bytes)\n",
-                                      mmc_hostname(card->host),
-                                      tpl_code, tpl_link);
+
+#define FMT(type) "%s: queuing " type " CIS tuple 0x%02x [%*ph] (%u bytes)\n"
+                               /*
+                                * Tuples in this range are reserved for
+                                * vendors, so don't warn about them
+                                */
+                               if (tpl_code >= 0x80 && tpl_code <= 0x8f)
+                                       pr_debug_ratelimited(FMT("vendor"),
+                                               mmc_hostname(card->host),
+                                               tpl_code, tpl_link, this->data,
+                                               tpl_link);
+                               else
+                                       pr_warn_ratelimited(FMT("unknown"),
+                                               mmc_hostname(card->host),
+                                               tpl_code, tpl_link, this->data,
+                                               tpl_link);
                        }
 
                        /* keep on analyzing tuples */
index 60b58ee..d7fb084 100644 (file)
@@ -22,12 +22,15 @@ int cqhci_crypto_init(struct cqhci_host *host);
  */
 static inline u64 cqhci_crypto_prep_task_desc(struct mmc_request *mrq)
 {
-       if (!mrq->crypto_enabled)
+       if (!mrq->crypto_ctx)
                return 0;
 
+       /* We set max_dun_bytes_supported=4, so all DUNs should be 32-bit. */
+       WARN_ON_ONCE(mrq->crypto_ctx->bc_dun[0] > U32_MAX);
+
        return CQHCI_CRYPTO_ENABLE_BIT |
               CQHCI_CRYPTO_KEYSLOT(mrq->crypto_key_slot) |
-              mrq->data_unit_num;
+              mrq->crypto_ctx->bc_dun[0];
 }
 
 #else /* CONFIG_MMC_CRYPTO */
index d333130..6578cc6 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
 #include <linux/ioport.h>
+#include <linux/ktime.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/prandom.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
@@ -181,6 +183,9 @@ static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
                           &host->pending_events);
        debugfs_create_xul("completed_events", S_IRUSR, root,
                           &host->completed_events);
+#ifdef CONFIG_FAULT_INJECTION
+       fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
+#endif
 }
 #endif /* defined(CONFIG_DEBUG_FS) */
 
@@ -782,6 +787,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
        int ret = 0;
 
        /* Set external dma config: burst size, burst width */
+       memset(&cfg, 0, sizeof(cfg));
        cfg.dst_addr = host->phy_regs + fifo_offset;
        cfg.src_addr = cfg.dst_addr;
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1788,6 +1794,68 @@ static const struct mmc_host_ops dw_mci_ops = {
        .prepare_hs400_tuning   = dw_mci_prepare_hs400_tuning,
 };
 
+#ifdef CONFIG_FAULT_INJECTION
+static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
+{
+       struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
+       unsigned long flags;
+
+       spin_lock_irqsave(&host->irq_lock, flags);
+
+       if (!host->data_status)
+               host->data_status = SDMMC_INT_DCRC;
+       set_bit(EVENT_DATA_ERROR, &host->pending_events);
+       tasklet_schedule(&host->tasklet);
+
+       spin_unlock_irqrestore(&host->irq_lock, flags);
+
+       return HRTIMER_NORESTART;
+}
+
+static void dw_mci_start_fault_timer(struct dw_mci *host)
+{
+       struct mmc_data *data = host->data;
+
+       if (!data || data->blocks <= 1)
+               return;
+
+       if (!should_fail(&host->fail_data_crc, 1))
+               return;
+
+       /*
+        * Try to inject the error at random points during the data transfer.
+        */
+       hrtimer_start(&host->fault_timer,
+                     ms_to_ktime(prandom_u32() % 25),
+                     HRTIMER_MODE_REL);
+}
+
+static void dw_mci_stop_fault_timer(struct dw_mci *host)
+{
+       hrtimer_cancel(&host->fault_timer);
+}
+
+static void dw_mci_init_fault(struct dw_mci *host)
+{
+       host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
+
+       hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       host->fault_timer.function = dw_mci_fault_timer;
+}
+#else
+static void dw_mci_init_fault(struct dw_mci *host)
+{
+}
+
+static void dw_mci_start_fault_timer(struct dw_mci *host)
+{
+}
+
+static void dw_mci_stop_fault_timer(struct dw_mci *host)
+{
+}
+#endif
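
Usage note for the fault-injection hooks above (not part of the diff): fail_data_crc is a standard struct fault_attr, so with CONFIG_FAULT_INJECTION_DEBUG_FS enabled the usual knobs exposed by fault_create_debugfs_attr() (probability, interval, times, verbose, ...) appear under the same debugfs directory as the other dw_mmc entries. Once should_fail() selects a multi-block transfer, the hrtimer fires at a random point within the first 25 ms and reports a fake SDMMC_INT_DCRC data-CRC error, exercising the stop/abort and recovery paths in the tasklet.
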
+
 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
        __releases(&host->lock)
        __acquires(&host->lock)
@@ -2018,8 +2086,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                                        continue;
                                }
 
-                               dw_mci_stop_dma(host);
                                send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_SENDING_STOP;
                                break;
                        }
@@ -2043,10 +2111,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                         */
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
-                               dw_mci_stop_dma(host);
                                if (!(host->data_status & (SDMMC_INT_DRTO |
                                                           SDMMC_INT_EBE)))
                                        send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_DATA_ERROR;
                                break;
                        }
@@ -2079,10 +2147,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                         */
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
-                               dw_mci_stop_dma(host);
                                if (!(host->data_status & (SDMMC_INT_DRTO |
                                                           SDMMC_INT_EBE)))
                                        send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_DATA_ERROR;
                                break;
                        }
@@ -2102,6 +2170,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                                break;
                        }
 
+                       dw_mci_stop_fault_timer(host);
                        host->data = NULL;
                        set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
                        err = dw_mci_data_complete(host, data);
@@ -2151,6 +2220,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                        if (mrq->cmd->error && mrq->data)
                                dw_mci_reset(host);
 
+                       dw_mci_stop_fault_timer(host);
                        host->cmd = NULL;
                        host->data = NULL;
 
@@ -2600,6 +2670,8 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
 
        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
        tasklet_schedule(&host->tasklet);
+
+       dw_mci_start_fault_timer(host);
 }
 
 static void dw_mci_handle_cd(struct dw_mci *host)
@@ -3223,6 +3295,8 @@ int dw_mci_probe(struct dw_mci *host)
        spin_lock_init(&host->irq_lock);
        INIT_LIST_HEAD(&host->queue);
 
+       dw_mci_init_fault(host);
+
        /*
         * Get the host data width - this assumes that HCON has been set with
         * the correct values.
index da5923a..ce05d81 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/mmc/core.h>
 #include <linux/dmaengine.h>
 #include <linux/reset.h>
+#include <linux/fault-inject.h>
+#include <linux/hrtimer.h>
 #include <linux/interrupt.h>
 
 enum dw_mci_state {
@@ -230,6 +232,11 @@ struct dw_mci {
        struct timer_list       cmd11_timer;
        struct timer_list       cto_timer;
        struct timer_list       dto_timer;
+
+#ifdef CONFIG_FAULT_INJECTION
+       struct fault_attr       fail_data_crc;
+       struct hrtimer          fault_timer;
+#endif
 };
 
 /* DMA ops for Internal/External DMAC interface */
index 65c65bb..a1bcde3 100644 (file)
@@ -180,7 +180,7 @@ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
        u8 *cp = host->data->status;
        unsigned long start = jiffies;
 
-       while (1) {
+       do {
                int             status;
                unsigned        i;
 
@@ -193,16 +193,9 @@ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
                                return cp[i];
                }
 
-               if (time_is_before_jiffies(start + timeout))
-                       break;
-
-               /* If we need long timeouts, we may release the CPU.
-                * We use jiffies here because we want to have a relation
-                * between elapsed time and the blocking of the scheduler.
-                */
-               if (time_is_before_jiffies(start + 1))
-                       schedule();
-       }
+               /* If we need long timeouts, we may release the CPU */
+               cond_resched();
+       } while (time_is_after_jiffies(start + timeout));
        return -ETIMEDOUT;
 }
 
index 984d350..3765e2f 100644 (file)
@@ -2126,6 +2126,9 @@ static int mmci_probe(struct amba_device *dev,
                ret = PTR_ERR(host->rst);
                goto clk_disable;
        }
+       ret = reset_control_deassert(host->rst);
+       if (ret)
+               dev_err(mmc_dev(mmc), "failed to de-assert reset\n");
 
        /* Get regulators and the supported OCR mask */
        ret = mmc_regulator_get_supply(mmc);
index 51db30a..fdaa11f 100644 (file)
@@ -479,8 +479,9 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
        u32 status;
        int ret = 0;
 
-       if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
-               spin_lock_irqsave(&host->lock, flags);
+       spin_lock_irqsave(&host->lock, flags);
+       if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
+           host->pwr_reg & MCI_STM32_VSWITCHEN) {
                mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
                spin_unlock_irqrestore(&host->lock, flags);
 
@@ -492,9 +493,11 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
 
                writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
                               host->base + MMCICLEAR);
+               spin_lock_irqsave(&host->lock, flags);
                mmci_write_pwrreg(host, host->pwr_reg &
                                  ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
        }
+       spin_unlock_irqrestore(&host->lock, flags);
 
        return ret;
 }
index bde2988..6c9d381 100644 (file)
@@ -628,6 +628,7 @@ static int moxart_probe(struct platform_device *pdev)
                         host->dma_chan_tx, host->dma_chan_rx);
                host->have_dma = true;
 
+               memset(&cfg, 0, sizeof(cfg));
                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
index 53eded8..0c45e82 100644 (file)
@@ -42,6 +42,11 @@ struct renesas_sdhi_quirks {
        const u8 (*hs400_calib_table)[SDHI_CALIB_TABLE_MAX];
 };
 
+struct renesas_sdhi_of_data_with_quirks {
+       const struct renesas_sdhi_of_data *of_data;
+       const struct renesas_sdhi_quirks *quirks;
+};
+
 struct tmio_mmc_dma {
        enum dma_slave_buswidth dma_buswidth;
        bool (*filter)(struct dma_chan *chan, void *arg);
@@ -78,6 +83,8 @@ struct renesas_sdhi {
        container_of((host)->pdata, struct renesas_sdhi, mmc_data)
 
 int renesas_sdhi_probe(struct platform_device *pdev,
-                      const struct tmio_mmc_dma_ops *dma_ops);
+                      const struct tmio_mmc_dma_ops *dma_ops,
+                      const struct renesas_sdhi_of_data *of_data,
+                      const struct renesas_sdhi_quirks *quirks);
 int renesas_sdhi_remove(struct platform_device *pdev);
 #endif
index e49ca0f..6fc4cf3 100644 (file)
@@ -305,27 +305,6 @@ static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
 #define SH_MOBILE_SDHI_SCC_TMPPORT_CALIB_CODE_MASK     0x1f
 #define SH_MOBILE_SDHI_SCC_TMPPORT_MANUAL_MODE         BIT(7)
 
-static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
-       { 3,  3,  3,  3,  3,  3,  3,  4,  4,  5,  6,  7,  8,  9, 10, 15,
-        16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 },
-       { 5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  6,  7,  8, 11,
-        12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 }
-};
-
-static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
-       { 1,  2,  6,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 15, 15, 16,
-        17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 },
-       { 2,  3,  4,  4,  5,  6,  7,  9, 10, 11, 12, 13, 14, 15, 16, 17,
-        17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 }
-};
-
-static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
-       { 0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
-         0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },
-       { 0,  0,  0,  1,  2,  3,  3,  4,  4,  4,  5,  5,  6,  8,  9, 10,
-        11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 }
-};
-
 static inline u32 sd_scc_read32(struct tmio_mmc_host *host,
                                struct renesas_sdhi *priv, int addr)
 {
@@ -895,69 +874,12 @@ static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
        renesas_sdhi_sdbuf_width(host, enable ? width : 16);
 }
 
-static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = {
-       .hs400_disabled = true,
-       .hs400_4taps = true,
-};
-
-static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
-       .hs400_4taps = true,
-       .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
-};
-
-static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
-       .hs400_disabled = true,
-};
-
-static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
-       .hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
-};
-
-static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
-       .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
-};
-
-static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
-       .hs400_4taps = true,
-       .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
-       .hs400_calib_table = r8a7796_es13_calib_table,
-};
-
-static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
-       .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
-       .hs400_calib_table = r8a77965_calib_table,
-};
-
-static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
-       .hs400_calib_table = r8a77990_calib_table,
-};
-
-/*
- * Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and 1.2 as of now.
- * So, we want to treat them equally and only have a match for ES1.2 to enforce
- * this if there ever will be a way to distinguish ES1.2.
- */
-static const struct soc_device_attribute sdhi_quirks_match[]  = {
-       { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
-       { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
-       { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
-       { .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
-       { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
-       { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
-       { .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 },
-       { .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 },
-       { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
-       { .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 },
-       { /* Sentinel. */ },
-};
-
 int renesas_sdhi_probe(struct platform_device *pdev,
-                      const struct tmio_mmc_dma_ops *dma_ops)
+                      const struct tmio_mmc_dma_ops *dma_ops,
+                      const struct renesas_sdhi_of_data *of_data,
+                      const struct renesas_sdhi_quirks *quirks)
 {
        struct tmio_mmc_data *mmd = pdev->dev.platform_data;
-       const struct renesas_sdhi_quirks *quirks = NULL;
-       const struct renesas_sdhi_of_data *of_data;
-       const struct soc_device_attribute *attr;
        struct tmio_mmc_data *mmc_data;
        struct tmio_mmc_dma *dma_priv;
        struct tmio_mmc_host *host;
@@ -966,12 +888,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        struct resource *res;
        u16 ver;
 
-       of_data = of_device_get_match_data(&pdev->dev);
-
-       attr = soc_device_match(sdhi_quirks_match);
-       if (attr)
-               quirks = attr->data;
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -EINVAL;
index e8f4863..7660f7e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mmc/host.h>
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
 #include <linux/sys_soc.h>
@@ -92,7 +93,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
        },
 };
 
-static const struct renesas_sdhi_of_data of_rza2_compatible = {
+static const struct renesas_sdhi_of_data of_data_rza2 = {
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
                          TMIO_MMC_HAVE_CBSY,
        .tmio_ocr_mask  = MMC_VDD_32_33,
@@ -107,7 +108,11 @@ static const struct renesas_sdhi_of_data of_rza2_compatible = {
        .max_segs       = 1,
 };
 
-static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
+static const struct renesas_sdhi_of_data_with_quirks of_rza2_compatible = {
+       .of_data        = &of_data_rza2,
+};
+
+static const struct renesas_sdhi_of_data of_data_rcar_gen3 = {
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
                          TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
        .capabilities   = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
@@ -122,11 +127,116 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
        .max_segs       = 1,
 };
 
+static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
+       { 3,  3,  3,  3,  3,  3,  3,  4,  4,  5,  6,  7,  8,  9, 10, 15,
+        16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 },
+       { 5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  6,  7,  8, 11,
+        12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 }
+};
+
+static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
+       { 1,  2,  6,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 15, 15, 16,
+        17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 },
+       { 2,  3,  4,  4,  5,  6,  7,  9, 10, 11, 12, 13, 14, 15, 16, 17,
+        17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 }
+};
+
+static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
+       { 0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+         0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },
+       { 0,  0,  0,  1,  2,  3,  3,  4,  4,  4,  5,  5,  6,  8,  9, 10,
+        11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 }
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = {
+       .hs400_disabled = true,
+       .hs400_4taps = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
+       .hs400_4taps = true,
+       .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
+       .hs400_disabled = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
+       .hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
+       .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
+       .hs400_4taps = true,
+       .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+       .hs400_calib_table = r8a7796_es13_calib_table,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
+       .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+       .hs400_calib_table = r8a77965_calib_table,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
+       .hs400_calib_table = r8a77990_calib_table,
+};
+
+/*
+ * Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and 1.2 as of now.
+ * So, we want to treat them equally and only have a match for ES1.2 to enforce
+ * this, should there ever be a way to distinguish ES1.2.
+ */
+static const struct soc_device_attribute sdhi_quirks_match[]  = {
+       { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
+       { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
+       { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
+       { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
+       { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
+       { /* Sentinel. */ },
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_r8a7795_compatible = {
+       .of_data = &of_data_rcar_gen3,
+       .quirks = &sdhi_quirks_bad_taps2367,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_r8a77961_compatible = {
+       .of_data = &of_data_rcar_gen3,
+       .quirks = &sdhi_quirks_bad_taps1357,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_r8a77965_compatible = {
+       .of_data = &of_data_rcar_gen3,
+       .quirks = &sdhi_quirks_r8a77965,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_r8a77980_compatible = {
+       .of_data = &of_data_rcar_gen3,
+       .quirks = &sdhi_quirks_nohs400,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_r8a77990_compatible = {
+       .of_data = &of_data_rcar_gen3,
+       .quirks = &sdhi_quirks_r8a77990,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_compatible = {
+       .of_data = &of_data_rcar_gen3,
+};
+
 static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
        { .compatible = "renesas,sdhi-r7s9210", .data = &of_rza2_compatible, },
        { .compatible = "renesas,sdhi-mmc-r8a77470", .data = &of_rcar_gen3_compatible, },
-       { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
+       { .compatible = "renesas,sdhi-r8a7795", .data = &of_r8a7795_compatible, },
        { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+       { .compatible = "renesas,sdhi-r8a77961", .data = &of_r8a77961_compatible, },
+       { .compatible = "renesas,sdhi-r8a77965", .data = &of_r8a77965_compatible, },
+       { .compatible = "renesas,sdhi-r8a77980", .data = &of_r8a77980_compatible, },
+       { .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
        { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
        {},
 };
@@ -405,16 +515,27 @@ static const struct soc_device_attribute soc_dma_quirks[] = {
 
 static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
 {
-       const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks);
+       const struct soc_device_attribute *attr;
+       const struct renesas_sdhi_of_data_with_quirks *of_data_quirks;
+       const struct renesas_sdhi_quirks *quirks;
        struct device *dev = &pdev->dev;
 
-       if (soc)
-               global_flags |= (unsigned long)soc->data;
+       of_data_quirks = of_device_get_match_data(&pdev->dev);
+       quirks = of_data_quirks->quirks;
+
+       attr = soc_device_match(soc_dma_quirks);
+       if (attr)
+               global_flags |= (unsigned long)attr->data;
+
+       attr = soc_device_match(sdhi_quirks_match);
+       if (attr)
+               quirks = attr->data;
 
        /* value is max of SD_SECCNT. Confirmed by HW engineers */
        dma_set_max_seg_size(dev, 0xffffffff);
 
-       return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops);
+       return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops,
+                                 of_data_quirks->of_data, quirks);
 }
 
 static const struct dev_pm_ops renesas_sdhi_internal_dmac_dev_pm_ops = {
index ffa6421..99e3426 100644 (file)
@@ -108,9 +108,9 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
        renesas_sdhi_sys_dmac_enable_dma(host, false);
 
        if (host->chan_rx)
-               dmaengine_terminate_all(host->chan_rx);
+               dmaengine_terminate_sync(host->chan_rx);
        if (host->chan_tx)
-               dmaengine_terminate_all(host->chan_tx);
+               dmaengine_terminate_sync(host->chan_tx);
 
        renesas_sdhi_sys_dmac_enable_dma(host, true);
 }
@@ -451,7 +451,8 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
 
 static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
 {
-       return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops);
+       return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops,
+                                 of_device_get_match_data(&pdev->dev), NULL);
 }
 
 static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {
index 4ca9374..58cfaff 100644 (file)
@@ -542,9 +542,22 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host,
        return 0;
 }
 
+static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
+{
+       rtsx_pci_write_register(host->pcr, SD_CFG1,
+                       SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
+}
+
+static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
+{
+       rtsx_pci_write_register(host->pcr, SD_CFG1,
+                       SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
+}
+
 static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
 {
        struct mmc_data *data = mrq->data;
+       int err;
 
        if (host->sg_count < 0) {
                data->error = host->sg_count;
@@ -553,22 +566,19 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
                return data->error;
        }
 
-       if (data->flags & MMC_DATA_READ)
-               return sd_read_long_data(host, mrq);
+       if (data->flags & MMC_DATA_READ) {
+               if (host->initial_mode)
+                       sd_disable_initial_mode(host);
 
-       return sd_write_long_data(host, mrq);
-}
+               err = sd_read_long_data(host, mrq);
 
-static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
-{
-       rtsx_pci_write_register(host->pcr, SD_CFG1,
-                       SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
-}
+               if (host->initial_mode)
+                       sd_enable_initial_mode(host);
 
-static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
-{
-       rtsx_pci_write_register(host->pcr, SD_CFG1,
-                       SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
+               return err;
+       }
+
+       return sd_write_long_data(host, mrq);
 }
 
 static void sd_normal_rw(struct realtek_pci_sdmmc *host,
index 72c0bf0..f18d169 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/consumer.h>
-#include <linux/platform_data/mmc-esdhc-imx.h>
 #include <linux/pm_runtime.h>
 #include "sdhci-pltfm.h"
 #include "sdhci-esdhc.h"
 
 #define ESDHC_VEND_SPEC2               0xc8
 #define ESDHC_VEND_SPEC2_EN_BUSY_IRQ   (1 << 8)
+#define ESDHC_VEND_SPEC2_AUTO_TUNE_8BIT_EN     (1 << 4)
+#define ESDHC_VEND_SPEC2_AUTO_TUNE_4BIT_EN     (0 << 4)
+#define ESDHC_VEND_SPEC2_AUTO_TUNE_1BIT_EN     (2 << 4)
+#define ESDHC_VEND_SPEC2_AUTO_TUNE_CMD_EN      (1 << 6)
+#define ESDHC_VEND_SPEC2_AUTO_TUNE_MODE_MASK   (7 << 4)
 
 #define ESDHC_TUNING_CTRL              0xcc
 #define ESDHC_STD_TUNING_EN            (1 << 24)
 #define ESDHC_CTRL_4BITBUS             (0x1 << 1)
 #define ESDHC_CTRL_8BITBUS             (0x2 << 1)
 #define ESDHC_CTRL_BUSWIDTH_MASK       (0x3 << 1)
+#define USDHC_GET_BUSWIDTH(c) (c & ESDHC_CTRL_BUSWIDTH_MASK)
 
 /*
  * There is an INT DMA ERR mismatch between eSDHC and STD SDHC SPEC:
  */
 #define ESDHC_FLAG_BROKEN_AUTO_CMD23   BIT(16)
 
+enum wp_types {
+       ESDHC_WP_NONE,          /* no WP, neither controller nor gpio */
+       ESDHC_WP_CONTROLLER,    /* mmc controller internal WP */
+       ESDHC_WP_GPIO,          /* external gpio pin for WP */
+};
+
+enum cd_types {
+       ESDHC_CD_NONE,          /* no CD, neither controller nor gpio */
+       ESDHC_CD_CONTROLLER,    /* mmc controller internal CD */
+       ESDHC_CD_GPIO,          /* external gpio pin for CD */
+       ESDHC_CD_PERMANENT,     /* no CD, card permanently wired to host */
+};
+
+/*
+ * struct esdhc_platform_data - platform data for esdhc on i.MX
+ *
+ * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
+ *
+ * @wp_type:   type of write_protect method (see wp_types enum above)
+ * @cd_type:   type of card_detect method (see cd_types enum above)
+ */
+
+struct esdhc_platform_data {
+       enum wp_types wp_type;
+       enum cd_types cd_type;
+       int max_bus_width;
+       unsigned int delay_line;
+       unsigned int tuning_step;       /* The delay cell steps in tuning procedure */
+       unsigned int tuning_start_tap;  /* The start delay cell point in tuning procedure */
+       unsigned int strobe_dll_delay_target;   /* The delay cell for strobe pad (read clock) */
+};
+
 struct esdhc_soc_data {
        u32 flags;
 };
@@ -376,6 +413,30 @@ static inline void esdhc_wait_for_card_clock_gate_off(struct sdhci_host *host)
                dev_warn(mmc_dev(host->mmc), "%s: card clock still not gate off in 100us!.\n", __func__);
 }
 
+/* Enable the auto tuning circuit to check the CMD line and BUS line */
+static inline void usdhc_auto_tuning_mode_sel(struct sdhci_host *host)
+{
+       u32 buswidth, auto_tune_buswidth;
+
+       buswidth = USDHC_GET_BUSWIDTH(readl(host->ioaddr + SDHCI_HOST_CONTROL));
+
+       switch (buswidth) {
+       case ESDHC_CTRL_8BITBUS:
+               auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_8BIT_EN;
+               break;
+       case ESDHC_CTRL_4BITBUS:
+               auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_4BIT_EN;
+               break;
+       default:        /* 1BITBUS */
+               auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_1BIT_EN;
+               break;
+       }
+
+       esdhc_clrset_le(host, ESDHC_VEND_SPEC2_AUTO_TUNE_MODE_MASK,
+                       auto_tune_buswidth | ESDHC_VEND_SPEC2_AUTO_TUNE_CMD_EN,
+                       ESDHC_VEND_SPEC2);
+}
+
 static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -597,17 +658,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
                else
                        new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
                writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
-               if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
-                       new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
-                       if (val & SDHCI_CTRL_TUNED_CLK) {
-                               new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL;
-                               new_val |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
-                       } else {
-                               new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
-                               new_val &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
-                       }
-                       writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
-               } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+               if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
                        u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
                        u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
                        if (val & SDHCI_CTRL_TUNED_CLK) {
@@ -622,6 +673,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
                                v |= ESDHC_MIX_CTRL_EXE_TUNE;
                                m |= ESDHC_MIX_CTRL_FBCLK_SEL;
                                m |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+                               usdhc_auto_tuning_mode_sel(host);
                        } else {
                                v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
                        }
@@ -991,6 +1043,8 @@ static void esdhc_post_tuning(struct sdhci_host *host)
 {
        u32 reg;
 
+       usdhc_auto_tuning_mode_sel(host);
+
        reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
        reg &= ~ESDHC_MIX_CTRL_EXE_TUNE;
        reg |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
index cce390f..032bf85 100644 (file)
@@ -173,6 +173,23 @@ static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host)
                return pltfm_host->clock;
 }
 
+/*
+ * There is a known bug on BCM2711's SDHCI core integration where the
+ * controller will hang when the difference between the core clock and the bus
+ * clock is too great. Specifically this can be reproduced under the following
+ * conditions:
+ *
+ *  - No SD card plugged in, polling thread is running, probing cards at
+ *    100 kHz.
+ *  - BCM2711's core clock configured at 500MHz or more
+ *
+ * So we set 200kHz as the minimum clock frequency available for that SoC.
+ */
+static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host)
+{
+       return 200000;
+}
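
For scale (illustrative arithmetic only): polling at the old 100 kHz floor with the core clock at 500 MHz gives a clock ratio of 500 MHz / 100 kHz = 5000, the configuration under which the hang reproduces; the new 200 kHz floor keeps the worst-case ratio down to 500 MHz / 200 kHz = 2500.
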
+
 static const struct sdhci_ops sdhci_iproc_ops = {
        .set_clock = sdhci_set_clock,
        .get_max_clock = sdhci_iproc_get_max_clock,
@@ -271,6 +288,7 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
        .set_clock = sdhci_set_clock,
        .set_power = sdhci_set_power_and_bus_voltage,
        .get_max_clock = sdhci_iproc_get_max_clock,
+       .get_min_clock = sdhci_iproc_bcm2711_get_min_clock,
        .set_bus_width = sdhci_set_bus_width,
        .reset = sdhci_reset,
        .set_uhs_signaling = sdhci_set_uhs_signaling,
index e44b7a6..50c71e0 100644 (file)
@@ -2089,6 +2089,23 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
        sdhci_cqe_disable(mmc, recovery);
 }
 
+static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+       u32 count, start = 15;
+
+       __sdhci_set_timeout(host, cmd);
+       count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
+       /*
+        * Update the software timeout value if it is less than the hardware
+        * data timeout value. The Qcom SoC hardware data timeout is calculated
+        * as 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock.
+        */
+       if (cmd && cmd->data && host->clock > 400000 &&
+           host->clock <= 50000000 &&
+           ((1 << (count + start)) > (10 * host->clock)))
+               host->data_timeout = 22LL * NSEC_PER_SEC;
+}
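
Worked through with the numbers this patch uses elsewhere (illustrative only): with start = 15, the check (1 << (count + 15)) > 10 * host->clock asks whether the hardware data timeout of 4 * 2^(count + 13) clock cycles exceeds roughly ten seconds. At the 50 MHz upper bound and the maximum timeout count of 0xF set in probe below,

    hw data timeout = 4 * 2^(0xF + 13) cycles / 50 MHz = 2^30 cycles / 50 MHz ≈ 21.5 s

so raising the software timeout to 22 s keeps it from expiring before the controller's own data timeout does.
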
+
 static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
        .enable         = sdhci_msm_cqe_enable,
        .disable        = sdhci_msm_cqe_disable,
@@ -2438,6 +2455,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
        .irq    = sdhci_msm_cqe_irq,
        .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
        .set_power = sdhci_set_power_noreg,
+       .set_timeout = sdhci_msm_set_timeout,
 };
 
 static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -2696,6 +2714,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 
        msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
 
+       /* Set the timeout value to max possible */
+       host->max_timeout_count = 0xF;
+
        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
index 0e7c07e..737e2bf 100644 (file)
@@ -159,6 +159,12 @@ struct sdhci_arasan_data {
 /* Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the
  * internal clock even when the clock isn't stable */
 #define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1)
+/*
+ * Some of the Arasan variations might not have timing requirements
+ * met at 25MHz for Default Speed mode; those controllers work at
+ * 19MHz instead.
+ */
+#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2)
 };
 
 struct sdhci_arasan_of_data {
@@ -267,7 +273,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
                         * through low speeds without power cycling.
                         */
                        sdhci_set_clock(host, host->max_clk);
-                       phy_power_on(sdhci_arasan->phy);
+                       if (phy_power_on(sdhci_arasan->phy)) {
+                               pr_err("%s: Cannot power on phy.\n",
+                                      mmc_hostname(host->mmc));
+                               return;
+                       }
+
                        sdhci_arasan->is_phy_on = true;
 
                        /*
@@ -290,6 +301,16 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
                sdhci_arasan->is_phy_on = false;
        }
 
+       if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN) {
+               /*
+                * Some Arasan variants might not meet the timing
+                * requirements at 25 MHz in Default Speed mode; those
+                * controllers work at 19 MHz instead.
+                */
+               if (clock == DEFAULT_SPEED_MAX_DTR)
+                       clock = (DEFAULT_SPEED_MAX_DTR * 19) / 25;
+       }
+
        /* Set the Input and Output Clock Phase Delays */
        if (clk_data->set_clk_delays)
                clk_data->set_clk_delays(host);
@@ -307,7 +328,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
                msleep(20);
 
        if (ctrl_phy) {
-               phy_power_on(sdhci_arasan->phy);
+               if (phy_power_on(sdhci_arasan->phy)) {
+                       pr_err("%s: Cannot power on phy.\n",
+                              mmc_hostname(host->mmc));
+                       return;
+               }
+
                sdhci_arasan->is_phy_on = true;
        }
 }
@@ -463,7 +489,9 @@ static int sdhci_arasan_suspend(struct device *dev)
                ret = phy_power_off(sdhci_arasan->phy);
                if (ret) {
                        dev_err(dev, "Cannot power off phy.\n");
-                       sdhci_resume_host(host);
+                       if (sdhci_resume_host(host))
+                               dev_err(dev, "Cannot resume host.\n");
+
                        return ret;
                }
                sdhci_arasan->is_phy_on = false;
@@ -878,6 +906,10 @@ static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode)
                                                           NODE_SD_1;
        int err;
 
+       /* ZynqMP SD controller does not perform auto tuning in DDR50 mode */
+       if (mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+               return 0;
+
        arasan_zynqmp_dll_reset(host, device_id);
 
        err = sdhci_execute_tuning(mmc, opcode);
@@ -952,7 +984,7 @@ static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
        struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
        const struct sdhci_arasan_soc_ctl_map *soc_ctl_map =
                sdhci_arasan->soc_ctl_map;
-       u32 mhz = DIV_ROUND_CLOSEST(clk_get_rate(pltfm_host->clk), 1000000);
+       u32 mhz = DIV_ROUND_CLOSEST_ULL(clk_get_rate(pltfm_host->clk), 1000000);
 
        /* Having a map is optional */
        if (!soc_ctl_map)
@@ -986,14 +1018,16 @@ static void arasan_dt_read_clk_phase(struct device *dev,
 {
        struct device_node *np = dev->of_node;
 
-       int clk_phase[2] = {0};
+       u32 clk_phase[2] = {0};
+       int ret;
 
        /*
         * Read the Tap Delay values from DT; if the DT does not contain the
         * Tap Values, use the pre-defined values.
         */
-       if (of_property_read_variable_u32_array(np, prop, &clk_phase[0],
-                                               2, 0)) {
+       ret = of_property_read_variable_u32_array(np, prop, &clk_phase[0],
+                                                 2, 0);
+       if (ret < 0) {
                dev_dbg(dev, "Using predefined clock phase for %s = %d %d\n",
                        prop, clk_data->clk_phase_in[timing],
                        clk_data->clk_phase_out[timing]);
@@ -1608,6 +1642,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
        if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
                host->mmc_host_ops.execute_tuning =
                        arasan_zynqmp_execute_tuning;
+
+               sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN;
+               host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
        }
 
        arasan_dt_parse_clk_phases(dev, &sdhci_arasan->clk_data);
index 387ce9c..a500187 100644 (file)
  */
 #define NVQUIRK_HAS_TMCLK                              BIT(10)
 
+#define NVQUIRK_HAS_ANDROID_GPT_SECTOR                 BIT(11)
+
 /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
 #define SDHCI_TEGRA_CQE_BASE_ADDR                      0xF000
 
@@ -1361,6 +1363,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
        .pdata = &sdhci_tegra20_pdata,
        .dma_mask = DMA_BIT_MASK(32),
        .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
+                   NVQUIRK_HAS_ANDROID_GPT_SECTOR |
                    NVQUIRK_ENABLE_BLOCK_GAP_DET,
 };
 
@@ -1390,6 +1393,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
        .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
                    NVQUIRK_ENABLE_SDR50 |
                    NVQUIRK_ENABLE_SDR104 |
+                   NVQUIRK_HAS_ANDROID_GPT_SECTOR |
                    NVQUIRK_HAS_PADCALIB,
 };
 
@@ -1422,6 +1426,7 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
 static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
        .pdata = &sdhci_tegra114_pdata,
        .dma_mask = DMA_BIT_MASK(32),
+       .nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
 };
 
 static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
@@ -1438,6 +1443,7 @@ static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
 static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
        .pdata = &sdhci_tegra124_pdata,
        .dma_mask = DMA_BIT_MASK(34),
+       .nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
 };
 
 static const struct sdhci_ops tegra210_sdhci_ops = {
@@ -1616,6 +1622,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
        tegra_host->pad_control_available = false;
        tegra_host->soc_data = soc_data;
 
+       if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
+               host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;
+
        if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
                rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
                if (rc == 0)
index aba6e10..8eefa7d 100644 (file)
@@ -934,21 +934,21 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
 
        /*
         * If the host controller provides us with an incorrect timeout
-        * value, just skip the check and use 0xE.  The hardware may take
+        * value, just skip the check and use the maximum. The hardware may take
         * longer to time out, but that's much better than having a too-short
         * timeout value.
         */
        if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
-               return 0xE;
+               return host->max_timeout_count;
 
        /* Unspecified command, assume max */
        if (cmd == NULL)
-               return 0xE;
+               return host->max_timeout_count;
 
        data = cmd->data;
        /* Unspecified timeout, assume max */
        if (!data && !cmd->busy_timeout)
-               return 0xE;
+               return host->max_timeout_count;
 
        /* timeout in us */
        target_timeout = sdhci_target_timeout(host, cmd, data);
@@ -968,15 +968,15 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
        while (current_timeout < target_timeout) {
                count++;
                current_timeout <<= 1;
-               if (count >= 0xF)
+               if (count > host->max_timeout_count)
                        break;
        }
 
-       if (count >= 0xF) {
+       if (count > host->max_timeout_count) {
                if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
                        DBG("Too large timeout 0x%x requested for CMD%d!\n",
                            count, cmd->opcode);
-               count = 0xE;
+               count = host->max_timeout_count;
        } else {
                *too_big = false;
        }
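
The loop above is a plain doubling search: current_timeout is doubled until it
covers target_timeout, and count is the number of doublings, now capped at the
per-host max_timeout_count instead of the hard-coded 0xE/0xF. A simplified
standalone sketch of that search (illustrative only; the real function first
derives target_timeout and the starting current_timeout from the command and
the timeout clock):

	/* Simplified sketch, not the driver function itself. */
	static unsigned int calc_timeout_count(unsigned long long target_timeout,
					       unsigned long long current_timeout,
					       unsigned int max_timeout_count)
	{
		unsigned int count = 0;

		while (current_timeout < target_timeout) {
			count++;
			current_timeout <<= 1;
			if (count > max_timeout_count)
				break;
		}

		/* An over-large request falls back to the hardware maximum. */
		if (count > max_timeout_count)
			count = max_timeout_count;

		return count;
	}
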
@@ -1222,6 +1222,7 @@ static int sdhci_external_dma_setup(struct sdhci_host *host,
        if (!host->mapbase)
                return -EINVAL;
 
+       memset(&cfg, 0, sizeof(cfg));
        cfg.src_addr = host->mapbase + SDHCI_BUFFER;
        cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -3278,8 +3279,14 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 {
        u32 command;
 
-       /* CMD19 generates _only_ Buffer Read Ready interrupt */
-       if (intmask & SDHCI_INT_DATA_AVAIL) {
+       /*
+        * CMD19 generates _only_ a Buffer Read Ready interrupt when it is
+        * sent via sdhci_send_tuning(). We must exclude the case of PIO mode
+        * with mmc_send_tuning(): otherwise sdhci_transfer_pio() is never
+        * called, SDHCI_INT_DATA_AVAIL stays asserted, and we get stuck in an
+        * interrupt storm.
+        */
+       if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
                command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
                if (command == MMC_SEND_TUNING_BLOCK ||
                    command == MMC_SEND_TUNING_BLOCK_HS200) {
@@ -3940,6 +3947,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
         */
        host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
 
+       host->max_timeout_count = 0xE;
+
        return host;
 }
 
index 074dc18..e8d04e4 100644 (file)
@@ -517,6 +517,7 @@ struct sdhci_host {
 
        unsigned int max_clk;   /* Max possible freq (MHz) */
        unsigned int timeout_clk;       /* Timeout freq (KHz) */
+       u8 max_timeout_count;   /* Vendor specific max timeout count */
        unsigned int clk_mul;   /* Clock Muliplier value */
 
        unsigned int clock;     /* Current clock (MHz) */
index e5e4570..bcc595c 100644 (file)
@@ -1164,9 +1164,9 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
                data->bytes_xfered = 0;
                /* Abort DMA */
                if (data->flags & MMC_DATA_READ)
-                       dmaengine_terminate_all(host->chan_rx);
+                       dmaengine_terminate_sync(host->chan_rx);
                else
-                       dmaengine_terminate_all(host->chan_tx);
+                       dmaengine_terminate_sync(host->chan_tx);
        }
 
        return false;
index 9fdf7ea..6391707 100644 (file)
@@ -669,8 +669,8 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
                        if(1 != tifm_map_sg(sock, &host->bounce_buf, 1,
                                            r_data->flags & MMC_DATA_WRITE
-                                           ? PCI_DMA_TODEVICE
-                                           : PCI_DMA_FROMDEVICE)) {
+                                           ? DMA_TO_DEVICE
+                                           : DMA_FROM_DEVICE)) {
                                pr_err("%s : scatterlist map failed\n",
                                       dev_name(&sock->dev));
                                mrq->cmd->error = -ENOMEM;
@@ -680,15 +680,15 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
                                                   r_data->sg_len,
                                                   r_data->flags
                                                   & MMC_DATA_WRITE
-                                                  ? PCI_DMA_TODEVICE
-                                                  : PCI_DMA_FROMDEVICE);
+                                                  ? DMA_TO_DEVICE
+                                                  : DMA_FROM_DEVICE);
                        if (host->sg_len < 1) {
                                pr_err("%s : scatterlist map failed\n",
                                       dev_name(&sock->dev));
                                tifm_unmap_sg(sock, &host->bounce_buf, 1,
                                              r_data->flags & MMC_DATA_WRITE
-                                             ? PCI_DMA_TODEVICE
-                                             : PCI_DMA_FROMDEVICE);
+                                             ? DMA_TO_DEVICE
+                                             : DMA_FROM_DEVICE);
                                mrq->cmd->error = -ENOMEM;
                                goto err_out;
                        }
@@ -762,10 +762,10 @@ static void tifm_sd_end_cmd(struct tasklet_struct *t)
                } else {
                        tifm_unmap_sg(sock, &host->bounce_buf, 1,
                                      (r_data->flags & MMC_DATA_WRITE)
-                                     ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+                                     ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                        tifm_unmap_sg(sock, r_data->sg, r_data->sg_len,
                                      (r_data->flags & MMC_DATA_WRITE)
-                                     ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+                                     ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                }
 
                r_data->bytes_xfered = r_data->blocks
index b9b79b1..99515be 100644 (file)
@@ -631,9 +631,9 @@ static void usdhi6_dma_kill(struct usdhi6_host *host)
                __func__, data->sg_len, data->blocks, data->blksz);
        /* Abort DMA */
        if (data->flags & MMC_DATA_READ)
-               dmaengine_terminate_all(host->chan_rx);
+               dmaengine_terminate_sync(host->chan_rx);
        else
-               dmaengine_terminate_all(host->chan_tx);
+               dmaengine_terminate_sync(host->chan_tx);
 }
 
 static void usdhi6_dma_check_error(struct usdhi6_host *host)
@@ -1186,6 +1186,15 @@ static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
        return ret;
 }
 
+static int usdhi6_card_busy(struct mmc_host *mmc)
+{
+       struct usdhi6_host *host = mmc_priv(mmc);
+       u32 tmp = usdhi6_read(host, USDHI6_SD_INFO2);
+
+       /* Card is busy if it is pulling dat[0] low */
+       return !(tmp & USDHI6_SD_INFO2_SDDAT0);
+}
+
 static const struct mmc_host_ops usdhi6_ops = {
        .request        = usdhi6_request,
        .set_ios        = usdhi6_set_ios,
@@ -1193,6 +1202,7 @@ static const struct mmc_host_ops usdhi6_ops = {
        .get_ro         = usdhi6_get_ro,
        .enable_sdio_irq = usdhi6_enable_sdio_irq,
        .start_signal_voltage_switch = usdhi6_sig_volt_switch,
+       .card_busy = usdhi6_card_busy,
 };
 
 /*                     State machine handlers                          */
index c32df55..88662a9 100644 (file)
@@ -491,7 +491,7 @@ static void via_sdc_preparedata(struct via_crdr_mmc_host *host,
 
        count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                ((data->flags & MMC_DATA_READ) ?
-               PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+               DMA_FROM_DEVICE : DMA_TO_DEVICE));
        BUG_ON(count != 1);
 
        via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg),
@@ -638,7 +638,7 @@ static void via_sdc_finish_data(struct via_crdr_mmc_host *host)
 
        dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                ((data->flags & MMC_DATA_READ) ?
-               PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+               DMA_FROM_DEVICE : DMA_TO_DEVICE));
 
        if (data->stop)
                via_sdc_send_command(host, data->stop);
index 3097e93..a761134 100644 (file)
@@ -119,7 +119,7 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
 
-       return extp->MinorVersion >= '5' &&
+       return extp && extp->MinorVersion >= '5' &&
                (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
 }
 
index efc2003..99400d0 100644 (file)
@@ -229,7 +229,7 @@ static int mchp48l640_write(struct mtd_info *mtd, loff_t to, size_t len,
                woff += ws;
        }
 
-       return ret;
+       return 0;
 }
 
 static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
@@ -255,6 +255,7 @@ static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
        if (!ret)
                *retlen += len;
 
+       kfree(cmd);
        return ret;
 
 fail:
@@ -286,7 +287,7 @@ static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
                woff += ws;
        }
 
-       return ret;
+       return 0;
 };
 
 static const struct mchp48_caps mchp48l640_caps = {
index 6ce4bc5..44bea3f 100644 (file)
@@ -419,6 +419,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
        if (tr->discard) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
                blk_queue_max_discard_sectors(new->rq, UINT_MAX);
+               new->rq->limits.discard_granularity = tr->blksize;
        }
 
        gd->queue = new->rq;
@@ -525,14 +526,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);
 
-
-       mutex_lock(&mtd_table_mutex);
-
        ret = register_blkdev(tr->major, tr->name);
        if (ret < 0) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
-               mutex_unlock(&mtd_table_mutex);
                return ret;
        }
 
@@ -542,12 +539,12 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
        tr->blkshift = ffs(tr->blksize) - 1;
 
        INIT_LIST_HEAD(&tr->devs);
-       list_add(&tr->list, &blktrans_majors);
 
+       mutex_lock(&mtd_table_mutex);
+       list_add(&tr->list, &blktrans_majors);
        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);
-
        mutex_unlock(&mtd_table_mutex);
        return 0;
 }
@@ -564,8 +561,8 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);
 
-       unregister_blkdev(tr->major, tr->name);
        mutex_unlock(&mtd_table_mutex);
+       unregister_blkdev(tr->major, tr->name);
 
        BUG_ON(!list_empty(&tr->devs));
        return 0;
index b5ccd30..c8fd7f7 100644 (file)
@@ -806,7 +806,9 @@ static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
 
 err:
        kfree(info);
-       return ret;
+
+       /* ENODATA means there is no OTP region. */
+       return ret == -ENODATA ? 0 : ret;
 }
 
 static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
index 57a5831..3d6c6e8 100644 (file)
@@ -5228,12 +5228,18 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np)
 static int of_get_nand_secure_regions(struct nand_chip *chip)
 {
        struct device_node *dn = nand_get_flash_node(chip);
+       struct property *prop;
        int nr_elem, i, j;
 
-       nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
-       if (!nr_elem)
+       /* Only proceed if the "secure-regions" property is present in DT */
+       prop = of_find_property(dn, "secure-regions", NULL);
+       if (!prop)
                return 0;
 
+       nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
+       if (nr_elem <= 0)
+               return nr_elem;
+
        chip->nr_secure_regions = nr_elem / 2;
        chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
                                       GFP_KERNEL);
index a7ee0af..54e321a 100644 (file)
@@ -71,12 +71,18 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                family = AF_INET6;
 
        if (bareudp->ethertype == htons(ETH_P_IP)) {
-               struct iphdr *iphdr;
+               __u8 ipversion;
 
-               iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
-               if (iphdr->version == 4) {
-                       proto = bareudp->ethertype;
-               } else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
+               if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
+                                 sizeof(ipversion))) {
+                       bareudp->dev->stats.rx_dropped++;
+                       goto drop;
+               }
+               ipversion >>= 4;
+
+               if (ipversion == 4) {
+                       proto = htons(ETH_P_IP);
+               } else if (ipversion == 6 && bareudp->multi_proto_mode) {
                        proto = htons(ETH_P_IPV6);
                } else {
                        bareudp->dev->stats.rx_dropped++;
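
The rewritten receive path above no longer dereferences skb->data directly (the
inner IP header may not sit in the linear data area); it copies a single byte
with skb_copy_bits() and uses the top nibble as the IP version. A minimal sketch
of that nibble extraction on a raw byte (the byte value is illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned char first_byte = 0x45;	/* e.g. IPv4 header, IHL = 5 */
		unsigned char ipversion = first_byte >> 4;

		printf("version = %u\n", ipversion);	/* 4 -> ETH_P_IP, 6 -> ETH_P_IPV6 */
		return 0;
	}
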
index bba2a44..43bca31 100644 (file)
@@ -1164,10 +1164,10 @@ static int m_can_set_bittiming(struct net_device *dev)
                                    FIELD_PREP(TDCR_TDCO_MASK, tdco));
                }
 
-               reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
-                         FIELD_PREP(NBTP_NSJW_MASK, sjw) |
-                         FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
-                         FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
+               reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
+                       FIELD_PREP(DBTP_DSJW_MASK, sjw) |
+                       FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
+                       FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);
 
                m_can_write(cdev, M_CAN_DBTP, reg_btp);
        }
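
For readers unfamiliar with the helpers used above: FIELD_PREP() shifts a value
into the bit positions described by a mask, so the four DBTP_* fields are packed
into a single register word and OR-ed together. A small standalone sketch of the
same packing pattern, using made-up field positions and local stand-ins for the
GENMASK()/FIELD_PREP() macros so it builds outside the kernel:

	#include <stdio.h>

	/* Userspace stand-ins for the linux/bits.h and linux/bitfield.h macros. */
	#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

	#define DEMO_BRP_MASK		GENMASK(20, 16)	/* made-up field positions */
	#define DEMO_SJW_MASK		GENMASK(3, 0)

	int main(void)
	{
		unsigned int reg = 0;

		reg |= FIELD_PREP(DEMO_BRP_MASK, 3) | FIELD_PREP(DEMO_SJW_MASK, 1);
		printf("reg = 0x%08x\n", reg);	/* 0x00030001 */
		return 0;
	}
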
index 66fa8b0..95ae740 100644 (file)
@@ -224,8 +224,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
        if (id == ESD_EV_CAN_ERROR_EXT) {
                u8 state = msg->msg.rx.data[0];
                u8 ecc = msg->msg.rx.data[1];
-               u8 txerr = msg->msg.rx.data[2];
-               u8 rxerr = msg->msg.rx.data[3];
+               u8 rxerr = msg->msg.rx.data[2];
+               u8 txerr = msg->msg.rx.data[3];
 
                skb = alloc_can_err_skb(priv->netdev, &cf);
                if (skb == NULL) {
index 9fdcc4b..7062db6 100644 (file)
@@ -912,6 +912,7 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
 {
        struct hellcreek *hellcreek = ds->priv;
        u16 entries;
+       int ret = 0;
        size_t i;
 
        mutex_lock(&hellcreek->reg_lock);
@@ -943,12 +944,14 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
                if (!(entry.portmask & BIT(port)))
                        continue;
 
-               cb(entry.mac, 0, entry.is_static, data);
+               ret = cb(entry.mac, 0, entry.is_static, data);
+               if (ret)
+                       break;
        }
 
        mutex_unlock(&hellcreek->reg_lock);
 
-       return 0;
+       return ret;
 }
 
 static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
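
The dump change above (and the matching changes to the lan9303, gswip and
sja1105 drivers further down) all follow the same pattern: the return value of
the dsa_fdb_dump_cb_t callback is now propagated, so a non-zero return aborts
the walk and the error bubbles up to the caller. A minimal illustrative sketch
of a dump loop using that convention (the names here are made up, not the DSA
API):

	typedef int (*demo_dump_cb_t)(const unsigned char *mac, int is_static, void *ctx);

	static int demo_fdb_dump(const unsigned char (*table)[6], int entries,
				 demo_dump_cb_t cb, void *ctx)
	{
		int i, ret = 0;

		for (i = 0; i < entries; i++) {
			ret = cb(table[i], 0, ctx);	/* non-zero aborts the walk */
			if (ret)
				break;
		}

		return ret;
	}
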
@@ -1469,9 +1472,6 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
                u16 data;
                u8 gates;
 
-               cur++;
-               next++;
-
                if (i == schedule->num_entries)
                        gates = initial->gate_mask ^
                                cur->gate_mask;
@@ -1500,6 +1500,9 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
                        (initial->gate_mask <<
                         TR_GCLCMD_INIT_GATE_STATES_SHIFT);
                hellcreek_write(hellcreek, data, TR_GCLCMD);
+
+               cur++;
+               next++;
        }
 }
 
@@ -1547,7 +1550,7 @@ static bool hellcreek_schedule_startable(struct hellcreek *hellcreek, int port)
        /* Calculate difference to admin base time */
        base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time);
 
-       return base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC;
+       return base_time_ns - current_ns < (s64)4 * NSEC_PER_SEC;
 }
 
 static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port)
index 3443740..d7ce281 100644 (file)
@@ -557,12 +557,12 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
        return 0;
 }
 
-typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
-                          int portmap, void *ctx);
+typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
+                         int portmap, void *ctx);
 
-static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
+static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
 {
-       int i;
+       int ret = 0, i;
 
        mutex_lock(&chip->alr_mutex);
        lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
@@ -582,13 +582,17 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
                                                LAN9303_ALR_DAT1_PORT_BITOFFS;
                portmap = alrport_2_portmap[alrport];
 
-               cb(chip, dat0, dat1, portmap, ctx);
+               ret = cb(chip, dat0, dat1, portmap, ctx);
+               if (ret)
+                       break;
 
                lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
                                         LAN9303_ALR_CMD_GET_NEXT);
                lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
        }
        mutex_unlock(&chip->alr_mutex);
+
+       return ret;
 }
 
 static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
@@ -606,18 +610,20 @@ struct del_port_learned_ctx {
 };
 
 /* Clear learned (non-static) entry on given port */
-static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
-                                        u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
+                                       u32 dat1, int portmap, void *ctx)
 {
        struct del_port_learned_ctx *del_ctx = ctx;
        int port = del_ctx->port;
 
        if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
-               return;
+               return 0;
 
        /* a learned entry has only one port, so we can just delete it */
        dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
        lan9303_alr_make_entry_raw(chip, dat0, dat1);
+
+       return 0;
 }
 
 struct port_fdb_dump_ctx {
@@ -626,19 +632,19 @@ struct port_fdb_dump_ctx {
        dsa_fdb_dump_cb_t *cb;
 };
 
-static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
-                                     u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
+                                    u32 dat1, int portmap, void *ctx)
 {
        struct port_fdb_dump_ctx *dump_ctx = ctx;
        u8 mac[ETH_ALEN];
        bool is_static;
 
        if ((BIT(dump_ctx->port) & portmap) == 0)
-               return;
+               return 0;
 
        alr_reg_to_mac(dat0, dat1, mac);
        is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
-       dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
+       return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
 }
 
 /* Set a static ALR entry. Delete entry if port_map is zero */
@@ -1210,9 +1216,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
        };
 
        dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
-       lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
-
-       return 0;
+       return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
 }
 
 static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
index 314ae78..e78026e 100644 (file)
@@ -1404,11 +1404,17 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
                addr[1] = mac_bridge.key[2] & 0xff;
                addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
                if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
-                       if (mac_bridge.val[0] & BIT(port))
-                               cb(addr, 0, true, data);
+                       if (mac_bridge.val[0] & BIT(port)) {
+                               err = cb(addr, 0, true, data);
+                               if (err)
+                                       return err;
+                       }
                } else {
-                       if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
-                               cb(addr, 0, false, data);
+                       if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
+                               err = cb(addr, 0, false, data);
+                               if (err)
+                                       return err;
+                       }
                }
        }
        return 0;
index 560f684..c5142f8 100644 (file)
@@ -687,8 +687,8 @@ static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
        shifts = ksz8->shifts;
 
        ksz8_r_table(dev, TABLE_VLAN, addr, &data);
-       addr *= dev->phy_port_cnt;
-       for (i = 0; i < dev->phy_port_cnt; i++) {
+       addr *= 4;
+       for (i = 0; i < 4; i++) {
                dev->vlan_cache[addr + i].table[0] = (u16)data;
                data >>= shifts[VLAN_TABLE];
        }
@@ -702,7 +702,7 @@ static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
        u64 buf;
 
        data = (u16 *)&buf;
-       addr = vid / dev->phy_port_cnt;
+       addr = vid / 4;
        index = vid & 3;
        ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
        *vlan = data[index];
@@ -716,7 +716,7 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
        u64 buf;
 
        data = (u16 *)&buf;
-       addr = vid / dev->phy_port_cnt;
+       addr = vid / 4;
        index = vid & 3;
        ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
        data[index] = vlan;
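
The divisor change above matches how the hardware VLAN table is actually laid
out: each 64-bit table entry packs four 16-bit VLAN slots, so the entry number
is vid / 4 and the slot inside it is vid & 3, independent of the PHY port count.
A tiny sketch of that mapping (the VID is illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int vid = 10;
		unsigned int addr = vid / 4;	/* which 64-bit VLAN table entry */
		unsigned int index = vid & 3;	/* which 16-bit slot inside it */

		printf("vid %u -> entry %u, slot %u\n", vid, addr, index);	/* entry 2, slot 2 */
		return 0;
	}
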
@@ -1119,24 +1119,67 @@ static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
        if (ksz_is_ksz88x3(dev))
                return -ENOTSUPP;
 
+       /* Discard packets with VID not enabled on the switch */
        ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
 
+       /* Discard packets with VID not enabled on the ingress port */
+       for (port = 0; port < dev->phy_port_cnt; ++port)
+               ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
+                            flag);
+
        return 0;
 }
 
+static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
+{
+       if (ksz_is_ksz88x3(dev)) {
+               ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
+                       0x03 << (4 - 2 * port), state);
+       } else {
+               ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
+       }
+}
+
 static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
                              const struct switchdev_obj_port_vlan *vlan,
                              struct netlink_ext_ack *extack)
 {
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        struct ksz_device *dev = ds->priv;
+       struct ksz_port *p = &dev->ports[port];
        u16 data, new_pvid = 0;
        u8 fid, member, valid;
 
        if (ksz_is_ksz88x3(dev))
                return -ENOTSUPP;
 
-       ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+       /* If a VLAN is added with untagged flag different from the
+        * port's Remove Tag flag, we need to change the latter.
+        * Ignore VID 0, which is always untagged.
+        * Ignore CPU port, which will always be tagged.
+        */
+       if (untagged != p->remove_tag && vlan->vid != 0 &&
+           port != dev->cpu_port) {
+               unsigned int vid;
+
+               /* Reject attempts to add a VLAN that requires the
+                * Remove Tag flag to be changed, unless there are no
+                * other VLANs currently configured.
+                */
+               for (vid = 1; vid < dev->num_vlans; ++vid) {
+                       /* Skip the VID we are going to add or reconfigure */
+                       if (vid == vlan->vid)
+                               continue;
+
+                       ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
+                                      &fid, &member, &valid);
+                       if (valid && (member & BIT(port)))
+                               return -EINVAL;
+               }
+
+               ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+               p->remove_tag = untagged;
+       }
 
        ksz8_r_vlan_table(dev, vlan->vid, &data);
        ksz8_from_vlan(dev, data, &fid, &member, &valid);
@@ -1160,9 +1203,11 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
                u16 vid;
 
                ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
-               vid &= 0xfff;
+               vid &= ~VLAN_VID_MASK;
                vid |= new_pvid;
                ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
+
+               ksz8_port_enable_pvid(dev, port, true);
        }
 
        return 0;
@@ -1171,9 +1216,8 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
 static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
                              const struct switchdev_obj_port_vlan *vlan)
 {
-       bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        struct ksz_device *dev = ds->priv;
-       u16 data, pvid, new_pvid = 0;
+       u16 data, pvid;
        u8 fid, member, valid;
 
        if (ksz_is_ksz88x3(dev))
@@ -1182,8 +1226,6 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
        ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
        pvid = pvid & 0xFFF;
 
-       ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
-
        ksz8_r_vlan_table(dev, vlan->vid, &data);
        ksz8_from_vlan(dev, data, &fid, &member, &valid);
 
@@ -1195,14 +1237,11 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
                valid = 0;
        }
 
-       if (pvid == vlan->vid)
-               new_pvid = 1;
-
        ksz8_to_vlan(dev, fid, member, valid, &data);
        ksz8_w_vlan_table(dev, vlan->vid, data);
 
-       if (new_pvid != pvid)
-               ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
+       if (pvid == vlan->vid)
+               ksz8_port_enable_pvid(dev, port, false);
 
        return 0;
 }
@@ -1435,6 +1474,9 @@ static int ksz8_setup(struct dsa_switch *ds)
 
        ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
 
+       if (!ksz_is_ksz88x3(dev))
+               ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
+
        /* set broadcast storm protection 10% rate */
        regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
                           BROADCAST_STORM_RATE,
@@ -1717,6 +1759,16 @@ static int ksz8_switch_init(struct ksz_device *dev)
        /* set the real number of ports */
        dev->ds->num_ports = dev->port_cnt;
 
+       /* We rely on software untagging on the CPU port, so that we
+        * can support both tagged and untagged VLANs
+        */
+       dev->ds->untag_bridge_pvid = true;
+
+       /* VLAN filtering is partly controlled by the global VLAN
+        * Enable flag
+        */
+       dev->ds->vlan_filtering_is_global = true;
+
        return 0;
 }
 
index a323556..6b40bc2 100644 (file)
 #define REG_PORT_4_OUT_RATE_3          0xEE
 #define REG_PORT_5_OUT_RATE_3          0xFE
 
+/* 88x3 specific */
+
+#define REG_SW_INSERT_SRC_PVID         0xC2
+
 /* PME */
 
 #define SW_PME_OUTPUT_ENABLE           BIT(1)
index 2e6bfd3..1597c63 100644 (file)
@@ -27,6 +27,7 @@ struct ksz_port_mib {
 struct ksz_port {
        u16 member;
        u16 vid_member;
+       bool remove_tag;                /* Remove Tag flag set, for ksz8795 only */
        int stp_state;
        struct phy_device phydev;
 
@@ -205,12 +206,8 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
        int ret;
 
        ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
-       if (!ret) {
-               /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
-               value[0] = swab32(value[0]);
-               value[1] = swab32(value[1]);
-               *val = swab64((u64)*value);
-       }
+       if (!ret)
+               *val = (u64)value[0] << 32 | value[1];
 
        return ret;
 }
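
The ksz_read64() fix above drops the byte-swapping and simply treats the two
32-bit words returned by regmap_bulk_read() as the high and low halves of the
64-bit result. A tiny sketch of that combination (the word values are
illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int value[2] = { 0x01234567, 0x89abcdef };	/* high word, low word */
		unsigned long long val = (unsigned long long)value[0] << 32 | value[1];

		printf("val = 0x%016llx\n", val);	/* 0x0123456789abcdef */
		return 0;
	}
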
index 69f21b7..632f0fc 100644 (file)
@@ -47,6 +47,7 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
        MIB_DESC(2, 0x48, "TxBytes"),
        MIB_DESC(1, 0x60, "RxDrop"),
        MIB_DESC(1, 0x64, "RxFiltering"),
+       MIB_DESC(1, 0x68, "RxUnicast"),
        MIB_DESC(1, 0x6c, "RxMulticast"),
        MIB_DESC(1, 0x70, "RxBroadcast"),
        MIB_DESC(1, 0x74, "RxAlignErr"),
index b1d46dd..6ea0036 100644 (file)
@@ -1277,15 +1277,16 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
        int err;
 
        /* mv88e6393x family errata 4.6:
-        * Cannot clear PwrDn bit on SERDES on port 0 if device is configured
-        * CPU_MGD mode or P0_mode is configured for [x]MII.
-        * Workaround: Set Port0 SERDES register 4.F002 bit 5=0 and bit 15=1.
+        * Cannot clear the PwrDn bit on the SERDES if the device is
+        * configured in CPU_MGD mode or P0_mode is configured for [x]MII.
+        * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1.
         *
         * It seems that after this workaround the SERDES is automatically
         * powered up (the bit is cleared), so power it down.
         */
-       if (lane == MV88E6393X_PORT0_LANE) {
-               err = mv88e6390_serdes_read(chip, MV88E6393X_PORT0_LANE,
+       if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE ||
+           lane == MV88E6393X_PORT10_LANE) {
+               err = mv88e6390_serdes_read(chip, lane,
                                            MDIO_MMD_PHYXS,
                                            MV88E6393X_SERDES_POC, &reg);
                if (err)
index ca2ad77..563d8a2 100644 (file)
         AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
         AR9331_SW_PORT_STATUS_SPEED_M)
 
+#define AR9331_SW_REG_PORT_CTRL(_port)                 (0x104 + (_port) * 0x100)
+#define AR9331_SW_PORT_CTRL_HEAD_EN                    BIT(11)
+#define AR9331_SW_PORT_CTRL_PORT_STATE                 GENMASK(2, 0)
+#define AR9331_SW_PORT_CTRL_PORT_STATE_DISABLED                0
+#define AR9331_SW_PORT_CTRL_PORT_STATE_BLOCKING                1
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LISTENING       2
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LEARNING                3
+#define AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD         4
+
+#define AR9331_SW_REG_PORT_VLAN(_port)                 (0x108 + (_port) * 0x100)
+#define AR9331_SW_PORT_VLAN_8021Q_MODE                 GENMASK(31, 30)
+#define AR9331_SW_8021Q_MODE_SECURE                    3
+#define AR9331_SW_8021Q_MODE_CHECK                     2
+#define AR9331_SW_8021Q_MODE_FALLBACK                  1
+#define AR9331_SW_8021Q_MODE_NONE                      0
+#define AR9331_SW_PORT_VLAN_PORT_VID_MEMBER            GENMASK(25, 16)
+
 /* MIB registers */
 #define AR9331_MIB_COUNTER(x)                  (0x20000 + ((x) * 0x100))
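
The per-port register macros above use a fixed 0x100 stride per port, so the
addresses can be computed directly from the port number; for example, port 2
lands at 0x304 for PORT_CTRL and 0x308 for PORT_VLAN. A minimal sketch of that
address arithmetic (macros copied from the definitions above):

	#include <stdio.h>

	#define AR9331_SW_REG_PORT_CTRL(_port)	(0x104 + (_port) * 0x100)
	#define AR9331_SW_REG_PORT_VLAN(_port)	(0x108 + (_port) * 0x100)

	int main(void)
	{
		int port = 2;	/* illustrative port number */

		printf("PORT_CTRL(%d) = 0x%x\n", port, AR9331_SW_REG_PORT_CTRL(port));	/* 0x304 */
		printf("PORT_VLAN(%d) = 0x%x\n", port, AR9331_SW_REG_PORT_VLAN(port));	/* 0x308 */
		return 0;
	}
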
 
@@ -371,12 +388,60 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
        return 0;
 }
 
-static int ar9331_sw_setup(struct dsa_switch *ds)
+static int ar9331_sw_setup_port(struct dsa_switch *ds, int port)
 {
        struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
        struct regmap *regmap = priv->regmap;
+       u32 port_mask, port_ctrl, val;
        int ret;
 
+       /* Generate default port settings */
+       port_ctrl = FIELD_PREP(AR9331_SW_PORT_CTRL_PORT_STATE,
+                              AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD);
+
+       if (dsa_is_cpu_port(ds, port)) {
+               /* CPU port should be allowed to communicate with all user
+                * ports.
+                */
+               port_mask = dsa_user_ports(ds);
+               /* Enable the Atheros header on the CPU port. This will allow
+                * us to communicate with each port separately.
+                */
+               port_ctrl |= AR9331_SW_PORT_CTRL_HEAD_EN;
+       } else if (dsa_is_user_port(ds, port)) {
+               /* User ports should communicate only with the CPU port.
+                */
+               port_mask = BIT(dsa_upstream_port(ds, port));
+       } else {
+               /* Other ports do not need to communicate at all */
+               port_mask = 0;
+       }
+
+       val = FIELD_PREP(AR9331_SW_PORT_VLAN_8021Q_MODE,
+                        AR9331_SW_8021Q_MODE_NONE) |
+               FIELD_PREP(AR9331_SW_PORT_VLAN_PORT_VID_MEMBER, port_mask);
+
+       ret = regmap_write(regmap, AR9331_SW_REG_PORT_VLAN(port), val);
+       if (ret)
+               goto error;
+
+       ret = regmap_write(regmap, AR9331_SW_REG_PORT_CTRL(port), port_ctrl);
+       if (ret)
+               goto error;
+
+       return 0;
+error:
+       dev_err(priv->dev, "%s: error: %i\n", __func__, ret);
+
+       return ret;
+}
+
+static int ar9331_sw_setup(struct dsa_switch *ds)
+{
+       struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+       struct regmap *regmap = priv->regmap;
+       int ret, i;
+
        ret = ar9331_sw_reset(priv);
        if (ret)
                return ret;
@@ -402,6 +467,12 @@ static int ar9331_sw_setup(struct dsa_switch *ds)
        if (ret)
                goto error;
 
+       for (i = 0; i < ds->num_ports; i++) {
+               ret = ar9331_sw_setup_port(ds, i);
+               if (ret)
+                       goto error;
+       }
+
        ds->configure_vlan_while_not_filtering = false;
 
        return 0;
@@ -837,16 +908,24 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
                return 0;
        }
 
-       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
+       /* On this switch we work with 32-bit registers on top of a 16-bit
+        * bus. Some registers (for example, forwarding database accesses)
+        * have the trigger bit in the first 16-bit half of the request, and
+        * the result and configuration of the request in the second half.
+        * To make this work properly, the second half of the transfer must
+        * be written before the first one.
+        */
+       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
+                                 val >> 16);
        if (ret < 0)
                goto error;
 
-       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
-                                 val >> 16);
+       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
        if (ret < 0)
                goto error;
 
        return 0;
+
 error:
        dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
        return ret;
index 56fead6..1477091 100644 (file)
@@ -304,6 +304,15 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
                        hostcmd = SJA1105_HOSTCMD_INVALIDATE;
        }
        sja1105_packing(p, &hostcmd, 25, 23, size, op);
+}
+
+static void
+sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+                                 enum packing_op op)
+{
+       int entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+
+       sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
 
        /* Hack - The hardware takes the 'index' field within
         * struct sja1105_l2_lookup_entry as the index on which this command
@@ -313,26 +322,18 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
         * such that our API doesn't need to ask for a full-blown entry
         * structure when e.g. a delete is requested.
         */
-       sja1105_packing(buf, &cmd->index, 15, 6,
-                       SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
-}
-
-static void
-sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
-                                 enum packing_op op)
-{
-       int size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
-
-       return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+       sja1105_packing(buf, &cmd->index, 15, 6, entry_size, op);
 }
 
 static void
 sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
                              enum packing_op op)
 {
-       int size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+       int entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+
+       sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
 
-       return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+       sja1105_packing(buf, &cmd->index, 10, 1, entry_size, op);
 }
 
 /* The switch is so retarded that it makes our command/entry abstraction
index e2dc997..49eb0ac 100644 (file)
@@ -1318,10 +1318,11 @@ static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
 int sja1105et_fdb_add(struct dsa_switch *ds, int port,
                      const unsigned char *addr, u16 vid)
 {
-       struct sja1105_l2_lookup_entry l2_lookup = {0};
+       struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
        struct sja1105_private *priv = ds->priv;
        struct device *dev = ds->dev;
        int last_unused = -1;
+       int start, end, i;
        int bin, way, rc;
 
        bin = sja1105et_fdb_hash(priv, addr, vid);
@@ -1333,7 +1334,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
                 * mask? If yes, we need to do nothing. If not, we need
                 * to rewrite the entry by adding this port to it.
                 */
-               if (l2_lookup.destports & BIT(port))
+               if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
                        return 0;
                l2_lookup.destports |= BIT(port);
        } else {
@@ -1364,6 +1365,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
                                                     index, NULL, false);
                }
        }
+       l2_lookup.lockeds = true;
        l2_lookup.index = sja1105et_fdb_index(bin, way);
 
        rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
@@ -1372,6 +1374,29 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
        if (rc < 0)
                return rc;
 
+       /* Invalidate a dynamically learned entry if that exists */
+       start = sja1105et_fdb_index(bin, 0);
+       end = sja1105et_fdb_index(bin, way);
+
+       for (i = start; i < end; i++) {
+               rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+                                                i, &tmp);
+               if (rc == -ENOENT)
+                       continue;
+               if (rc)
+                       return rc;
+
+               if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid)
+                       continue;
+
+               rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+                                                 i, NULL, false);
+               if (rc)
+                       return rc;
+
+               break;
+       }
+
        return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
 }
 
@@ -1413,32 +1438,30 @@ int sja1105et_fdb_del(struct dsa_switch *ds, int port,
 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
                        const unsigned char *addr, u16 vid)
 {
-       struct sja1105_l2_lookup_entry l2_lookup = {0};
+       struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
        struct sja1105_private *priv = ds->priv;
        int rc, i;
 
        /* Search for an existing entry in the FDB table */
        l2_lookup.macaddr = ether_addr_to_u64(addr);
        l2_lookup.vlanid = vid;
-       l2_lookup.iotag = SJA1105_S_TAG;
        l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
-       if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
-               l2_lookup.mask_vlanid = VLAN_VID_MASK;
-               l2_lookup.mask_iotag = BIT(0);
-       } else {
-               l2_lookup.mask_vlanid = 0;
-               l2_lookup.mask_iotag = 0;
-       }
+       l2_lookup.mask_vlanid = VLAN_VID_MASK;
        l2_lookup.destports = BIT(port);
 
+       tmp = l2_lookup;
+
        rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
-                                        SJA1105_SEARCH, &l2_lookup);
-       if (rc == 0) {
-               /* Found and this port is already in the entry's
+                                        SJA1105_SEARCH, &tmp);
+       if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) {
+               /* Found a static entry and this port is already in the entry's
                 * port mask => job done
                 */
-               if (l2_lookup.destports & BIT(port))
+               if ((tmp.destports & BIT(port)) && tmp.lockeds)
                        return 0;
+
+               l2_lookup = tmp;
+
                /* l2_lookup.index is populated by the switch in case it
                 * found something.
                 */
@@ -1460,16 +1483,46 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
                dev_err(ds->dev, "FDB is full, cannot add entry.\n");
                return -EINVAL;
        }
-       l2_lookup.lockeds = true;
        l2_lookup.index = i;
 
 skip_finding_an_index:
+       l2_lookup.lockeds = true;
+
        rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
                                          l2_lookup.index, &l2_lookup,
                                          true);
        if (rc < 0)
                return rc;
 
+       /* The switch learns dynamic entries and looks up the FDB left to
+        * right. It is possible that our addition was concurrent with the
+        * dynamic learning of the same address, so now that the static entry
+        * has been installed, we are certain that address learning for this
+        * particular address has been turned off, so the dynamic entry either
+        * is in the FDB at an index smaller than the static one, or isn't (it
+        * can also be at a larger index, but in that case it is inactive
+        * because the static FDB entry will match first, and the dynamic one
+        * will eventually age out). Search for a dynamically learned address
+        * prior to our static one and invalidate it.
+        */
+       tmp = l2_lookup;
+
+       rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+                                        SJA1105_SEARCH, &tmp);
+       if (rc < 0) {
+               dev_err(ds->dev,
+                       "port %d failed to read back entry for %pM vid %d: %pe\n",
+                       port, addr, vid, ERR_PTR(rc));
+               return rc;
+       }
+
+       if (tmp.index < l2_lookup.index) {
+               rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+                                                 tmp.index, NULL, false);
+               if (rc < 0)
+                       return rc;
+       }
+
        return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
 }
 
@@ -1483,15 +1536,8 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
 
        l2_lookup.macaddr = ether_addr_to_u64(addr);
        l2_lookup.vlanid = vid;
-       l2_lookup.iotag = SJA1105_S_TAG;
        l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
-       if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
-               l2_lookup.mask_vlanid = VLAN_VID_MASK;
-               l2_lookup.mask_iotag = BIT(0);
-       } else {
-               l2_lookup.mask_vlanid = 0;
-               l2_lookup.mask_iotag = 0;
-       }
+       l2_lookup.mask_vlanid = VLAN_VID_MASK;
        l2_lookup.destports = BIT(port);
 
        rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
@@ -1589,7 +1635,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
                /* We need to hide the dsa_8021q VLANs from the user. */
                if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
                        l2_lookup.vlanid = 0;
-               cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+               rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+               if (rc)
+                       return rc;
        }
        return 0;
 }
@@ -3139,6 +3187,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
        }
 
        sja1105_devlink_teardown(ds);
+       sja1105_mdiobus_unregister(ds);
        sja1105_flower_teardown(ds);
        sja1105_tas_teardown(ds);
        sja1105_ptp_clock_unregister(ds);
index 19aea8f..705d390 100644 (file)
@@ -284,8 +284,7 @@ static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv,
        struct mii_bus *bus;
        int rc = 0;
 
-       np = of_find_compatible_node(mdio_node, NULL,
-                                    "nxp,sja1110-base-tx-mdio");
+       np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-tx-mdio");
        if (!np)
                return 0;
 
@@ -339,8 +338,7 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
        struct mii_bus *bus;
        int rc = 0;
 
-       np = of_find_compatible_node(mdio_node, NULL,
-                                    "nxp,sja1110-base-t1-mdio");
+       np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-t1-mdio");
        if (!np)
                return 0;
 
index 860c18f..80399c8 100644 (file)
@@ -677,11 +677,13 @@ static int xge_probe(struct platform_device *pdev)
        ret = register_netdev(ndev);
        if (ret) {
                netdev_err(ndev, "Failed to register netdev\n");
-               goto err;
+               goto err_mdio_remove;
        }
 
        return 0;
 
+err_mdio_remove:
+       xge_mdio_remove(ndev);
 err:
        free_netdev(ndev);
 
index 1a6ec1a..b5d954c 100644 (file)
@@ -2669,7 +2669,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        }
 
        /* Allocated memory for FW statistics  */
-       if (bnx2x_alloc_fw_stats_mem(bp))
+       rc = bnx2x_alloc_fw_stats_mem(bp);
+       if (rc)
                LOAD_ERROR_EXIT(bp, load_error0);
 
        /* request pf to initialize status blocks */
index 8960658..8a97640 100644 (file)
@@ -72,7 +72,8 @@
 #include "bnxt_debugfs.h"
 
 #define BNXT_TX_TIMEOUT                (5 * HZ)
-#define BNXT_DEF_MSG_ENABLE    (NETIF_MSG_DRV | NETIF_MSG_HW)
+#define BNXT_DEF_MSG_ENABLE    (NETIF_MSG_DRV | NETIF_MSG_HW | \
+                                NETIF_MSG_TX_ERR)
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@@ -365,6 +366,33 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
        return md_dst->u.port_info.port_id;
 }
 
+static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+                            u16 prod)
+{
+       bnxt_db_write(bp, &txr->tx_db, prod);
+       txr->kick_pending = 0;
+}
+
+static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
+                                         struct bnxt_tx_ring_info *txr,
+                                         struct netdev_queue *txq)
+{
+       netif_tx_stop_queue(txq);
+
+       /* netif_tx_stop_queue() must be done before checking
+        * tx index in bnxt_tx_avail() below, because in
+        * bnxt_tx_int(), we update tx index before checking for
+        * netif_tx_queue_stopped().
+        */
+       smp_mb();
+       if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+               netif_tx_wake_queue(txq);
+               return false;
+       }
+
+       return true;
+}
+
 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct bnxt *bp = netdev_priv(dev);
@@ -384,6 +412,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
+               atomic_long_inc(&dev->tx_dropped);
                return NETDEV_TX_OK;
        }
 
@@ -393,8 +422,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
-               netif_tx_stop_queue(txq);
-               return NETDEV_TX_BUSY;
+               /* We must have raced with NAPI cleanup */
+               if (net_ratelimit() && txr->kick_pending)
+                       netif_warn(bp, tx_err, dev,
+                                  "bnxt: ring busy w/ flush pending!\n");
+               if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+                       return NETDEV_TX_BUSY;
        }
 
        length = skb->len;
@@ -426,7 +459,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
                    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
-                       if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) {
+                       if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
+                                           &ptp->tx_hdr_off)) {
+                               if (vlan_tag_flags)
+                                       ptp->tx_hdr_off += VLAN_HLEN;
                                lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
                                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        } else {
@@ -514,21 +550,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
-               if (skb_pad(skb, pad)) {
+               if (skb_pad(skb, pad))
                        /* SKB already freed. */
-                       tx_buf->skb = NULL;
-                       return NETDEV_TX_OK;
-               }
+                       goto tx_kick_pending;
                length = BNXT_MIN_PKT_SIZE;
        }
 
        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
 
-       if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
-               dev_kfree_skb_any(skb);
-               tx_buf->skb = NULL;
-               return NETDEV_TX_OK;
-       }
+       if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+               goto tx_free;
 
        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
@@ -615,24 +646,17 @@ normal_tx:
        txr->tx_prod = prod;
 
        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
-               bnxt_db_write(bp, &txr->tx_db, prod);
+               bnxt_txr_db_kick(bp, txr, prod);
+       else
+               txr->kick_pending = 1;
 
 tx_done:
 
        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push)
-                       bnxt_db_write(bp, &txr->tx_db, prod);
-
-               netif_tx_stop_queue(txq);
+                       bnxt_txr_db_kick(bp, txr, prod);
 
-               /* netif_tx_stop_queue() must be done before checking
-                * tx index in bnxt_tx_avail() below, because in
-                * bnxt_tx_int(), we update tx index before checking for
-                * netif_tx_queue_stopped().
-                */
-               smp_mb();
-               if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
-                       netif_tx_wake_queue(txq);
+               bnxt_txr_netif_try_stop_queue(bp, txr, txq);
        }
        return NETDEV_TX_OK;
 
@@ -645,7 +669,6 @@ tx_dma_error:
        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
-       tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);
@@ -659,7 +682,13 @@ tx_dma_error:
                               PCI_DMA_TODEVICE);
        }
 
+tx_free:
        dev_kfree_skb_any(skb);
+tx_kick_pending:
+       if (txr->kick_pending)
+               bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+       txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+       atomic_long_inc(&dev->tx_dropped);
        return NETDEV_TX_OK;
 }
 
@@ -729,14 +758,9 @@ next_tx_int:
        smp_mb();
 
        if (unlikely(netif_tx_queue_stopped(txq)) &&
-           (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
-               __netif_tx_lock(txq, smp_processor_id());
-               if (netif_tx_queue_stopped(txq) &&
-                   bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
-                   txr->dev_state != BNXT_DEV_STATE_CLOSING)
-                       netif_tx_wake_queue(txq);
-               __netif_tx_unlock(txq);
-       }
+           bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+           READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
+               netif_tx_wake_queue(txq);
 }
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -1764,6 +1788,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
                return -EBUSY;
 
+       /* The valid test of the entry must be done first before
+        * reading any further.
+        */
+       dma_rmb();
        prod = rxr->rx_prod;
 
        if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
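
The dma_rmb() added after the RX_CMP_VALID() check (and in the two similar spots below) enforces what the comment says: the completion entry's valid bit is written last by the hardware, so it must be observed before any other field of the entry is read. A rough user-space analogue of that publish/consume ordering, with the device replaced by a hypothetical producer thread that fills the entry and then does a release store of the flag:

#include <stdatomic.h>

struct cmpl_entry {
        unsigned int len;
        unsigned long long addr;
        atomic_uint flags;   /* bit 0 = valid, set last by the producer (release store) */
};

static int consume_entry(struct cmpl_entry *e, unsigned int *len,
                         unsigned long long *addr)
{
        /* Valid test first; the acquire load plays the role of the valid
         * check plus dma_rmb(): len/addr must not be read before the bit
         * is seen set. */
        if (!(atomic_load_explicit(&e->flags, memory_order_acquire) & 1))
                return -1;   /* not published yet, poll again later */
        *len = e->len;
        *addr = e->addr;
        return 0;
}
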
@@ -1986,6 +2014,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
                return -EBUSY;
 
+       /* The valid test of the entry must be done first before
+        * reading any further.
+        */
+       dma_rmb();
        cmp_type = RX_CMP_TYPE(rxcmp);
        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                rxcmp1->rx_cmp_cfa_code_errors_v2 |=
@@ -2451,6 +2483,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
                if (!TX_CMP_VALID(txcmp, raw_cons))
                        break;
 
+               /* The valid test of the entry must be done first before
+                * reading any further.
+                */
+               dma_rmb();
                if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
                        tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
                        cp_cons = RING_CMP(tmp_raw_cons);
@@ -9125,10 +9161,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
        for (i = 0; i < bp->cp_nr_rings; i++) {
                struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
 
+               napi_disable(&bp->bnapi[i]->napi);
                if (bp->bnapi[i]->rx_ring)
                        cancel_work_sync(&cpr->dim.work);
-
-               napi_disable(&bp->bnapi[i]->napi);
        }
 }
 
@@ -9162,9 +9197,11 @@ void bnxt_tx_disable(struct bnxt *bp)
        if (bp->tx_ring) {
                for (i = 0; i < bp->tx_nr_rings; i++) {
                        txr = &bp->tx_ring[i];
-                       txr->dev_state = BNXT_DEV_STATE_CLOSING;
+                       WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
                }
        }
+       /* Make sure napi polls see @dev_state change */
+       synchronize_net();
        /* Drop carrier first to prevent TX timeout */
        netif_carrier_off(bp->dev);
        /* Stop all TX queues */
@@ -9178,8 +9215,10 @@ void bnxt_tx_enable(struct bnxt *bp)
 
        for (i = 0; i < bp->tx_nr_rings; i++) {
                txr = &bp->tx_ring[i];
-               txr->dev_state = 0;
+               WRITE_ONCE(txr->dev_state, 0);
        }
+       /* Make sure napi polls see @dev_state change */
+       synchronize_net();
        netif_tx_wake_all_queues(bp->dev);
        if (bp->link_info.link_up)
                netif_carrier_on(bp->dev);
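
dev_state is now written with WRITE_ONCE() and read with READ_ONCE() (see the lockless check in bnxt_tx_int() earlier in this diff), and synchronize_net() is added so every NAPI poll in flight has observed the new state before the queues are actually stopped or woken. A loose user-space sketch of the annotation half of that pattern, with relaxed C11 atomics standing in for WRITE_ONCE()/READ_ONCE(); the grace-period wait has no direct equivalent here and is only noted in a comment:

#include <stdatomic.h>

static atomic_int dev_state;   /* 0 = running, 1 = closing */

static void tx_disable(void)
{
        atomic_store_explicit(&dev_state, 1, memory_order_relaxed);
        /* kernel: synchronize_net() here, so no poller still using the
         * stale value can race with the queue shutdown that follows */
}

static int queue_may_wake(void)
{
        return atomic_load_explicit(&dev_state, memory_order_relaxed) == 0;
}
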
@@ -10765,6 +10804,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
                        return true;
                return false;
        }
+       /* 212 firmware is broken for aRFS */
+       if (BNXT_FW_MAJ(bp) == 212)
+               return false;
        if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
                return true;
        if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
index bcf8d00..ba4e0fc 100644 (file)
@@ -786,6 +786,7 @@ struct bnxt_tx_ring_info {
        u16                     tx_prod;
        u16                     tx_cons;
        u16                     txq_index;
+       u8                      kick_pending;
        struct bnxt_db_info     tx_db;
 
        struct tx_bd            *tx_desc_ring[MAX_TX_PAGES];
index 3fc6781..94d07a9 100644 (file)
@@ -368,6 +368,7 @@ struct cmd_nums {
        #define HWRM_FUNC_PTP_TS_QUERY                    0x19fUL
        #define HWRM_FUNC_PTP_EXT_CFG                     0x1a0UL
        #define HWRM_FUNC_PTP_EXT_QCFG                    0x1a1UL
+       #define HWRM_FUNC_KEY_CTX_ALLOC                   0x1a2UL
        #define HWRM_SELFTEST_QLIST                       0x200UL
        #define HWRM_SELFTEST_EXEC                        0x201UL
        #define HWRM_SELFTEST_IRQ                         0x202UL
@@ -531,8 +532,8 @@ struct hwrm_err_output {
 #define HWRM_VERSION_MAJOR 1
 #define HWRM_VERSION_MINOR 10
 #define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 47
-#define HWRM_VERSION_STR "1.10.2.47"
+#define HWRM_VERSION_RSVD 52
+#define HWRM_VERSION_STR "1.10.2.52"
 
 /* hwrm_ver_get_input (size:192b/24B) */
 struct hwrm_ver_get_input {
@@ -585,6 +586,7 @@ struct hwrm_ver_get_output {
        #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED              0x1000UL
        #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED                      0x2000UL
        #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED                    0x4000UL
+       #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE                      0x8000UL
        u8      roce_fw_maj_8b;
        u8      roce_fw_min_8b;
        u8      roce_fw_bld_8b;
@@ -886,7 +888,8 @@ struct hwrm_async_event_cmpl_reset_notify {
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL        (0x2UL << 8)
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL    (0x3UL << 8)
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET                (0x4UL << 8)
-       #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST                     ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET
+       #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION             (0x5UL << 8)
+       #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST                     ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK           0xffff0000UL
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT            16
 };
@@ -1236,13 +1239,14 @@ struct hwrm_async_event_cmpl_error_report_base {
        u8      timestamp_lo;
        __le16  timestamp_hi;
        __le32  event_data1;
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK          0xffUL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT           0
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED        0x0UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM     0x1UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL  0x2UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM             0x3UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST           ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK                   0xffUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT                    0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED                 0x0UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM              0x1UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL           0x2UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM                      0x3UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD  0x4UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST                    ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
 };
 
 /* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1446,6 +1450,8 @@ struct hwrm_func_vf_cfg_input {
        #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS            0x200UL
        #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS        0x400UL
        #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS     0x800UL
+       #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS      0x1000UL
+       #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS      0x2000UL
        __le16  mtu;
        __le16  guest_vlan;
        __le16  async_event_cr;
@@ -1469,7 +1475,8 @@ struct hwrm_func_vf_cfg_input {
        __le16  num_vnics;
        __le16  num_stat_ctxs;
        __le16  num_hw_ring_grps;
-       u8      unused_0[4];
+       __le16  num_tx_key_ctxs;
+       __le16  num_rx_key_ctxs;
 };
 
 /* hwrm_func_vf_cfg_output (size:128b/16B) */
@@ -1493,7 +1500,7 @@ struct hwrm_func_qcaps_input {
        u8      unused_0[6];
 };
 
-/* hwrm_func_qcaps_output (size:704b/88B) */
+/* hwrm_func_qcaps_output (size:768b/96B) */
 struct hwrm_func_qcaps_output {
        __le16  error_code;
        __le16  req_type;
@@ -1587,7 +1594,8 @@ struct hwrm_func_qcaps_output {
        #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA      0x4UL
        #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA      0x8UL
        #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE     0x10UL
-       u8      unused_1;
+       __le16  max_key_ctxs_alloc;
+       u8      unused_1[7];
        u8      valid;
 };
 
@@ -1602,7 +1610,7 @@ struct hwrm_func_qcfg_input {
        u8      unused_0[6];
 };
 
-/* hwrm_func_qcfg_output (size:832b/104B) */
+/* hwrm_func_qcfg_output (size:896b/112B) */
 struct hwrm_func_qcfg_output {
        __le16  error_code;
        __le16  req_type;
@@ -1749,11 +1757,13 @@ struct hwrm_func_qcfg_output {
        #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
        #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
        __le16  host_mtu;
-       u8      unused_3;
+       __le16  alloc_tx_key_ctxs;
+       __le16  alloc_rx_key_ctxs;
+       u8      unused_3[5];
        u8      valid;
 };
 
-/* hwrm_func_cfg_input (size:832b/104B) */
+/* hwrm_func_cfg_input (size:896b/112B) */
 struct hwrm_func_cfg_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -1820,6 +1830,8 @@ struct hwrm_func_cfg_input {
        #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW         0x8000000UL
        #define FUNC_CFG_REQ_ENABLES_TPID                     0x10000000UL
        #define FUNC_CFG_REQ_ENABLES_HOST_MTU                 0x20000000UL
+       #define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS              0x40000000UL
+       #define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS              0x80000000UL
        __le16  admin_mtu;
        __le16  mru;
        __le16  num_rsscos_ctxs;
@@ -1929,6 +1941,9 @@ struct hwrm_func_cfg_input {
        #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
        __be16  tpid;
        __le16  host_mtu;
+       __le16  num_tx_key_ctxs;
+       __le16  num_rx_key_ctxs;
+       u8      unused_0[4];
 };
 
 /* hwrm_func_cfg_output (size:128b/16B) */
@@ -2099,6 +2114,7 @@ struct hwrm_func_drv_rgtr_input {
        #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT                   0x40UL
        #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT               0x80UL
        #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT     0x100UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT                 0x200UL
        __le32  enables;
        #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE             0x1UL
        #define FUNC_DRV_RGTR_REQ_ENABLES_VER                 0x2UL
@@ -2268,7 +2284,7 @@ struct hwrm_func_resource_qcaps_input {
        u8      unused_0[6];
 };
 
-/* hwrm_func_resource_qcaps_output (size:448b/56B) */
+/* hwrm_func_resource_qcaps_output (size:512b/64B) */
 struct hwrm_func_resource_qcaps_output {
        __le16  error_code;
        __le16  req_type;
@@ -2300,11 +2316,15 @@ struct hwrm_func_resource_qcaps_output {
        __le16  max_tx_scheduler_inputs;
        __le16  flags;
        #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED     0x1UL
+       __le16  min_tx_key_ctxs;
+       __le16  max_tx_key_ctxs;
+       __le16  min_rx_key_ctxs;
+       __le16  max_rx_key_ctxs;
        u8      unused_0[5];
        u8      valid;
 };
 
-/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */
 struct hwrm_func_vf_resource_cfg_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -2331,6 +2351,10 @@ struct hwrm_func_vf_resource_cfg_input {
        __le16  max_hw_ring_grps;
        __le16  flags;
        #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED     0x1UL
+       __le16  min_tx_key_ctxs;
+       __le16  max_tx_key_ctxs;
+       __le16  min_rx_key_ctxs;
+       __le16  max_rx_key_ctxs;
        u8      unused_0[2];
 };
 
@@ -2348,7 +2372,9 @@ struct hwrm_func_vf_resource_cfg_output {
        __le16  reserved_vnics;
        __le16  reserved_stat_ctx;
        __le16  reserved_hw_ring_grps;
-       u8      unused_0[7];
+       __le16  reserved_tx_key_ctxs;
+       __le16  reserved_rx_key_ctxs;
+       u8      unused_0[3];
        u8      valid;
 };
 
@@ -4220,7 +4246,7 @@ struct hwrm_port_lpbk_clr_stats_output {
        u8      valid;
 };
 
-/* hwrm_port_ts_query_input (size:256b/32B) */
+/* hwrm_port_ts_query_input (size:320b/40B) */
 struct hwrm_port_ts_query_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -4238,8 +4264,11 @@ struct hwrm_port_ts_query_input {
        __le16  enables;
        #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT     0x1UL
        #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID         0x2UL
+       #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET     0x4UL
        __le16  ts_req_timeout;
        __le32  ptp_seq_id;
+       __le16  ptp_hdr_offset;
+       u8      unused_1[6];
 };
 
 /* hwrm_port_ts_query_output (size:192b/24B) */
@@ -8172,6 +8201,7 @@ struct hwrm_fw_reset_input {
        u8      host_idx;
        u8      flags;
        #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL     0x1UL
+       #define FW_RESET_REQ_FLAGS_FW_ACTIVATION      0x2UL
        u8      unused_0[4];
 };
 
@@ -8952,7 +8982,7 @@ struct hwrm_nvm_get_dir_info_output {
        u8      valid;
 };
 
-/* hwrm_nvm_write_input (size:384b/48B) */
+/* hwrm_nvm_write_input (size:448b/56B) */
 struct hwrm_nvm_write_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -8968,7 +8998,11 @@ struct hwrm_nvm_write_input {
        __le16  option;
        __le16  flags;
        #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG     0x1UL
+       #define NVM_WRITE_REQ_FLAGS_BATCH_MODE               0x2UL
+       #define NVM_WRITE_REQ_FLAGS_BATCH_LAST               0x4UL
        __le32  dir_item_length;
+       __le32  offset;
+       __le32  len;
        __le32  unused_0;
 };
 
index ec381c2..81f40ab 100644 (file)
@@ -20,7 +20,7 @@
 #include "bnxt.h"
 #include "bnxt_ptp.h"
 
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
 {
        unsigned int ptp_class;
        struct ptp_header *hdr;
@@ -34,6 +34,7 @@ int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
                if (!hdr)
                        return -EINVAL;
 
+               *hdr_off = (u8 *)hdr - skb->data;
                *seq_id  = ntohs(hdr->sequence_id);
                return 0;
        default:
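
Recording *hdr_off as the distance between the parsed PTP header and skb->data tells the firmware where the PTP header sits in the transmitted frame when the TX timestamp is queried; the transmit path earlier in this diff then adds VLAN_HLEN when a tag will be inserted ahead of it. A toy illustration of that bookkeeping (the 14-byte Ethernet header and the buffer are invented for the example; VLAN_HLEN is 4 bytes):

#include <stddef.h>
#include <stdio.h>

#define VLAN_HLEN 4

int main(void)
{
        unsigned char frame[128];
        unsigned char *ptp_hdr = frame + 14;          /* header right after the L2 header */
        size_t hdr_off = (size_t)(ptp_hdr - frame);   /* 14, like (u8 *)hdr - skb->data */
        int hw_inserts_vlan_tag = 1;

        if (hw_inserts_vlan_tag)
                hdr_off += VLAN_HLEN;                 /* 18 once the tag is pushed in */
        printf("ptp header offset: %zu\n", hdr_off);
        return 0;
}
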
@@ -91,6 +92,7 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
            PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
                req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
                req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
+               req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
                req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
        }
        mutex_lock(&bp->hwrm_cmd_lock);
index 254ba7b..524f1c2 100644 (file)
@@ -10,8 +10,8 @@
 #ifndef BNXT_PTP_H
 #define BNXT_PTP_H
 
-#define BNXT_PTP_GRC_WIN       5
-#define BNXT_PTP_GRC_WIN_BASE  0x5000
+#define BNXT_PTP_GRC_WIN       6
+#define BNXT_PTP_GRC_WIN_BASE  0x6000
 
 #define BNXT_MAX_PHC_DRIFT     31000000
 #define BNXT_LO_TIMER_MASK     0x0000ffffffffUL
@@ -19,7 +19,8 @@
 
 #define BNXT_PTP_QTS_TIMEOUT   1000
 #define BNXT_PTP_QTS_TX_ENABLES        (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
-                                PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT)
+                                PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
+                                PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
 
 struct bnxt_ptp_cfg {
        struct ptp_clock_info   ptp_info;
@@ -37,6 +38,7 @@ struct bnxt_ptp_cfg {
        #define BNXT_PHC_OVERFLOW_PERIOD        (19 * 3600 * HZ)
 
        u16                     tx_seqid;
+       u16                     tx_hdr_off;
        struct bnxt             *bp;
        atomic_t                tx_avail;
 #define BNXT_MAX_TX_TS 1
@@ -74,7 +76,7 @@ do {                                          \
        ((dst) = READ_ONCE(src))
 #endif
 
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id);
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
 int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
 int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
 int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
index 5c368a9..c2e1f16 100644 (file)
@@ -275,6 +275,12 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
 
        if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
                desc_ptp = macb_ptp_desc(bp, desc);
+               /* Unlikely but check */
+               if (!desc_ptp) {
+                       dev_warn_ratelimited(&bp->pdev->dev,
+                                            "Timestamp not supported in BD\n");
+                       return;
+               }
                gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
                memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
@@ -307,8 +313,11 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
        if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
                return -ENOMEM;
 
-       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        desc_ptp = macb_ptp_desc(queue->bp, desc);
+       /* Unlikely but check */
+       if (!desc_ptp)
+               return -EINVAL;
+       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        tx_timestamp = &queue->tx_timestamps[head];
        tx_timestamp->skb = skb;
        /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
index dbf9a0e..710cb00 100644 (file)
@@ -5068,6 +5068,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
                ret = -ENOMEM;
                goto bye;
        }
+       bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
 #endif
 
        params[0] = FW_PARAM_PFVF(CLIP_START);
@@ -6788,13 +6789,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        setup_memwin(adapter);
        err = adap_init0(adapter, 0);
-#ifdef CONFIG_DEBUG_FS
-       bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
-#endif
-       setup_memwin_rdma(adapter);
        if (err)
                goto out_unmap_bar;
 
+       setup_memwin_rdma(adapter);
+
        /* configure SGE_STAT_CFG_A to read WC stats */
        if (!is_t4(adapter->params.chip))
                t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
index 68b7864..98cc013 100644 (file)
@@ -3038,26 +3038,30 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
        return err;
 }
 
-static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
+static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
+{
+       dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+       dpaa2_switch_free_dpio(ethsw);
+       dpaa2_switch_destroy_rings(ethsw);
+       dpaa2_switch_drain_bp(ethsw);
+       dpaa2_switch_free_dpbp(ethsw);
+}
+
+static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
 {
        struct device *dev = &sw_dev->dev;
        struct ethsw_core *ethsw = dev_get_drvdata(dev);
        int err;
 
+       dpaa2_switch_ctrl_if_teardown(ethsw);
+
+       destroy_workqueue(ethsw->workqueue);
+
        err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
        if (err)
                dev_warn(dev, "dpsw_close err %d\n", err);
 }
 
-static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
-{
-       dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
-       dpaa2_switch_free_dpio(ethsw);
-       dpaa2_switch_destroy_rings(ethsw);
-       dpaa2_switch_drain_bp(ethsw);
-       dpaa2_switch_free_dpbp(ethsw);
-}
-
 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
 {
        struct ethsw_port_priv *port_priv;
@@ -3068,8 +3072,6 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
        dev = &sw_dev->dev;
        ethsw = dev_get_drvdata(dev);
 
-       dpaa2_switch_ctrl_if_teardown(ethsw);
-
        dpaa2_switch_teardown_irqs(sw_dev);
 
        dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
@@ -3084,9 +3086,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
        kfree(ethsw->acls);
        kfree(ethsw->ports);
 
-       dpaa2_switch_takedown(sw_dev);
-
-       destroy_workqueue(ethsw->workqueue);
+       dpaa2_switch_teardown(sw_dev);
 
        fsl_mc_portal_free(ethsw->mc_io);
 
@@ -3199,7 +3199,7 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
                               GFP_KERNEL);
        if (!(ethsw->ports)) {
                err = -ENOMEM;
-               goto err_takedown;
+               goto err_teardown;
        }
 
        ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
@@ -3270,8 +3270,8 @@ err_free_fdbs:
 err_free_ports:
        kfree(ethsw->ports);
 
-err_takedown:
-       dpaa2_switch_takedown(sw_dev);
+err_teardown:
+       dpaa2_switch_teardown(sw_dev);
 
 err_free_cmdport:
        fsl_mc_portal_free(ethsw->mc_io);
index 8aea707..7e4c498 100644 (file)
@@ -3843,13 +3843,13 @@ fec_drv_remove(struct platform_device *pdev)
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
        of_node_put(fep->phy_node);
-       free_netdev(ndev);
 
        clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
+       free_netdev(ndev);
        return 0;
 }
 
index 5325230..80461ab 100644 (file)
@@ -938,20 +938,19 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
        return 0;
 }
 
-static int hns3_dbg_get_cmd_index(struct hnae3_handle *handle,
-                                 const unsigned char *name, u32 *index)
+static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
 {
        u32 i;
 
        for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
-               if (!strncmp(name, hns3_dbg_cmd[i].name,
-                            strlen(hns3_dbg_cmd[i].name))) {
+               if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) {
                        *index = i;
                        return 0;
                }
        }
 
-       dev_err(&handle->pdev->dev, "unknown command(%s)\n", name);
+       dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n",
+               dbg_data->cmd);
        return -EINVAL;
 }
 
@@ -1019,8 +1018,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
        u32 index;
        int ret;
 
-       ret = hns3_dbg_get_cmd_index(handle, filp->f_path.dentry->d_iname,
-                                    &index);
+       ret = hns3_dbg_get_cmd_index(dbg_data, &index);
        if (ret)
                return ret;
 
@@ -1090,6 +1088,7 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
                char name[HNS3_DBG_FILE_NAME_LEN];
 
                data[i].handle = handle;
+               data[i].cmd = hns3_dbg_cmd[cmd].cmd;
                data[i].qid = i;
                sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i);
                debugfs_create_file(name, 0400, entry_dir, &data[i],
@@ -1110,6 +1109,7 @@ hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd)
                return -ENOMEM;
 
        data->handle = handle;
+       data->cmd = hns3_dbg_cmd[cmd].cmd;
        entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
        debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir,
                            data, &hns3_dbg_fops);
index f3766ff..bd88010 100644 (file)
@@ -22,6 +22,7 @@ struct hns3_dbg_item {
 
 struct hns3_dbg_data {
        struct hnae3_handle *handle;
+       enum hnae3_dbg_cmd cmd;
        u16 qid;
 };
 
index 887297e..eb748aa 100644 (file)
@@ -573,9 +573,13 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
 
 void hclge_cmd_uninit(struct hclge_dev *hdev)
 {
+       set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+       /* wait to ensure that the firmware completes any commands that
+        * may still be left over.
+        */
+       msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
        spin_lock_bh(&hdev->hw.cmq.csq.lock);
        spin_lock(&hdev->hw.cmq.crq.lock);
-       set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
        hclge_cmd_uninit_regs(&hdev->hw);
        spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);
index 18bde77..ac70d49 100644 (file)
@@ -9,6 +9,7 @@
 #include "hnae3.h"
 
 #define HCLGE_CMDQ_TX_TIMEOUT          30000
+#define HCLGE_CMDQ_CLEAR_WAIT_TIME     200
 #define HCLGE_DESC_DATA_LEN            6
 
 struct hclge_dev;
@@ -270,6 +271,9 @@ enum hclge_opcode_type {
        /* Led command */
        HCLGE_OPC_LED_STATUS_CFG        = 0xB000,
 
+       /* clear hardware resource command */
+       HCLGE_OPC_CLEAR_HW_RESOURCE     = 0x700B,
+
        /* NCL config command */
        HCLGE_OPC_QUERY_NCL_CONFIG      = 0x7011,
 
index 5bf5db9..39f56f2 100644 (file)
@@ -255,21 +255,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
        u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hclge_dev *hdev = vport->back;
-       u8 i, j, pfc_map, *prio_tc;
        int ret;
+       u8 i;
 
        memset(pfc, 0, sizeof(*pfc));
        pfc->pfc_cap = hdev->pfc_max;
-       prio_tc = hdev->tm_info.prio_tc;
-       pfc_map = hdev->tm_info.hw_pfc_map;
-
-       /* Pfc setting is based on TC */
-       for (i = 0; i < hdev->tm_info.num_tc; i++) {
-               for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
-                       if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
-                               pfc->pfc_en |= BIT(j);
-               }
-       }
+       pfc->pfc_en = hdev->tm_info.pfc_en;
 
        ret = hclge_pfc_tx_stats_get(hdev, requests);
        if (ret)
index ebeaf12..03ae122 100644 (file)
@@ -1550,6 +1550,7 @@ static int hclge_configure(struct hclge_dev *hdev)
        hdev->tm_info.hw_pfc_map = 0;
        hdev->wanted_umv_size = cfg.umv_space;
        hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
+       hdev->gro_en = true;
        if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
                set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
 
@@ -1618,7 +1619,7 @@ static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
-static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+static int hclge_config_gro(struct hclge_dev *hdev)
 {
        struct hclge_cfg_gro_status_cmd *req;
        struct hclge_desc desc;
@@ -1630,7 +1631,7 @@ static int hclge_config_gro(struct hclge_dev *hdev, bool en)
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
        req = (struct hclge_cfg_gro_status_cmd *)desc.data;
 
-       req->gro_en = en ? 1 : 0;
+       req->gro_en = hdev->gro_en ? 1 : 0;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
@@ -2952,12 +2953,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
        }
 
        if (state != hdev->hw.mac.link) {
+               hdev->hw.mac.link = state;
                client->ops->link_status_change(handle, state);
                hclge_config_mac_tnl_int(hdev, state);
                if (rclient && rclient->ops->link_status_change)
                        rclient->ops->link_status_change(rhandle, state);
 
-               hdev->hw.mac.link = state;
                hclge_push_link_status(hdev);
        }
 
@@ -10073,7 +10074,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
                                       bool writen_to_tbl)
 {
-       struct hclge_vport_vlan_cfg *vlan;
+       struct hclge_vport_vlan_cfg *vlan, *tmp;
+
+       list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
+               if (vlan->vlan_id == vlan_id)
+                       return;
 
        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan)
@@ -11443,6 +11448,28 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
        }
 }
 
+static int hclge_clear_hw_resource(struct hclge_dev *hdev)
+{
+       struct hclge_desc desc;
+       int ret;
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
+
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       /* This new command is only supported by new firmware; it will
+        * fail with older firmware. Only older firmware running this
+        * command can return -EOPNOTSUPP, so to keep the code backward
+        * compatible we override that value and return success.
+        */
+       if (ret && ret != -EOPNOTSUPP) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to clear hw resource, ret = %d\n", ret);
+               return ret;
+       }
+       return 0;
+}
+
 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
 {
        if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
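
hclge_clear_hw_resource() forgives exactly one error: firmware that predates HCLGE_OPC_CLEAR_HW_RESOURCE answers -EOPNOTSUPP, and that is translated to success so probing still works on old firmware. The same idiom in a self-contained sketch (send_cmd() is a made-up stand-in for the command queue):

#include <errno.h>
#include <stdio.h>

static int send_cmd(unsigned int opcode)
{
        (void)opcode;
        return -EOPNOTSUPP;   /* pretend the firmware predates this command */
}

static int clear_hw_resource(void)
{
        int ret = send_cmd(0x700B);

        if (ret && ret != -EOPNOTSUPP) {
                fprintf(stderr, "failed to clear hw resource, ret = %d\n", ret);
                return ret;
        }
        return 0;   /* "not supported" from old firmware is not an error */
}

int main(void)
{
        return clear_hw_resource();
}
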
@@ -11492,6 +11519,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        if (ret)
                goto err_cmd_uninit;
 
+       ret  = hclge_clear_hw_resource(hdev);
+       if (ret)
+               goto err_cmd_uninit;
+
        ret = hclge_get_cap(hdev);
        if (ret)
                goto err_cmd_uninit;
@@ -11556,7 +11587,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
                goto err_mdiobus_unreg;
        }
 
-       ret = hclge_config_gro(hdev, true);
+       ret = hclge_config_gro(hdev);
        if (ret)
                goto err_mdiobus_unreg;
 
@@ -11937,7 +11968,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
                return ret;
        }
 
-       ret = hclge_config_gro(hdev, true);
+       ret = hclge_config_gro(hdev);
        if (ret)
                return ret;
 
@@ -12671,8 +12702,15 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
+       bool gro_en_old = hdev->gro_en;
+       int ret;
 
-       return hclge_config_gro(hdev, enable);
+       hdev->gro_en = enable;
+       ret = hclge_config_gro(hdev);
+       if (ret)
+               hdev->gro_en = gro_en_old;
+
+       return ret;
 }
 
 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
index 3d33524..e446b83 100644 (file)
@@ -927,6 +927,7 @@ struct hclge_dev {
        unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
        enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
        u8 fd_en;
+       bool gro_en;
 
        u16 wanted_umv_size;
        /* max available unicast mac vlan space */
index bd19a2d..d9ddb0a 100644 (file)
@@ -507,12 +507,17 @@ static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
 
 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
 {
+       set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+       /* wait to ensure that the firmware completes any commands that
+        * may still be left over.
+        */
+       msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME);
        spin_lock_bh(&hdev->hw.cmq.csq.lock);
        spin_lock(&hdev->hw.cmq.crq.lock);
-       set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
        hclgevf_cmd_uninit_regs(&hdev->hw);
        spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
        hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
        hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
 }
index 202feb7..5b82177 100644 (file)
@@ -8,6 +8,7 @@
 #include "hnae3.h"
 
 #define HCLGEVF_CMDQ_TX_TIMEOUT                30000
+#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME   200
 #define HCLGEVF_CMDQ_RX_INVLD_B                0
 #define HCLGEVF_CMDQ_RX_OUTVLD_B       1
 
index 8784d61..9386547 100644 (file)
@@ -506,10 +506,10 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
        link_state =
                test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
        if (link_state != hdev->hw.mac.link) {
+               hdev->hw.mac.link = link_state;
                client->ops->link_status_change(handle, !!link_state);
                if (rclient && rclient->ops->link_status_change)
                        rclient->ops->link_status_change(rhandle, !!link_state);
-               hdev->hw.mac.link = link_state;
        }
 
        clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
@@ -2487,6 +2487,8 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
 {
        int ret;
 
+       hdev->gro_en = true;
+
        ret = hclgevf_get_basic_info(hdev);
        if (ret)
                return ret;
@@ -2549,7 +2551,7 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
        return 0;
 }
 
-static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+static int hclgevf_config_gro(struct hclgevf_dev *hdev)
 {
        struct hclgevf_cfg_gro_status_cmd *req;
        struct hclgevf_desc desc;
@@ -2562,7 +2564,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
                                     false);
        req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
 
-       req->gro_en = en ? 1 : 0;
+       req->gro_en = hdev->gro_en ? 1 : 0;
 
        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
@@ -3308,7 +3310,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
                return ret;
        }
 
-       ret = hclgevf_config_gro(hdev, true);
+       ret = hclgevf_config_gro(hdev);
        if (ret)
                return ret;
 
@@ -3389,7 +3391,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
        if (ret)
                goto err_config;
 
-       ret = hclgevf_config_gro(hdev, true);
+       ret = hclgevf_config_gro(hdev);
        if (ret)
                goto err_config;
 
@@ -3638,8 +3640,15 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+       bool gro_en_old = hdev->gro_en;
+       int ret;
 
-       return hclgevf_config_gro(hdev, enable);
+       hdev->gro_en = enable;
+       ret = hclgevf_config_gro(hdev);
+       if (ret)
+               hdev->gro_en = gro_en_old;
+
+       return ret;
 }
 
 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
index d7d0284..e8013be 100644 (file)
@@ -310,6 +310,8 @@ struct hclgevf_dev {
        u16 *vector_status;
        int *vector_irq;
 
+       bool gro_en;
+
        unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
 
        struct hclgevf_mac_table_cfg mac_table;
index 772b2f8..b339b9b 100644 (file)
@@ -323,8 +323,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
                        flag = (u8)msg_q[5];
 
                        /* update upper layer with new link status */
-                       hclgevf_update_link_status(hdev, link_status);
                        hclgevf_update_speed_duplex(hdev, speed, duplex);
+                       hclgevf_update_link_status(hdev, link_status);
 
                        if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN)
                                set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS,
index cf7b388..a80336c 100644 (file)
@@ -1006,6 +1006,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
 {
        u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
            link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
+       u16 max_ltr_enc_d = 0;  /* maximum LTR decoded by platform */
+       u16 lat_enc_d = 0;      /* latency decoded */
        u16 lat_enc = 0;        /* latency encoded */
 
        if (link) {
@@ -1059,7 +1061,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
                                     E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
                max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
 
-               if (lat_enc > max_ltr_enc)
+               lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
+                            (1U << (E1000_LTRV_SCALE_FACTOR *
+                            ((lat_enc & E1000_LTRV_SCALE_MASK)
+                            >> E1000_LTRV_SCALE_SHIFT)));
+
+               max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
+                                (1U << (E1000_LTRV_SCALE_FACTOR *
+                                ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
+                                >> E1000_LTRV_SCALE_SHIFT)));
+
+               if (lat_enc_d > max_ltr_enc_d)
                        lat_enc = max_ltr_enc;
        }
 
@@ -4115,13 +4127,17 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
                return ret_val;
 
        if (!(data & valid_csum_mask)) {
-               data |= valid_csum_mask;
-               ret_val = e1000_write_nvm(hw, word, 1, &data);
-               if (ret_val)
-                       return ret_val;
-               ret_val = e1000e_update_nvm_checksum(hw);
-               if (ret_val)
-                       return ret_val;
+               e_dbg("NVM Checksum Invalid\n");
+
+               if (hw->mac.type < e1000_pch_cnp) {
+                       data |= valid_csum_mask;
+                       ret_val = e1000_write_nvm(hw, word, 1, &data);
+                       if (ret_val)
+                               return ret_val;
+                       ret_val = e1000e_update_nvm_checksum(hw);
+                       if (ret_val)
+                               return ret_val;
+               }
        }
 
        return e1000e_validate_nvm_checksum_generic(hw);
index 1502895..e757896 100644 (file)
 
 /* Latency Tolerance Reporting */
 #define E1000_LTRV                     0x000F8
+#define E1000_LTRV_VALUE_MASK          0x000003FF
 #define E1000_LTRV_SCALE_MAX           5
 #define E1000_LTRV_SCALE_FACTOR                5
+#define E1000_LTRV_SCALE_SHIFT         10
+#define E1000_LTRV_SCALE_MASK          0x00001C00
 #define E1000_LTRV_REQ_SHIFT           15
 #define E1000_LTRV_NOSNOOP_SHIFT       16
 #define E1000_LTRV_SEND                        (1 << 30)
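
The masks above describe the LTR encoding that ich8lan.c now decodes before comparing: a 10-bit value in bits 9:0 and a 3-bit scale in bits 12:10, with one unit equal to 2^(E1000_LTRV_SCALE_FACTOR * scale) ns. Comparing the raw encodings, as the old code did, can rank two latencies the wrong way round; a small stand-alone check of the decode:

#include <stdio.h>

static unsigned long long ltr_decode_ns(unsigned int enc)
{
        unsigned int value = enc & 0x3ff;         /* E1000_LTRV_VALUE_MASK */
        unsigned int scale = (enc >> 10) & 0x7;   /* E1000_LTRV_SCALE_MASK >> E1000_LTRV_SCALE_SHIFT */

        return (unsigned long long)value << (5 * scale);   /* 1 unit = 2^(5 * scale) ns */
}

int main(void)
{
        /* 0x3e8 encodes 1000 ns (value 1000, scale 0); 0x401 encodes only 32 ns
         * (value 1, scale 1). The raw encodings compare as 0x3e8 < 0x401, the
         * wrong way round, which is what decoding before the comparison avoids. */
        printf("%llu ns\n", ltr_decode_ns(0x3e8));
        printf("%llu ns\n", ltr_decode_ns(0x401));
        return 0;
}
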
index 3f25bd8..10a83e5 100644 (file)
@@ -3663,8 +3663,7 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
 
        /* is DCB enabled at all? */
        if (vsi->tc_config.numtc == 1)
-               return i40e_swdcb_skb_tx_hash(netdev, skb,
-                                             netdev->real_num_tx_queues);
+               return netdev_pick_tx(netdev, skb, sb_dev);
 
        prio = skb->priority;
        hw = &vsi->back->hw;
index e8bd041..90793b3 100644 (file)
@@ -136,6 +136,7 @@ struct iavf_q_vector {
 struct iavf_mac_filter {
        struct list_head list;
        u8 macaddr[ETH_ALEN];
+       bool is_new_mac;        /* filter is new, wait for PF decision */
        bool remove;            /* filter needs to be removed */
        bool add;               /* filter needs to be added */
 };
index 44bafed..606a01c 100644 (file)
@@ -751,6 +751,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
 
                list_add_tail(&f->list, &adapter->mac_filter_list);
                f->add = true;
+               f->is_new_mac = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
        } else {
                f->remove = false;
@@ -1506,11 +1507,6 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
 
        iavf_map_rings_to_vectors(adapter);
-
-       if (RSS_AQ(adapter))
-               adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
-       else
-               err = iavf_init_rss(adapter);
 err:
        return err;
 }
@@ -2200,6 +2196,14 @@ continue_reset:
                        goto reset_err;
        }
 
+       if (RSS_AQ(adapter)) {
+               adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
+       } else {
+               err = iavf_init_rss(adapter);
+               if (err)
+                       goto reset_err;
+       }
+
        adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
        adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
 
index 0eab3c4..3c73596 100644 (file)
@@ -540,6 +540,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
        kfree(veal);
 }
 
+/**
+ * iavf_mac_add_ok
+ * @adapter: adapter structure
+ *
+ * Submit list of filters based on PF response.
+ **/
+static void iavf_mac_add_ok(struct iavf_adapter *adapter)
+{
+       struct iavf_mac_filter *f, *ftmp;
+
+       spin_lock_bh(&adapter->mac_vlan_list_lock);
+       list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+               f->is_new_mac = false;
+       }
+       spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_mac_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove filters from list based on PF response.
+ **/
+static void iavf_mac_add_reject(struct iavf_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct iavf_mac_filter *f, *ftmp;
+
+       spin_lock_bh(&adapter->mac_vlan_list_lock);
+       list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+               if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
+                       f->remove = false;
+
+               if (f->is_new_mac) {
+                       list_del(&f->list);
+                       kfree(f);
+               }
+       }
+       spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
 /**
  * iavf_add_vlans
  * @adapter: adapter structure
@@ -1492,6 +1533,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                case VIRTCHNL_OP_ADD_ETH_ADDR:
                        dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
                                iavf_stat_str(&adapter->hw, v_retval));
+                       iavf_mac_add_reject(adapter);
                        /* restore administratively set MAC address */
                        ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
                        break;
@@ -1639,10 +1681,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                }
        }
        switch (v_opcode) {
-       case VIRTCHNL_OP_ADD_ETH_ADDR: {
+       case VIRTCHNL_OP_ADD_ETH_ADDR:
+               if (!v_retval)
+                       iavf_mac_add_ok(adapter);
                if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
                        ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
-               }
                break;
        case VIRTCHNL_OP_GET_STATS: {
                struct iavf_eth_stats *stats =
index a450343..eadcb99 100644 (file)
@@ -234,6 +234,7 @@ enum ice_pf_state {
        ICE_VFLR_EVENT_PENDING,
        ICE_FLTR_OVERFLOW_PROMISC,
        ICE_VF_DIS,
+       ICE_VF_DEINIT_IN_PROGRESS,
        ICE_CFG_BUSY,
        ICE_SERVICE_SCHED,
        ICE_SERVICE_DIS,
index 91b545a..7fe6e8e 100644 (file)
@@ -42,7 +42,9 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
 
        status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
        if (status)
-               return -EIO;
+               /* We failed to locate the PBA, so just skip this entry */
+               dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
+                       ice_stat_str(status));
 
        return 0;
 }
index ef8d181..fe2ded7 100644 (file)
@@ -191,6 +191,14 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
 
+       /* Under some circumstances, we might receive a request to delete our
+        * own device address from our uc list. Because we store the device
+        * address in the VSI's MAC filter list, we need to ignore such
+        * requests and not delete our device address from this list.
+        */
+       if (ether_addr_equal(addr, netdev->dev_addr))
+               return 0;
+
        if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
                                     ICE_FWD_TO_VSI))
                return -EINVAL;
@@ -4194,6 +4202,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
        struct ice_hw *hw;
        int i, err;
 
+       if (pdev->is_virtfn) {
+               dev_err(dev, "can't probe a virtual function\n");
+               return -EINVAL;
+       }
+
        /* this driver uses devres, see
         * Documentation/driver-api/driver-model/devres.rst
         */
@@ -5119,7 +5132,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
                return -EADDRNOTAVAIL;
 
        if (ether_addr_equal(netdev->dev_addr, mac)) {
-               netdev_warn(netdev, "already using mac %pM\n", mac);
+               netdev_dbg(netdev, "already using mac %pM\n", mac);
                return 0;
        }
 
@@ -5130,6 +5143,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
                return -EBUSY;
        }
 
+       netif_addr_lock_bh(netdev);
        /* Clean up old MAC filter. Not an error if old filter doesn't exist */
        status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
        if (status && status != ICE_ERR_DOES_NOT_EXIST) {
@@ -5139,30 +5153,28 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
 
        /* Add filter for new MAC. If filter exists, return success */
        status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
-       if (status == ICE_ERR_ALREADY_EXISTS) {
+       if (status == ICE_ERR_ALREADY_EXISTS)
                /* Although this MAC filter is already present in hardware it's
                 * possible in some cases (e.g. bonding) that dev_addr was
                 * modified outside of the driver and needs to be restored back
                 * to this value.
                 */
-               memcpy(netdev->dev_addr, mac, netdev->addr_len);
                netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
-               return 0;
-       }
-
-       /* error if the new filter addition failed */
-       if (status)
+       else if (status)
+               /* error if the new filter addition failed */
                err = -EADDRNOTAVAIL;
 
 err_update_filters:
        if (err) {
                netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
                           mac);
+               netif_addr_unlock_bh(netdev);
                return err;
        }
 
        /* change the netdev's MAC address */
        memcpy(netdev->dev_addr, mac, netdev->addr_len);
+       netif_addr_unlock_bh(netdev);
        netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
                   netdev->dev_addr);
 
index 5d5207b..9e3ddb9 100644 (file)
@@ -656,7 +656,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
         * maintaining phase
         */
        if (start_time < current_time)
-               start_time = div64_u64(current_time + NSEC_PER_MSEC - 1,
+               start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
                                       NSEC_PER_SEC) * NSEC_PER_SEC + phase;
 
        start_time -= E810_OUT_PROP_DELAY_NS;
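
The one-character fix here is in a round-up-to-the-next-second expression: div64_u64(t + NSEC_PER_SEC - 1, NSEC_PER_SEC) * NSEC_PER_SEC is the usual integer round-up, while adding only NSEC_PER_MSEC - 1 truncated back to the current second and could leave start_time in the past. A stand-alone check of the corrected arithmetic (the phase term is left out of the sketch):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t round_up_to_next_sec(uint64_t ns)
{
        return (ns + NSEC_PER_SEC - 1) / NSEC_PER_SEC * NSEC_PER_SEC;
}

int main(void)
{
        uint64_t now = 5ULL * NSEC_PER_SEC + 123456789;   /* 5.123456789 s */

        /* prints 6000000000: the next full second, never a value behind "now" */
        printf("%llu\n", (unsigned long long)round_up_to_next_sec(now));
        return 0;
}
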
index 2826570..e93430a 100644 (file)
@@ -615,6 +615,8 @@ void ice_free_vfs(struct ice_pf *pf)
        struct ice_hw *hw = &pf->hw;
        unsigned int tmp, i;
 
+       set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
+
        if (!pf->vf)
                return;
 
@@ -680,6 +682,7 @@ void ice_free_vfs(struct ice_pf *pf)
                                i);
 
        clear_bit(ICE_VF_DIS, pf->state);
+       clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
        clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
 }
 
@@ -4415,6 +4418,10 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
        struct device *dev;
        int err = 0;
 
+       /* if de-init is underway, don't process messages from VF */
+       if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
+               return;
+
        dev = ice_pf_to_dev(pf);
        if (ice_validate_vf_id(pf, vf_id)) {
                err = -EINVAL;
index e29aadb..ed2d66b 100644 (file)
@@ -149,6 +149,9 @@ static void igc_release_hw_control(struct igc_adapter *adapter)
        struct igc_hw *hw = &adapter->hw;
        u32 ctrl_ext;
 
+       if (!pci_device_is_present(adapter->pdev))
+               return;
+
        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(IGC_CTRL_EXT);
        wr32(IGC_CTRL_EXT,
@@ -4449,26 +4452,29 @@ void igc_down(struct igc_adapter *adapter)
 
        igc_ptp_suspend(adapter);
 
-       /* disable receives in the hardware */
-       rctl = rd32(IGC_RCTL);
-       wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
-       /* flush and sleep below */
-
+       if (pci_device_is_present(adapter->pdev)) {
+               /* disable receives in the hardware */
+               rctl = rd32(IGC_RCTL);
+               wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
+               /* flush and sleep below */
+       }
        /* set trans_start so we don't get spurious watchdogs during reset */
        netif_trans_update(netdev);
 
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);
 
-       /* disable transmits in the hardware */
-       tctl = rd32(IGC_TCTL);
-       tctl &= ~IGC_TCTL_EN;
-       wr32(IGC_TCTL, tctl);
-       /* flush both disables and wait for them to finish */
-       wrfl();
-       usleep_range(10000, 20000);
+       if (pci_device_is_present(adapter->pdev)) {
+               /* disable transmits in the hardware */
+               tctl = rd32(IGC_TCTL);
+               tctl &= ~IGC_TCTL_EN;
+               wr32(IGC_TCTL, tctl);
+               /* flush both disables and wait for them to finish */
+               wrfl();
+               usleep_range(10000, 20000);
 
-       igc_irq_disable(adapter);
+               igc_irq_disable(adapter);
+       }
 
        adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
 
@@ -5489,7 +5495,7 @@ static bool validate_schedule(struct igc_adapter *adapter,
                if (e->command != TC_TAPRIO_CMD_SET_GATES)
                        return false;
 
-               for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+               for (i = 0; i < adapter->num_tx_queues; i++) {
                        if (e->gate_mask & BIT(i))
                                queue_uses[i]++;
 
@@ -5546,7 +5552,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 
                end_time += e->interval;
 
-               for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+               for (i = 0; i < adapter->num_tx_queues; i++) {
                        struct igc_ring *ring = adapter->tx_ring[i];
 
                        if (!(e->gate_mask & BIT(i)))
index 69617d2..4ae19c6 100644 (file)
@@ -849,7 +849,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
        adapter->ptp_tx_skb = NULL;
        clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
 
-       igc_ptp_time_save(adapter);
+       if (pci_device_is_present(adapter->pdev))
+               igc_ptp_time_save(adapter);
 }
 
 /**
index 96dd1a4..b1d22e4 100644 (file)
@@ -52,8 +52,11 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
 
                /* Kick start the NAPI context so that receiving will start */
                err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
-               if (err)
+               if (err) {
+                       clear_bit(qid, adapter->af_xdp_zc_qps);
+                       xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
                        return err;
+               }
        }
 
        return 0;
index 76a7777..de32e5b 100644 (file)
 #define        MVNETA_VLAN_PRIO_TO_RXQ                  0x2440
 #define      MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
 #define MVNETA_PORT_STATUS                       0x2444
-#define      MVNETA_TX_IN_PRGRS                  BIT(1)
+#define      MVNETA_TX_IN_PRGRS                  BIT(0)
 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 /* Only exists on Armada XP and Armada 370 */
index b9fbc9f..cf8acab 100644 (file)
@@ -938,7 +938,7 @@ enum mvpp22_ptp_packet_format {
 #define MVPP2_BM_COOKIE_POOL_OFFS      8
 #define MVPP2_BM_COOKIE_CPU_OFFS       24
 
-#define MVPP2_BM_SHORT_FRAME_SIZE      704     /* frame size 128 */
+#define MVPP2_BM_SHORT_FRAME_SIZE      736     /* frame size 128 */
 #define MVPP2_BM_LONG_FRAME_SIZE       2240    /* frame size 1664 */
 #define MVPP2_BM_JUMBO_FRAME_SIZE      10432   /* frame size 9856 */
 /* BM short pool packet size
index d12e21d..fa7a068 100644 (file)
@@ -530,6 +530,8 @@ err_trap_register:
                prestera_trap = &prestera_trap_items_arr[i];
                devlink_traps_unregister(devlink, &prestera_trap->trap, 1);
        }
+       devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr,
+                                      groups_count);
 err_groups_register:
        kfree(trap_data->trap_items_arr);
 err_trap_items_alloc:
index 0b3e8f2..9a30916 100644 (file)
@@ -748,7 +748,7 @@ static void
 prestera_fdb_offload_notify(struct prestera_port *port,
                            struct switchdev_notifier_fdb_info *info)
 {
-       struct switchdev_notifier_fdb_info send_info;
+       struct switchdev_notifier_fdb_info send_info = {};
 
        send_info.addr = info->addr;
        send_info.vid = info->vid;
@@ -1123,7 +1123,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused,
 static void prestera_fdb_event(struct prestera_switch *sw,
                               struct prestera_event *evt, void *arg)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
        struct net_device *dev = NULL;
        struct prestera_port *port;
        struct prestera_lag *lag;
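The prestera hunks above (and several similar ones further down) replace an uninitialized on-stack switchdev_notifier_fdb_info with a zero-initialized one. A small user-space sketch of why that matters; the struct name and fields here are invented, not the kernel's, and "= { 0 }" has the same effect as the patch's "= {}" spelling under GCC:

#include <stdio.h>

struct fdb_info {                       /* illustrative stand-in, not the kernel struct */
        const char *addr;
        unsigned short vid;
        unsigned char added_by_user;
        unsigned char offloaded;
};

static void consume(const struct fdb_info *info)
{
        /* a notifier consumer may branch on any of these fields */
        printf("%s vid=%u added_by_user=%u offloaded=%u\n",
               info->addr, info->vid, info->added_by_user, info->offloaded);
}

int main(void)
{
        struct fdb_info info = { 0 };   /* every field starts out zeroed */

        info.addr = "00:11:22:33:44:55";
        info.vid = 10;
        consume(&info);                 /* without the zero-init, the last two fields would be stack garbage */
        return 0;
}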
index df3e493..360e093 100644 (file)
@@ -134,6 +134,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                              cq->cqn);
 
        cq->uar = dev->priv.uar;
+       cq->irqn = eq->core.irqn;
 
        return 0;
 
index 01a1d02..3f8a980 100644 (file)
@@ -1019,12 +1019,19 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
        MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER);
        mlx5_eq_notifier_register(dev, &tracer->nb);
 
-       mlx5_fw_tracer_start(tracer);
-
+       err = mlx5_fw_tracer_start(tracer);
+       if (err) {
+               mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err);
+               goto err_notifier_unregister;
+       }
        return 0;
 
+err_notifier_unregister:
+       mlx5_eq_notifier_unregister(dev, &tracer->nb);
+       mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
 err_dealloc_pd:
        mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+       cancel_work_sync(&tracer->read_fw_strings_work);
        return err;
 }
 
index 8f79f04..1e2d117 100644 (file)
@@ -124,6 +124,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
        if (IS_ERR(rt))
                return PTR_ERR(rt);
 
+       if (rt->rt_type != RTN_UNICAST) {
+               ret = -ENETUNREACH;
+               goto err_rt_release;
+       }
+
        if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
                ret = -ENETUNREACH;
                goto err_rt_release;
index 37c4408..24f919e 100644 (file)
@@ -1535,15 +1535,9 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
-       int eqn_not_used;
-       unsigned int irqn;
        int err;
        u32 i;
 
-       err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-       if (err)
-               return err;
-
        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
@@ -1557,7 +1551,6 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
-       mcq->irqn       = irqn;
 
        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1605,11 +1598,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        void *in;
        void *cqc;
        int inlen;
-       unsigned int irqn_not_used;
        int eqn;
        int err;
 
-       err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+       err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
        if (err)
                return err;
 
@@ -1891,30 +1883,30 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
        if (err)
                goto err_close_icosq;
 
+       err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
+       if (err)
+               goto err_close_sqs;
+
        if (c->xdp) {
                err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
                                       &c->rq_xdpsq, false);
                if (err)
-                       goto err_close_sqs;
+                       goto err_close_rq;
        }
 
-       err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
-       if (err)
-               goto err_close_xdp_sq;
-
        err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
        if (err)
-               goto err_close_rq;
+               goto err_close_xdp_sq;
 
        return 0;
 
-err_close_rq:
-       mlx5e_close_rq(&c->rq);
-
 err_close_xdp_sq:
        if (c->xdp)
                mlx5e_close_xdpsq(&c->rq_xdpsq);
 
+err_close_rq:
+       mlx5e_close_rq(&c->rq);
+
 err_close_sqs:
        mlx5e_close_sqs(c);
 
@@ -1949,9 +1941,9 @@ err_close_async_icosq_cq:
 static void mlx5e_close_queues(struct mlx5e_channel *c)
 {
        mlx5e_close_xdpsq(&c->xdpsq);
-       mlx5e_close_rq(&c->rq);
        if (c->xdp)
                mlx5e_close_xdpsq(&c->rq_xdpsq);
+       mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        mlx5e_close_icosq(&c->icosq);
        mlx5e_close_icosq(&c->async_icosq);
@@ -1983,9 +1975,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        struct mlx5e_channel *c;
        unsigned int irq;
        int err;
-       int eqn;
 
-       err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+       err = mlx5_vector2irqn(priv->mdev, ix, &irq);
        if (err)
                return err;
 
index 6e074cc..605c8ec 100644 (file)
@@ -855,8 +855,8 @@ clean:
        return err;
 }
 
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-                   unsigned int *irqn)
+static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+                         unsigned int *irqn)
 {
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;
@@ -865,8 +865,10 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 
        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                if (i++ == vector) {
-                       *eqn = eq->core.eqn;
-                       *irqn = eq->core.irqn;
+                       if (irqn)
+                               *irqn = eq->core.irqn;
+                       if (eqn)
+                               *eqn = eq->core.eqn;
                        err = 0;
                        break;
                }
@@ -874,8 +876,18 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 
        return err;
 }
+
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
+{
+       return vector2eqnirqn(dev, vector, eqn, NULL);
+}
 EXPORT_SYMBOL(mlx5_vector2eqn);
 
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
+{
+       return vector2eqnirqn(dev, vector, NULL, irqn);
+}
+
 unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
 {
        return dev->priv.eq_table->num_comp_eqs;
index a6e1d4f..69a3630 100644 (file)
@@ -69,7 +69,7 @@ static void
 mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
                                   unsigned long val)
 {
-       struct switchdev_notifier_fdb_info send_info;
+       struct switchdev_notifier_fdb_info send_info = {};
 
        send_info.addr = addr;
        send_info.vid = vid;
@@ -579,7 +579,7 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
        xa_init(&bridge->vports);
        bridge->ifindex = ifindex;
        bridge->refcnt = 1;
-       bridge->ageing_time = BR_DEFAULT_AGEING_TIME;
+       bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
        list_add(&bridge->list, &br_offloads->bridges);
 
        return bridge;
@@ -1006,7 +1006,7 @@ int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswit
        if (!vport->bridge)
                return -EINVAL;
 
-       vport->bridge->ageing_time = ageing_time;
+       vport->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
        return 0;
 }
 
index 794012c..d3ad78a 100644 (file)
@@ -501,6 +501,7 @@ err_sampler:
 err_offload_rule:
        mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
 err_default_tbl:
+       kfree(sample_flow);
        return ERR_PTR(err);
 }
 
index 011e766..3bb71a1 100644 (file)
@@ -48,6 +48,7 @@
 #include "lib/fs_chains.h"
 #include "en_tc.h"
 #include "en/mapping.h"
+#include "devlink.h"
 
 #define mlx5_esw_for_each_rep(esw, i, rep) \
        xa_for_each(&((esw)->offloads.vport_reps), i, rep)
@@ -3001,12 +3002,19 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
        if (cur_mlx5_mode == mlx5_mode)
                goto unlock;
 
-       if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+       if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
+               if (mlx5_devlink_trap_get_num_active(esw->dev)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Can't change mode while devlink traps are active");
+                       err = -EOPNOTSUPP;
+                       goto unlock;
+               }
                err = esw_offloads_start(esw, extack);
-       else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+       } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
                err = esw_offloads_stop(esw, extack);
-       else
+       } else {
                err = -EINVAL;
+       }
 
 unlock:
        mlx5_esw_unlock(esw);
index bd66ab2..d5da4ab 100644 (file)
@@ -417,7 +417,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
        struct mlx5_wq_param wqp;
        struct mlx5_cqe64 *cqe;
        int inlen, err, eqn;
-       unsigned int irqn;
        void *cqc, *in;
        __be64 *pas;
        u32 i;
@@ -446,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
                goto err_cqwq;
        }
 
-       err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+       err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
        if (err) {
                kvfree(in);
                goto err_cqwq;
@@ -476,7 +475,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
        *conn->cq.mcq.arm_db    = 0;
        conn->cq.mcq.vector     = 0;
        conn->cq.mcq.comp       = mlx5_fpga_conn_cq_complete;
-       conn->cq.mcq.irqn       = irqn;
        conn->cq.mcq.uar        = fdev->conn_res.uar;
        tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
 
index 624cede..d3d628b 100644 (file)
@@ -104,4 +104,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
 #endif
 
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
+
 #endif
index eb1b316..c84ad87 100644 (file)
@@ -1784,16 +1784,14 @@ static int __init init(void)
        if (err)
                goto err_sf;
 
-#ifdef CONFIG_MLX5_CORE_EN
        err = mlx5e_init();
-       if (err) {
-               pci_unregister_driver(&mlx5_core_driver);
-               goto err_debug;
-       }
-#endif
+       if (err)
+               goto err_en;
 
        return 0;
 
+err_en:
+       mlx5_sf_driver_unregister();
 err_sf:
        pci_unregister_driver(&mlx5_core_driver);
 err_debug:
@@ -1803,9 +1801,7 @@ err_debug:
 
 static void __exit cleanup(void)
 {
-#ifdef CONFIG_MLX5_CORE_EN
        mlx5e_cleanup();
-#endif
        mlx5_sf_driver_unregister();
        pci_unregister_driver(&mlx5_core_driver);
        mlx5_unregister_debugfs();
index 343807a..da365b8 100644 (file)
@@ -206,8 +206,13 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
 int mlx5_fw_version_query(struct mlx5_core_dev *dev,
                          u32 *running_ver, u32 *stored_ver);
 
+#ifdef CONFIG_MLX5_CORE_EN
 int mlx5e_init(void);
 void mlx5e_cleanup(void);
+#else
+static inline int mlx5e_init(void){ return 0; }
+static inline void mlx5e_cleanup(void){}
+#endif
 
 static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
 {
index b25f764..3465b36 100644 (file)
@@ -214,6 +214,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
                err = -ENOMEM;
                goto err_cpumask;
        }
+       irq->pool = pool;
        kref_init(&irq->kref);
        irq->index = i;
        err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
@@ -222,7 +223,6 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
                              irq->index, err);
                goto err_xa;
        }
-       irq->pool = pool;
        return irq;
 err_xa:
        free_cpumask_var(irq->mask);
@@ -251,8 +251,11 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
 
 int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
 {
+       int err = 0;
+
+       err = atomic_notifier_chain_unregister(&irq->nh, nb);
        irq_put(irq);
-       return atomic_notifier_chain_unregister(&irq->nh, nb);
+       return err;
 }
 
 struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
@@ -437,6 +440,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
        if (!pool)
                return ERR_PTR(-ENOMEM);
        pool->dev = dev;
+       mutex_init(&pool->lock);
        xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
        pool->xa_num_irqs.min = start;
        pool->xa_num_irqs.max = start + size - 1;
@@ -445,7 +449,6 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
                         name);
        pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
        pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
-       mutex_init(&pool->lock);
        mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
                      name, size, start);
        return pool;
@@ -459,6 +462,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
        xa_for_each(&pool->irqs, index, irq)
                irq_release(&irq->kref);
        xa_destroy(&pool->irqs);
+       mutex_destroy(&pool->lock);
        kvfree(pool);
 }
 
index 12cf323..9df0e73 100644 (file)
@@ -749,7 +749,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        struct mlx5_cqe64 *cqe;
        struct mlx5dr_cq *cq;
        int inlen, err, eqn;
-       unsigned int irqn;
        void *cqc, *in;
        __be64 *pas;
        int vector;
@@ -782,7 +781,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
                goto err_cqwq;
 
        vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
-       err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
+       err = mlx5_vector2eqn(mdev, vector, &eqn);
        if (err) {
                kvfree(in);
                goto err_cqwq;
@@ -818,7 +817,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        *cq->mcq.arm_db = cpu_to_be32(2 << 28);
 
        cq->mcq.vector = 0;
-       cq->mcq.irqn = irqn;
        cq->mcq.uar = uar;
 
        return cq;
index f1950e4..e4dd4ee 100644 (file)
@@ -352,6 +352,7 @@ static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
 {
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
                 DR_STE_TUNL_ACTION_DECAP);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
 }
 
 static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
@@ -365,6 +366,7 @@ static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
                 DR_STE_TUNL_ACTION_L3_DECAP);
        MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
 }
 
 static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
index 7e221ef..f69cbb3 100644 (file)
@@ -9079,7 +9079,7 @@ mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
 
 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
        struct net_device *dev;
 
        dev = br_fdb_find_port(rif->dev, mac, 0);
@@ -9127,8 +9127,8 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
 
 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
 {
+       struct switchdev_notifier_fdb_info info = {};
        u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
-       struct switchdev_notifier_fdb_info info;
        struct net_device *br_dev;
        struct net_device *dev;
 
index c5ef9aa..8f90cd3 100644 (file)
@@ -2508,7 +2508,7 @@ mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
                            const char *mac, u16 vid,
                            struct net_device *dev, bool offloaded)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = mac;
        info.vid = vid;
index 0443f66..9a8e4f2 100644 (file)
@@ -277,7 +277,7 @@ static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
                                      const char *mac, u16 vid,
                                      struct net_device *dev, bool offloaded)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = mac;
        info.vid = vid;
index 9d485a9..cb68eaa 100644 (file)
  */
 #define VSTAX 73
 
-static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
+#define ifh_encode_bitfield(ifh, value, pos, _width)                   \
+       ({                                                              \
+               u32 width = (_width);                                   \
+                                                                       \
+               /* Max width is 5 bytes - 40 bits. In worst case this will
+                * spread over 6 bytes - 48 bits
+                */                                                     \
+               compiletime_assert(width <= 40,                         \
+                                  "Unsupported width, must be <= 40"); \
+               __ifh_encode_bitfield((ifh), (value), (pos), width);    \
+       })
+
+static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
 {
        u8 *ifh_hdr = ifh;
        /* Calculate the Start IFH byte position of this IFH bit position */
        u32 byte = (35 - (pos / 8));
        /* Calculate the Start bit position in the Start IFH byte */
        u32 bit  = (pos % 8);
-       u64 encode = GENMASK(bit + width - 1, bit) & (value << bit);
-
-       /* Max width is 5 bytes - 40 bits. In worst case this will
-        * spread over 6 bytes - 48 bits
-        */
-       compiletime_assert(width <= 40, "Unsupported width, must be <= 40");
+       u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit);
 
        /* The b0-b7 goes into the start IFH byte */
        if (encode & 0xFF)
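The GENMASK_ULL change above matters because a field of up to 40 bits placed at a byte-unaligned offset can span bits above 31, which a 32-bit mask silently truncates. A user-space sketch of that truncation; field_mask() and the sample value are purely illustrative:

#include <stdio.h>
#include <stdint.h>

static uint64_t field_mask(unsigned int bit, unsigned int width)
{
        return ((UINT64_C(1) << width) - 1) << bit;     /* bits bit .. bit+width-1 */
}

int main(void)
{
        uint64_t value = 0xABCDEF0123ULL;               /* a 40-bit payload */
        unsigned int bit = 6, width = 40;               /* spans bits 6..45 */

        uint64_t ok  = (value << bit) & field_mask(bit, width);
        uint64_t bad = (value << bit) & (uint32_t)field_mask(bit, width);

        printf("64-bit mask keeps the whole field: 0x%012llx\n", (unsigned long long)ok);
        printf("32-bit mask drops bits 32..45:     0x%012llx\n", (unsigned long long)bad);
        return 0;
}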
index adfb978..2948d73 100644 (file)
@@ -1334,6 +1334,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
                        struct net_device *bond = ocelot_port->bond;
 
                        mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+                       mask |= cpu_fwd_mask;
                        mask &= ~BIT(port);
                        if (bond) {
                                mask &= ~ocelot_get_bond_mask(ocelot, bond,
index ea4e834..7390fa3 100644 (file)
@@ -21,7 +21,7 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
                    ocelot->map[target][reg & REG_MASK] + offset, &val);
        return val;
 }
-EXPORT_SYMBOL(__ocelot_read_ix);
+EXPORT_SYMBOL_GPL(__ocelot_read_ix);
 
 void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
 {
@@ -32,7 +32,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
        regmap_write(ocelot->targets[target],
                     ocelot->map[target][reg & REG_MASK] + offset, val);
 }
-EXPORT_SYMBOL(__ocelot_write_ix);
+EXPORT_SYMBOL_GPL(__ocelot_write_ix);
 
 void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
                     u32 offset)
@@ -45,7 +45,7 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
                           ocelot->map[target][reg & REG_MASK] + offset,
                           mask, val);
 }
-EXPORT_SYMBOL(__ocelot_rmw_ix);
+EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);
 
 u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
 {
@@ -58,7 +58,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
        regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
        return val;
 }
-EXPORT_SYMBOL(ocelot_port_readl);
+EXPORT_SYMBOL_GPL(ocelot_port_readl);
 
 void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
 {
@@ -69,7 +69,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
 
        regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
 }
-EXPORT_SYMBOL(ocelot_port_writel);
+EXPORT_SYMBOL_GPL(ocelot_port_writel);
 
 void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
 {
@@ -77,7 +77,7 @@ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
 
        ocelot_port_writel(port, (cur & (~mask)) | val, reg);
 }
-EXPORT_SYMBOL(ocelot_port_rmwl);
+EXPORT_SYMBOL_GPL(ocelot_port_rmwl);
 
 u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
                            u32 reg, u32 offset)
@@ -128,7 +128,7 @@ int ocelot_regfields_init(struct ocelot *ocelot,
 
        return 0;
 }
-EXPORT_SYMBOL(ocelot_regfields_init);
+EXPORT_SYMBOL_GPL(ocelot_regfields_init);
 
 static struct regmap_config ocelot_regmap_config = {
        .reg_bits       = 32,
@@ -148,4 +148,4 @@ struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
 
        return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
 }
-EXPORT_SYMBOL(ocelot_regmap_init);
+EXPORT_SYMBOL_GPL(ocelot_regmap_init);
index 51b4b25..84f7dbe 100644 (file)
@@ -819,7 +819,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
                printk(version);
 #endif
 
-       i = pci_enable_device(pdev);
+       i = pcim_enable_device(pdev);
        if (i) return i;
 
        /* natsemi has a non-standard PM control register
@@ -852,7 +852,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
        ioaddr = ioremap(iostart, iosize);
        if (!ioaddr) {
                i = -ENOMEM;
-               goto err_ioremap;
+               goto err_pci_request_regions;
        }
 
        /* Work around the dropped serial bit. */
@@ -974,9 +974,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
  err_register_netdev:
        iounmap(ioaddr);
 
- err_ioremap:
-       pci_release_regions(pdev);
-
  err_pci_request_regions:
        free_netdev(dev);
        return i;
@@ -3241,7 +3238,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
 
        NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
        unregister_netdev (dev);
-       pci_release_regions (pdev);
        iounmap(ioaddr);
        free_netdev (dev);
 }
index 82eef4c..7abd13e 100644 (file)
@@ -3512,13 +3512,13 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
 
        kfree(vdev->vpaths);
 
-       /* we are safe to free it now */
-       free_netdev(dev);
-
        vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
                        buf);
        vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d  Exiting...", buf,
                             __func__, __LINE__);
+
+       /* we are safe to free it now */
+       free_netdev(dev);
 }
 
 /*
index 1b48244..8803faa 100644 (file)
@@ -286,6 +286,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
 
        /* Init to unknowns */
        ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
        cmd->base.port = PORT_OTHER;
        cmd->base.speed = SPEED_UNKNOWN;
        cmd->base.duplex = DUPLEX_UNKNOWN;
index 02a4610..c46a7f7 100644 (file)
@@ -327,6 +327,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        unsigned long flags;
        int rc = -EINVAL;
 
+       if (!p_ll2_conn)
+               return rc;
+
        spin_lock_irqsave(&p_tx->lock, flags);
        if (p_tx->b_completing_packet) {
                rc = -EBUSY;
@@ -500,7 +503,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
        unsigned long flags = 0;
        int rc = 0;
 
+       if (!p_ll2_conn)
+               return rc;
+
        spin_lock_irqsave(&p_rx->lock, flags);
+
+       if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+               spin_unlock_irqrestore(&p_rx->lock, flags);
+               return 0;
+       }
+
        cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
        cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
 
@@ -821,6 +833,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
        int rc;
 
+       if (!p_ll2_conn)
+               return 0;
+
        if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
                return 0;
 
@@ -844,6 +859,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        u16 new_idx = 0, num_bds = 0;
        int rc;
 
+       if (!p_ll2_conn)
+               return 0;
+
        if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
                return 0;
 
@@ -1728,6 +1746,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
        if (!p_ll2_conn)
                return -EINVAL;
        p_rx = &p_ll2_conn->rx_queue;
+       if (!p_rx->set_prod_addr)
+               return -EIO;
 
        spin_lock_irqsave(&p_rx->lock, flags);
        if (!list_empty(&p_rx->free_descq))
index 5bd58c6..6bb9ec9 100644 (file)
@@ -616,7 +616,12 @@ static int qed_enable_msix(struct qed_dev *cdev,
                        rc = cnt;
        }
 
-       if (rc > 0) {
+       /* For VFs, we should return with an error in case we didn't get
+        * the exact number of MSI-X vectors we requested.
+        * Not doing that will lead to a crash when starting queues for
+        * this VF.
+        */
+       if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
                /* MSI-x configuration was achieved */
                int_params->out.int_mode = QED_INT_MODE_MSIX;
                int_params->out.num_vectors = rc;
index da864d1..4f4b792 100644 (file)
@@ -1285,8 +1285,7 @@ qed_rdma_create_qp(void *rdma_cxt,
 
        if (!rdma_cxt || !in_params || !out_params ||
            !p_hwfn->p_rdma_info->active) {
-               DP_ERR(p_hwfn->cdev,
-                      "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+               pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
                       rdma_cxt, in_params, out_params);
                return NULL;
        }
index 2e62a2c..5630008 100644 (file)
@@ -501,6 +501,7 @@ struct qede_fastpath {
 #define QEDE_SP_HW_ERR                  4
 #define QEDE_SP_ARFS_CONFIG             5
 #define QEDE_SP_AER                    7
+#define QEDE_SP_DISABLE                        8
 
 #ifdef CONFIG_RFS_ACCEL
 int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
index 01ac1e9..1c7f9ed 100644 (file)
@@ -1009,6 +1009,13 @@ static void qede_sp_task(struct work_struct *work)
        struct qede_dev *edev = container_of(work, struct qede_dev,
                                             sp_task.work);
 
+       /* Disable execution of this deferred work once
+        * qede removal is in progress; this stops any
+        * future scheduling of sp_task.
+        */
+       if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
+               return;
+
        /* The locking scheme depends on the specific flag:
         * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
         * ensure that ongoing flows are ended and new ones are not started.
@@ -1300,6 +1307,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
        qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
 
        if (mode != QEDE_REMOVE_RECOVERY) {
+               set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
                unregister_netdev(ndev);
 
                cancel_delayed_work_sync(&edev->sp_task);
@@ -1866,6 +1874,7 @@ static void qede_sync_free_irqs(struct qede_dev *edev)
        }
 
        edev->int_info.used_cnt = 0;
+       edev->int_info.msix_cnt = 0;
 }
 
 static int qede_req_msix_irqs(struct qede_dev *edev)
@@ -2419,7 +2428,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
        goto out;
 err4:
        qede_sync_free_irqs(edev);
-       memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
 err3:
        qede_napi_disable_remove(edev);
 err2:
index d8882d0..d51bac7 100644 (file)
@@ -3156,8 +3156,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
 
                indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
                ret = QLCRD32(adapter, indirect_addr, &err);
-               if (err == -EIO)
+               if (err == -EIO) {
+                       qlcnic_83xx_unlock_flash(adapter);
                        return err;
+               }
 
                word = ret;
                *(u32 *)p_data  = word;
index c7af5bc..4d8e337 100644 (file)
@@ -3502,12 +3502,16 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
        RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
 
+       /* The default value is 0x13. Change it to 0x2f */
+       rtl_csi_access_enable(tp, 0x2f);
+
        rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
 
        /* disable EEE */
        rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
 
        rtl_pcie_state_l2l3_disable(tp);
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
index a466336..1f06b92 100644 (file)
@@ -2715,7 +2715,7 @@ static void
 rocker_fdb_offload_notify(struct rocker_port *rocker_port,
                          struct switchdev_notifier_fdb_info *recv_info)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = recv_info->addr;
        info.vid = recv_info->vid;
index 967a634..e33a9d2 100644 (file)
@@ -1822,7 +1822,7 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work)
                container_of(work, struct ofdpa_fdb_learn_work, work);
        bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
        bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = lw->addr;
        info.vid = lw->vid;
index 280ac01..ed81701 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/delay.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
-#include <linux/pm_runtime.h>
 
 #include "stmmac_platform.h"
 
@@ -1529,9 +1528,6 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
                return ret;
        }
 
-       pm_runtime_enable(dev);
-       pm_runtime_get_sync(dev);
-
        if (bsp_priv->integrated_phy)
                rk_gmac_integrated_phy_powerup(bsp_priv);
 
@@ -1540,14 +1536,9 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
 
 static void rk_gmac_powerdown(struct rk_priv_data *gmac)
 {
-       struct device *dev = &gmac->pdev->dev;
-
        if (gmac->integrated_phy)
                rk_gmac_integrated_phy_powerdown(gmac);
 
-       pm_runtime_put_sync(dev);
-       pm_runtime_disable(dev);
-
        phy_power_on(gmac, false);
        gmac_clk_enable(gmac, false);
 }
index fcdb1d2..43eead7 100644 (file)
@@ -339,9 +339,9 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
 static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
 {
        if (stmmac_xdp_is_enabled(priv))
-               return XDP_PACKET_HEADROOM + NET_IP_ALIGN;
+               return XDP_PACKET_HEADROOM;
 
-       return NET_SKB_PAD + NET_IP_ALIGN;
+       return 0;
 }
 
 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
index 7b8404a..fa90bcd 100644 (file)
@@ -4914,6 +4914,10 @@ read_again:
 
                prefetch(np);
 
+               /* Ensure a valid XSK buffer before proceeding */
+               if (!buf->xdp)
+                       break;
+
                if (priv->extend_desc)
                        stmmac_rx_extended_status(priv, &priv->dev->stats,
                                                  &priv->xstats,
@@ -4934,10 +4938,6 @@ read_again:
                        continue;
                }
 
-               /* Ensure a valid XSK buffer before proceed */
-               if (!buf->xdp)
-                       break;
-
                /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
                if (likely(status & rx_not_ls)) {
                        xsk_buff_free(buf->xdp);
index 4f3b643..8160087 100644 (file)
@@ -884,11 +884,13 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
        return 0;
 
 disable:
-       mutex_lock(&priv->plat->est->lock);
-       priv->plat->est->enable = false;
-       stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
-                            priv->plat->clk_ptp_rate);
-       mutex_unlock(&priv->plat->est->lock);
+       if (priv->plat->est) {
+               mutex_lock(&priv->plat->est->lock);
+               priv->plat->est->enable = false;
+               stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+                                    priv->plat->clk_ptp_rate);
+               mutex_unlock(&priv->plat->est->lock);
+       }
 
        priv->plat->fpe_cfg->enable = false;
        stmmac_fpe_configure(priv, priv->ioaddr,
index 105821b..2a616c6 100644 (file)
@@ -34,18 +34,18 @@ static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
        need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
 
        if (need_update) {
-               stmmac_disable_rx_queue(priv, queue);
-               stmmac_disable_tx_queue(priv, queue);
                napi_disable(&ch->rx_napi);
                napi_disable(&ch->tx_napi);
+               stmmac_disable_rx_queue(priv, queue);
+               stmmac_disable_tx_queue(priv, queue);
        }
 
        set_bit(queue, priv->af_xdp_zc_qps);
 
        if (need_update) {
-               napi_enable(&ch->rxtx_napi);
                stmmac_enable_rx_queue(priv, queue);
                stmmac_enable_tx_queue(priv, queue);
+               napi_enable(&ch->rxtx_napi);
 
                err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
                if (err)
@@ -72,10 +72,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
        need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
 
        if (need_update) {
+               napi_disable(&ch->rxtx_napi);
                stmmac_disable_rx_queue(priv, queue);
                stmmac_disable_tx_queue(priv, queue);
                synchronize_rcu();
-               napi_disable(&ch->rxtx_napi);
        }
 
        xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);
@@ -83,10 +83,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
        clear_bit(queue, priv->af_xdp_zc_qps);
 
        if (need_update) {
-               napi_enable(&ch->rx_napi);
-               napi_enable(&ch->tx_napi);
                stmmac_enable_rx_queue(priv, queue);
                stmmac_enable_tx_queue(priv, queue);
+               napi_enable(&ch->rx_napi);
+               napi_enable(&ch->tx_napi);
        }
 
        return 0;
index 718539c..67a08cb 100644 (file)
@@ -2060,8 +2060,12 @@ static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *comm
 
        for (i = 1; i <= common->port_num; i++) {
                struct am65_cpsw_port *port = am65_common_get_port(common, i);
-               struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
+               struct am65_cpsw_ndev_priv *priv;
 
+               if (!port->ndev)
+                       continue;
+
+               priv = am65_ndev_to_priv(port->ndev);
                priv->offload_fwd_mark = set_val;
        }
 }
index 9c29b36..599708a 100644 (file)
@@ -358,7 +358,7 @@ static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
 static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
                                         struct switchdev_notifier_fdb_info *rcv)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = rcv->addr;
        info.vid = rcv->vid;
index 57d279f..d1d0200 100644 (file)
@@ -920,7 +920,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
        struct cpdma_chan *txch;
        int ret, q_idx;
 
-       if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+       if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
                cpsw_err(priv, tx_err, "packet pad failed\n");
                ndev->stats.tx_dropped++;
                return NET_XMIT_DROP;
@@ -1100,7 +1100,7 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
 
        for (i = 0; i < n; i++) {
                xdpf = frames[i];
-               if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+               if (xdpf->len < READ_ONCE(priv->tx_packet_min))
                        break;
 
                if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
@@ -1389,6 +1389,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
                priv->dev  = dev;
                priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
                priv->emac_port = i + 1;
+               priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
 
                if (is_valid_ether_addr(slave_data->mac_addr)) {
                        ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
@@ -1686,6 +1687,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
 
                        priv = netdev_priv(sl_ndev);
                        slave->port_vlan = vlan;
+                       WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
                        if (netif_running(sl_ndev))
                                cpsw_port_add_switch_def_ale_entries(priv,
                                                                     slave);
@@ -1714,6 +1716,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
 
                        priv = netdev_priv(slave->ndev);
                        slave->port_vlan = slave->data->dual_emac_res_vlan;
+                       WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
                        cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
                }
 
index a323bea..2951fb7 100644 (file)
@@ -89,7 +89,8 @@ do {                                                          \
 
 #define CPSW_POLL_WEIGHT       64
 #define CPSW_RX_VLAN_ENCAP_HDR_SIZE            4
-#define CPSW_MIN_PACKET_SIZE   (VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE_VLAN      (VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE   (ETH_ZLEN)
 #define CPSW_MAX_PACKET_SIZE   (VLAN_ETH_FRAME_LEN +\
                                 ETH_FCS_LEN +\
                                 CPSW_RX_VLAN_ENCAP_HDR_SIZE)
@@ -380,6 +381,7 @@ struct cpsw_priv {
        u32 emac_port;
        struct cpsw_common *cpsw;
        int offload_fwd_mark;
+       u32 tx_packet_min;
 };
 
 #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
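The cpsw hunks above pair WRITE_ONCE() in the switch-mode path with READ_ONCE() in the xmit path for the new tx_packet_min field, so concurrent readers always see a single, untorn value. A rough user-space analogue using C11 relaxed atomics; the field name and the 60/64 values are stand-ins rather than the driver's constants:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int tx_packet_min = 60;         /* ETH_ZLEN-style default */

static void switch_mode_set(unsigned int new_min)
{
        /* updater side: analogous to the WRITE_ONCE() in the mode-change path */
        atomic_store_explicit(&tx_packet_min, new_min, memory_order_relaxed);
}

static unsigned int xmit_min(void)
{
        /* reader side: analogous to the READ_ONCE() in the xmit path */
        return atomic_load_explicit(&tx_packet_min, memory_order_relaxed);
}

int main(void)
{
        printf("pad to %u bytes\n", xmit_min());
        switch_mode_set(64);                            /* VLAN_ETH_ZLEN-style value */
        printf("pad to %u bytes\n", xmit_min());
        return 0;
}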
index f7fb6e1..a7d97d4 100644 (file)
@@ -368,7 +368,7 @@ static int cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
 static void cpsw_fdb_offload_notify(struct net_device *ndev,
                                    struct switchdev_notifier_fdb_info *rcv)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = rcv->addr;
        info.vid = rcv->vid;
index fcf3af7..8fe8887 100644 (file)
@@ -827,6 +827,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
                return;
        }
 
+       if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
+               pr_err("6pack: cooked buffer overrun, data loss\n");
+               sp->rx_count = 0;
+               return;
+       }
+
        buf = sp->raw_buf;
        sp->cooked_buf[sp->rx_count_cooked++] =
                buf[0] | ((buf[1] << 2) & 0xc0);
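The 6pack hunk above refuses to decode another chunk once fewer than three bytes of space remain in the fixed cooked buffer. A compact user-space sketch of the same guard; the buffer size and helper names are invented for the example:

#include <stdio.h>

#define COOKED_SIZE 8

static unsigned char cooked[COOKED_SIZE];
static unsigned int cooked_count;

static int push_chunk(const unsigned char raw[3])
{
        if (cooked_count + 2 >= COOKED_SIZE)    /* not enough room for three more bytes */
                return -1;
        cooked[cooked_count++] = raw[0];
        cooked[cooked_count++] = raw[1];
        cooked[cooked_count++] = raw[2];
        return 0;
}

int main(void)
{
        const unsigned char chunk[3] = { 1, 2, 3 };

        while (push_chunk(chunk) == 0)
                ;
        printf("stored %u bytes, then refused further chunks\n", cooked_count);
        return 0;
}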
index ebc976b..8caa61e 100644 (file)
@@ -418,7 +418,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
        struct hwsim_edge *e;
        u32 v0, v1;
 
-       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
            !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
                return -EINVAL;
 
@@ -528,14 +528,14 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
        u32 v0, v1;
        u8 lqi;
 
-       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
            !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
                return -EINVAL;
 
        if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
                return -EINVAL;
 
-       if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
+       if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] ||
            !edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI])
                return -EINVAL;
 
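The three hwsim fixes above all correct the same logic slip: when both attributes are required, the reject condition must fire when either one is missing, since !(a && b) is !a || !b. A tiny sketch; validate(), has_a and has_b are placeholders:

#include <stdbool.h>
#include <stdio.h>

static int validate(bool has_a, bool has_b)
{
        if (!has_a || !has_b)   /* with "&&" this only rejected when both were missing */
                return -22;     /* -EINVAL */
        return 0;
}

int main(void)
{
        printf("%d %d %d\n", validate(true, true), validate(true, false),
               validate(false, false));
        return 0;
}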
index 110e4ee..ebd001f 100644 (file)
@@ -82,6 +82,17 @@ out:
 
 static int parent_count;
 
+static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
+{
+       struct mdio_mux_child_bus *cb = pb->children;
+
+       while (cb) {
+               mdiobus_unregister(cb->mii_bus);
+               mdiobus_free(cb->mii_bus);
+               cb = cb->next;
+       }
+}
+
 int mdio_mux_init(struct device *dev,
                  struct device_node *mux_node,
                  int (*switch_fn)(int cur, int desired, void *data),
@@ -144,7 +155,7 @@ int mdio_mux_init(struct device *dev,
                cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
                if (!cb) {
                        ret_val = -ENOMEM;
-                       continue;
+                       goto err_loop;
                }
                cb->bus_number = v;
                cb->parent = pb;
@@ -152,8 +163,7 @@ int mdio_mux_init(struct device *dev,
                cb->mii_bus = mdiobus_alloc();
                if (!cb->mii_bus) {
                        ret_val = -ENOMEM;
-                       devm_kfree(dev, cb);
-                       continue;
+                       goto err_loop;
                }
                cb->mii_bus->priv = cb;
 
@@ -165,11 +175,15 @@ int mdio_mux_init(struct device *dev,
                cb->mii_bus->write = mdio_mux_write;
                r = of_mdiobus_register(cb->mii_bus, child_bus_node);
                if (r) {
+                       mdiobus_free(cb->mii_bus);
+                       if (r == -EPROBE_DEFER) {
+                               ret_val = r;
+                               goto err_loop;
+                       }
+                       devm_kfree(dev, cb);
                        dev_err(dev,
                                "Error: Failed to register MDIO bus for child %pOF\n",
                                child_bus_node);
-                       mdiobus_free(cb->mii_bus);
-                       devm_kfree(dev, cb);
                } else {
                        cb->next = pb->children;
                        pb->children = cb;
@@ -181,7 +195,10 @@ int mdio_mux_init(struct device *dev,
        }
 
        dev_err(dev, "Error: No acceptable child buses found\n");
-       devm_kfree(dev, pb);
+
+err_loop:
+       mdio_mux_uninit_children(pb);
+       of_node_put(child_bus_node);
 err_pb_kz:
        put_device(&parent_bus->dev);
 err_parent_bus:
@@ -193,14 +210,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
 void mdio_mux_uninit(void *mux_handle)
 {
        struct mdio_mux_parent_bus *pb = mux_handle;
-       struct mdio_mux_child_bus *cb = pb->children;
-
-       while (cb) {
-               mdiobus_unregister(cb->mii_bus);
-               mdiobus_free(cb->mii_bus);
-               cb = cb->next;
-       }
 
+       mdio_mux_uninit_children(pb);
        put_device(&pb->mii_bus->dev);
 }
 EXPORT_SYMBOL_GPL(mdio_mux_uninit);
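The mdio-mux hunks above convert mid-loop failures from "skip and continue" into a single unwind path that unregisters and frees every child created so far. A generic user-space sketch of that goto-unwind shape; all names and the failure injection are invented:

#include <stdio.h>
#include <stdlib.h>

struct child { struct child *next; };

static struct child *children;

static void free_children(void)
{
        while (children) {
                struct child *c = children;

                children = c->next;
                free(c);
        }
}

static int init_all(int count, int fail_at)
{
        for (int i = 0; i < count; i++) {
                struct child *c;

                if (i == fail_at)               /* e.g. -ENOMEM or -EPROBE_DEFER */
                        goto err_loop;
                c = calloc(1, sizeof(*c));
                if (!c)
                        goto err_loop;
                c->next = children;
                children = c;
        }
        return 0;

err_loop:
        free_children();                        /* undo the children created so far */
        return -1;
}

int main(void)
{
        printf("init_all: %d\n", init_all(4, 2));
        return 0;
}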
index 63fda3f..4bd6133 100644 (file)
@@ -1089,7 +1089,7 @@ struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
 
        xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL);
        if (!xpcs)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        xpcs->mdiodev = mdiodev;
 
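Returning ERR_PTR(-ENOMEM) instead of NULL above keeps xpcs_create() consistent with callers that only test IS_ERR(). A user-space sketch of the encoded-error-pointer convention; err_ptr(), is_err() and create_object() are illustrative stand-ins for the kernel helpers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err) { return (void *)err; }
static long ptr_err(const void *p) { return (long)p; }
static int is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *create_object(int fail)
{
        if (fail)
                return err_ptr(-ENOMEM);        /* not NULL: callers only test is_err() */
        return malloc(16);
}

int main(void)
{
        void *obj = create_object(1);

        if (is_err(obj))
                printf("create failed: %ld\n", ptr_err(obj));
        else
                free(obj);
        return 0;
}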
index 11ff335..b7a5ae2 100644 (file)
@@ -81,6 +81,8 @@ static struct phy_driver mtk_gephy_driver[] = {
                 */
                .config_intr    = genphy_no_config_intr,
                .handle_interrupt = genphy_handle_interrupt_no_ack,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
                .read_page      = mtk_gephy_read_page,
                .write_page     = mtk_gephy_write_page,
        },
@@ -93,6 +95,8 @@ static struct phy_driver mtk_gephy_driver[] = {
                 */
                .config_intr    = genphy_no_config_intr,
                .handle_interrupt = genphy_handle_interrupt_no_ack,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
                .read_page      = mtk_gephy_read_page,
                .write_page     = mtk_gephy_write_page,
        },
index 4d53886..5c928f8 100644 (file)
@@ -401,11 +401,11 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
 }
 
 static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
-                                           const u32 ksz_phy_id)
+                                           const bool ksz_8051)
 {
        int ret;
 
-       if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
+       if ((phydev->phy_id & MICREL_PHY_ID_MASK) != PHY_ID_KSZ8051)
                return 0;
 
        ret = phy_read(phydev, MII_BMSR);
@@ -418,7 +418,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
         * the switch does not.
         */
        ret &= BMSR_ERCAP;
-       if (ksz_phy_id == PHY_ID_KSZ8051)
+       if (ksz_8051)
                return ret;
        else
                return !ret;
@@ -426,7 +426,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
 
 static int ksz8051_match_phy_device(struct phy_device *phydev)
 {
-       return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
+       return ksz8051_ksz8795_match_phy_device(phydev, true);
 }
 
 static int ksz8081_config_init(struct phy_device *phydev)
@@ -535,7 +535,7 @@ static int ksz8061_config_init(struct phy_device *phydev)
 
 static int ksz8795_match_phy_device(struct phy_device *phydev)
 {
-       return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
+       return ksz8051_ksz8795_match_phy_device(phydev, false);
 }
 
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
@@ -1760,8 +1760,6 @@ static struct phy_driver ksphy_driver[] = {
        .name           = "Micrel KSZ87XX Switch",
        /* PHY_BASIC_FEATURES */
        .config_init    = kszphy_config_init,
-       .config_aneg    = ksz8873mll_config_aneg,
-       .read_status    = ksz8873mll_read_status,
        .match_phy_device = ksz8795_match_phy_device,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
index 930e49e..7a099c3 100644 (file)
@@ -284,7 +284,7 @@ static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
 static int ppp_connect_channel(struct channel *pch, int unit);
 static int ppp_disconnect_channel(struct channel *pch);
 static void ppp_destroy_channel(struct channel *pch);
-static int unit_get(struct idr *p, void *ptr);
+static int unit_get(struct idr *p, void *ptr, int min);
 static int unit_set(struct idr *p, void *ptr, int n);
 static void unit_put(struct idr *p, int n);
 static void *unit_find(struct idr *p, int n);
@@ -1155,9 +1155,20 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
        mutex_lock(&pn->all_ppp_mutex);
 
        if (unit < 0) {
-               ret = unit_get(&pn->units_idr, ppp);
+               ret = unit_get(&pn->units_idr, ppp, 0);
                if (ret < 0)
                        goto err;
+               if (!ifname_is_set) {
+                       while (1) {
+                               snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
+                               if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name))
+                                       break;
+                               unit_put(&pn->units_idr, ret);
+                               ret = unit_get(&pn->units_idr, ppp, ret + 1);
+                               if (ret < 0)
+                                       goto err;
+                       }
+               }
        } else {
                /* Caller asked for a specific unit number. Fail with -EEXIST
                 * if unavailable. For backward compatibility, return -EEXIST
@@ -1306,7 +1317,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
         * the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
         * userspace to infer the device name using the PPPIOCGUNIT ioctl.
         */
-       if (!tb[IFLA_IFNAME])
+       if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
                conf.ifname_is_set = false;
 
        err = ppp_dev_configure(src_net, dev, &conf);
@@ -3552,9 +3563,9 @@ static int unit_set(struct idr *p, void *ptr, int n)
 }
 
 /* get new free unit number and associate pointer with it */
-static int unit_get(struct idr *p, void *ptr)
+static int unit_get(struct idr *p, void *ptr, int min)
 {
-       return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
+       return idr_alloc(p, ptr, min, 0, GFP_KERNEL);
 }
 
 /* put unit number back to a pool */
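The ppp hunks above add a minimum to unit_get() so that, when no name was supplied, the driver can skip unit numbers whose ppp<N> name is already taken and retry from the next id. A user-space sketch of that allocate, derive-name, retry loop; the id allocator and the list of taken names are invented for the example:

#include <stdio.h>
#include <string.h>

#define MAX_UNITS 32

static int unit_used[MAX_UNITS];

/* pretend some interfaces already grabbed names outside the id allocator */
static const char *taken_names[] = { "ppp0", "ppp2" };

static int name_taken(const char *name)
{
        for (size_t i = 0; i < sizeof(taken_names) / sizeof(taken_names[0]); i++)
                if (!strcmp(taken_names[i], name))
                        return 1;
        return 0;
}

static int unit_get(int min)
{
        for (int i = min; i < MAX_UNITS; i++)
                if (!unit_used[i]) {
                        unit_used[i] = 1;
                        return i;
                }
        return -1;
}

static void unit_put(int i) { unit_used[i] = 0; }

int main(void)
{
        char name[16];
        int unit = unit_get(0);

        while (unit >= 0) {
                snprintf(name, sizeof(name), "ppp%d", unit);
                if (!name_taken(name))
                        break;                  /* picks ppp1 in this example */
                unit_put(unit);
                unit = unit_get(unit + 1);      /* skip ids whose name is in use */
        }
        printf("picked %s\n", unit >= 0 ? name : "(none)");
        return 0;
}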
index e1994a2..2a1e31d 100644 (file)
@@ -184,6 +184,7 @@ struct asix_common_private {
        struct phy_device *phydev;
        u16 phy_addr;
        char phy_name[20];
+       bool embd_phy;
 };
 
 extern const struct driver_info ax88172a_info;
index ac92bc5..38cda59 100644 (file)
@@ -63,6 +63,29 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
                               value, index, data, size);
 }
 
+static int asix_check_host_enable(struct usbnet *dev, int in_pm)
+{
+       int i, ret;
+       u8 smsr;
+
+       for (i = 0; i < 30; ++i) {
+               ret = asix_set_sw_mii(dev, in_pm);
+               if (ret == -ENODEV || ret == -ETIMEDOUT)
+                       break;
+               usleep_range(1000, 1100);
+               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
+                                   0, 0, 1, &smsr, in_pm);
+               if (ret == -ENODEV)
+                       break;
+               else if (ret < 0)
+                       continue;
+               else if (smsr & AX_HOST_EN)
+                       break;
+       }
+
+       return ret;
+}
+
 static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
 {
        /* Reset the variables that have a lifetime outside of
@@ -467,19 +490,11 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res;
-       u8 smsr;
-       int i = 0;
        int ret;
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 0);
-               if (ret == -ENODEV || ret == -ETIMEDOUT)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 0);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+       ret = asix_check_host_enable(dev, 0);
        if (ret == -ENODEV || ret == -ETIMEDOUT) {
                mutex_unlock(&dev->phy_mutex);
                return ret;
@@ -505,23 +520,14 @@ static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res = cpu_to_le16(val);
-       u8 smsr;
-       int i = 0;
        int ret;
 
        netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
                        phy_id, loc, val);
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 0);
-               if (ret == -ENODEV)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 0);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
 
+       ret = asix_check_host_enable(dev, 0);
        if (ret == -ENODEV)
                goto out;
 
@@ -561,19 +567,11 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res;
-       u8 smsr;
-       int i = 0;
        int ret;
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 1);
-               if (ret == -ENODEV || ret == -ETIMEDOUT)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 1);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+       ret = asix_check_host_enable(dev, 1);
        if (ret == -ENODEV || ret == -ETIMEDOUT) {
                mutex_unlock(&dev->phy_mutex);
                return ret;
@@ -595,22 +593,14 @@ asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res = cpu_to_le16(val);
-       u8 smsr;
-       int i = 0;
        int ret;
 
        netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
                        phy_id, loc, val);
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 1);
-               if (ret == -ENODEV)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 1);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+       ret = asix_check_host_enable(dev, 1);
        if (ret == -ENODEV) {
                mutex_unlock(&dev->phy_mutex);
                return;
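
Aside (not part of the patch above): the asix hunks fold four copies of the same MII busy-wait loop into asix_check_host_enable(), which polls a bounded number of times and only aborts early on fatal errors. A minimal user-space sketch of that polling shape, with hypothetical request_access()/read_status() stand-ins for the USB accessors and usleep() standing in for usleep_range():

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    #define HOST_EN 0x01

    /* Hypothetical stand-ins for the device accessors. */
    static int request_access(void) { return 0; }
    static int read_status(unsigned char *st) { *st = HOST_EN; return 0; }

    /*
     * Poll until the device reports host access, bounded to 30 tries.
     * Fatal errors (-ENODEV here) abort immediately; transient read
     * failures just retry, mirroring the shape of asix_check_host_enable().
     */
    static int check_host_enable(void)
    {
        int i, ret = -ETIMEDOUT;
        unsigned char st;

        for (i = 0; i < 30; i++) {
            ret = request_access();
            if (ret == -ENODEV)
                break;
            usleep(1000);
            ret = read_status(&st);
            if (ret == -ENODEV)
                break;
            if (ret < 0)
                continue;
            if (st & HOST_EN)
                break;
        }
        return ret;
    }

    int main(void)
    {
        printf("check_host_enable() = %d\n", check_host_enable());
        return 0;
    }
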
index 2c11521..dc87e8c 100644 (file)
@@ -354,24 +354,23 @@ out:
 static int ax88772_hw_reset(struct usbnet *dev, int in_pm)
 {
        struct asix_data *data = (struct asix_data *)&dev->data;
-       int ret, embd_phy;
+       struct asix_common_private *priv = dev->driver_priv;
        u16 rx_ctl;
+       int ret;
 
        ret = asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_2 |
                              AX_GPIO_GPO2EN, 5, in_pm);
        if (ret < 0)
                goto out;
 
-       embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
-
-       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy,
+       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy,
                             0, 0, NULL, in_pm);
        if (ret < 0) {
                netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
                goto out;
        }
 
-       if (embd_phy) {
+       if (priv->embd_phy) {
                ret = asix_sw_reset(dev, AX_SWRESET_IPPD, in_pm);
                if (ret < 0)
                        goto out;
@@ -449,17 +448,16 @@ out:
 static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
 {
        struct asix_data *data = (struct asix_data *)&dev->data;
-       int ret, embd_phy;
+       struct asix_common_private *priv = dev->driver_priv;
        u16 rx_ctl, phy14h, phy15h, phy16h;
        u8 chipcode = 0;
+       int ret;
 
        ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm);
        if (ret < 0)
                goto out;
 
-       embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
-
-       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy |
+       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy |
                             AX_PHYSEL_SSEN, 0, 0, NULL, in_pm);
        if (ret < 0) {
                netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
@@ -683,12 +681,6 @@ static int ax88772_init_phy(struct usbnet *dev)
        struct asix_common_private *priv = dev->driver_priv;
        int ret;
 
-       ret = asix_read_phy_addr(dev, true);
-       if (ret < 0)
-               return ret;
-
-       priv->phy_addr = ret;
-
        snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
                 priv->mdio->id, priv->phy_addr);
 
@@ -716,6 +708,12 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        int ret, i;
        u32 phyid;
 
+       priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       dev->driver_priv = priv;
+
        usbnet_get_endpoints(dev, intf);
 
        /* Maybe the boot loader passed the MAC address via device tree */
@@ -751,6 +749,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
        dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
 
+       ret = asix_read_phy_addr(dev, true);
+       if (ret < 0)
+               return ret;
+
+       priv->phy_addr = ret;
+       priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10);
+
        asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
        chipcode &= AX_CHIPCODE_MASK;
 
@@ -773,12 +778,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
                dev->rx_urb_size = 2048;
        }
 
-       priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       dev->driver_priv = priv;
-
        priv->presvd_phy_bmcr = 0;
        priv->presvd_phy_advertise = 0;
        if (chipcode == AX_AX88772_CHIPCODE) {
@@ -817,6 +816,12 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
        asix_rx_fixup_common_free(dev->driver_priv);
 }
 
+static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+       asix_rx_fixup_common_free(dev->driver_priv);
+       kfree(dev->driver_priv);
+}
+
 static const struct ethtool_ops ax88178_ethtool_ops = {
        .get_drvinfo            = asix_get_drvinfo,
        .get_link               = asix_get_link,
@@ -1225,7 +1230,7 @@ static const struct driver_info ax88772b_info = {
 static const struct driver_info ax88178_info = {
        .description = "ASIX AX88178 USB 2.0 Ethernet",
        .bind = ax88178_bind,
-       .unbind = ax88772_unbind,
+       .unbind = ax88178_unbind,
        .status = asix_status,
        .link_reset = ax88178_link_reset,
        .reset = ax88178_reset,
index 2548938..6d092d7 100644 (file)
@@ -1154,7 +1154,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 {
        struct phy_device *phydev = dev->net->phydev;
        struct ethtool_link_ksettings ecmd;
-       int ladv, radv, ret;
+       int ladv, radv, ret, link;
        u32 buf;
 
        /* clear LAN78xx interrupt status */
@@ -1162,9 +1162,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
        if (unlikely(ret < 0))
                return -EIO;
 
+       mutex_lock(&phydev->lock);
        phy_read_status(phydev);
+       link = phydev->link;
+       mutex_unlock(&phydev->lock);
 
-       if (!phydev->link && dev->link_on) {
+       if (!link && dev->link_on) {
                dev->link_on = false;
 
                /* reset MAC */
@@ -1177,7 +1180,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                        return -EIO;
 
                del_timer(&dev->stat_monitor);
-       } else if (phydev->link && !dev->link_on) {
+       } else if (link && !dev->link_on) {
                dev->link_on = true;
 
                phy_ethtool_ksettings_get(phydev, &ecmd);
@@ -1466,9 +1469,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
 
 static u32 lan78xx_get_link(struct net_device *net)
 {
+       u32 link;
+
+       mutex_lock(&net->phydev->lock);
        phy_read_status(net->phydev);
+       link = net->phydev->link;
+       mutex_unlock(&net->phydev->lock);
 
-       return net->phydev->link;
+       return link;
 }
 
 static void lan78xx_get_drvinfo(struct net_device *net,
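
Aside (illustration only): the lan78xx hunks take phydev->lock around phy_read_status() and copy phydev->link into a local before dropping the lock, so callers act on a consistent snapshot rather than a field another context may be rewriting. A rough user-space analogue of that snapshot-under-lock pattern, with a pthread mutex standing in for the phydev lock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct phy_state {
        pthread_mutex_t lock;
        bool link;
    };

    static struct phy_state phy = { PTHREAD_MUTEX_INITIALIZER, false };

    /* Pretend refresh of the link state, normally done by the PHY layer. */
    static void refresh_status(struct phy_state *p)
    {
        p->link = true;
    }

    /*
     * Read the state under the lock and return a private snapshot, so the
     * caller never dereferences a field that may change under it.
     */
    static bool link_is_up(struct phy_state *p)
    {
        bool link;

        pthread_mutex_lock(&p->lock);
        refresh_status(p);
        link = p->link;
        pthread_mutex_unlock(&p->lock);

        return link;
    }

    int main(void)
    {
        printf("link up: %d\n", link_is_up(&phy));
        return 0;
    }
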
index 9a90718..9f9dd0d 100644 (file)
@@ -1,31 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- *  Copyright (c) 1999-2013 Petko Manolov (petkan@nucleusys.com)
+ *  Copyright (c) 1999-2021 Petko Manolov (petkan@nucleusys.com)
  *
- *     ChangeLog:
- *             ....    Most of the time spent on reading sources & docs.
- *             v0.2.x  First official release for the Linux kernel.
- *             v0.3.0  Beutified and structured, some bugs fixed.
- *             v0.3.x  URBifying bulk requests and bugfixing. First relatively
- *                     stable release. Still can touch device's registers only
- *                     from top-halves.
- *             v0.4.0  Control messages remained unurbified are now URBs.
- *                     Now we can touch the HW at any time.
- *             v0.4.9  Control urbs again use process context to wait. Argh...
- *                     Some long standing bugs (enable_net_traffic) fixed.
- *                     Also nasty trick about resubmiting control urb from
- *                     interrupt context used. Please let me know how it
- *                     behaves. Pegasus II support added since this version.
- *                     TODO: suppressing HCD warnings spewage on disconnect.
- *             v0.4.13 Ethernet address is now set at probe(), not at open()
- *                     time as this seems to break dhcpd.
- *             v0.5.0  branch to 2.5.x kernels
- *             v0.5.1  ethtool support added
- *             v0.5.5  rx socket buffers are in a pool and the their allocation
- *                     is out of the interrupt routine.
- *             ...
- *             v0.9.3  simplified [get|set]_register(s), async update registers
- *                     logic revisited, receive skb_pool removed.
  */
 
 #include <linux/sched.h>
@@ -45,7 +21,6 @@
 /*
  * Version Information
  */
-#define DRIVER_VERSION "v0.9.3 (2013/04/25)"
 #define DRIVER_AUTHOR "Petko Manolov <petkan@nucleusys.com>"
 #define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver"
 
@@ -132,9 +107,15 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
 static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
                         const void *data)
 {
-       return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
+       int ret;
+
+       ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
                                    PEGASUS_REQT_WRITE, 0, indx, data, size,
                                    1000, GFP_NOIO);
+       if (ret < 0)
+               netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
+
+       return ret;
 }
 
 /*
@@ -145,10 +126,15 @@ static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
 static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
 {
        void *buf = &data;
+       int ret;
 
-       return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
+       ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
                                    PEGASUS_REQT_WRITE, data, indx, buf, 1,
                                    1000, GFP_NOIO);
+       if (ret < 0)
+               netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
+
+       return ret;
 }
 
 static int update_eth_regs_async(pegasus_t *pegasus)
@@ -188,10 +174,9 @@ static int update_eth_regs_async(pegasus_t *pegasus)
 
 static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
 {
-       int i;
-       __u8 data[4] = { phy, 0, 0, indx };
+       int i, ret;
        __le16 regdi;
-       int ret = -ETIMEDOUT;
+       __u8 data[4] = { phy, 0, 0, indx };
 
        if (cmd & PHY_WRITE) {
                __le16 *t = (__le16 *) & data[1];
@@ -207,12 +192,15 @@ static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
                if (data[0] & PHY_DONE)
                        break;
        }
-       if (i >= REG_TIMEOUT)
+       if (i >= REG_TIMEOUT) {
+               ret = -ETIMEDOUT;
                goto fail;
+       }
        if (cmd & PHY_READ) {
                ret = get_registers(p, PhyData, 2, &regdi);
+               if (ret < 0)
+                       goto fail;
                *regd = le16_to_cpu(regdi);
-               return ret;
        }
        return 0;
 fail:
@@ -235,9 +223,13 @@ static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
 static int mdio_read(struct net_device *dev, int phy_id, int loc)
 {
        pegasus_t *pegasus = netdev_priv(dev);
+       int ret;
        u16 res;
 
-       read_mii_word(pegasus, phy_id, loc, &res);
+       ret = read_mii_word(pegasus, phy_id, loc, &res);
+       if (ret < 0)
+               return ret;
+
        return (int)res;
 }
 
@@ -251,10 +243,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 {
-       int i;
-       __u8 tmp = 0;
+       int ret, i;
        __le16 retdatai;
-       int ret;
+       __u8 tmp = 0;
 
        set_register(pegasus, EpromCtrl, 0);
        set_register(pegasus, EpromOffset, index);
@@ -262,21 +253,25 @@ static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 
        for (i = 0; i < REG_TIMEOUT; i++) {
                ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
+               if (ret < 0)
+                       goto fail;
                if (tmp & EPROM_DONE)
                        break;
-               if (ret == -ESHUTDOWN)
-                       goto fail;
        }
-       if (i >= REG_TIMEOUT)
+       if (i >= REG_TIMEOUT) {
+               ret = -ETIMEDOUT;
                goto fail;
+       }
 
        ret = get_registers(pegasus, EpromData, 2, &retdatai);
+       if (ret < 0)
+               goto fail;
        *retdata = le16_to_cpu(retdatai);
        return ret;
 
 fail:
-       netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
-       return -ETIMEDOUT;
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+       return ret;
 }
 
 #ifdef PEGASUS_WRITE_EEPROM
@@ -324,10 +319,10 @@ static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
        return ret;
 
 fail:
-       netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
        return -ETIMEDOUT;
 }
-#endif                         /* PEGASUS_WRITE_EEPROM */
+#endif /* PEGASUS_WRITE_EEPROM */
 
 static inline int get_node_id(pegasus_t *pegasus, u8 *id)
 {
@@ -367,19 +362,21 @@ static void set_ethernet_addr(pegasus_t *pegasus)
        return;
 err:
        eth_hw_addr_random(pegasus->net);
-       dev_info(&pegasus->intf->dev, "software assigned MAC address.\n");
+       netif_dbg(pegasus, drv, pegasus->net, "software assigned MAC address.\n");
 
        return;
 }
 
 static inline int reset_mac(pegasus_t *pegasus)
 {
+       int ret, i;
        __u8 data = 0x8;
-       int i;
 
        set_register(pegasus, EthCtrl1, data);
        for (i = 0; i < REG_TIMEOUT; i++) {
-               get_registers(pegasus, EthCtrl1, 1, &data);
+               ret = get_registers(pegasus, EthCtrl1, 1, &data);
+               if (ret < 0)
+                       goto fail;
                if (~data & 0x08) {
                        if (loopback)
                                break;
@@ -402,22 +399,29 @@ static inline int reset_mac(pegasus_t *pegasus)
        }
        if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) {
                __u16 auxmode;
-               read_mii_word(pegasus, 3, 0x1b, &auxmode);
+               ret = read_mii_word(pegasus, 3, 0x1b, &auxmode);
+               if (ret < 0)
+                       goto fail;
                auxmode |= 4;
                write_mii_word(pegasus, 3, 0x1b, &auxmode);
        }
 
        return 0;
+fail:
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+       return ret;
 }
 
 static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
 {
-       __u16 linkpart;
-       __u8 data[4];
        pegasus_t *pegasus = netdev_priv(dev);
        int ret;
+       __u16 linkpart;
+       __u8 data[4];
 
-       read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
+       ret = read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
+       if (ret < 0)
+               goto fail;
        data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
        data[1] = 0;
        if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
@@ -435,11 +439,16 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
            usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 ||
            usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
                u16 auxmode;
-               read_mii_word(pegasus, 0, 0x1b, &auxmode);
+               ret = read_mii_word(pegasus, 0, 0x1b, &auxmode);
+               if (ret < 0)
+                       goto fail;
                auxmode |= 4;
                write_mii_word(pegasus, 0, 0x1b, &auxmode);
        }
 
+       return ret;
+fail:
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
        return ret;
 }
 
@@ -447,9 +456,9 @@ static void read_bulk_callback(struct urb *urb)
 {
        pegasus_t *pegasus = urb->context;
        struct net_device *net;
+       u8 *buf = urb->transfer_buffer;
        int rx_status, count = urb->actual_length;
        int status = urb->status;
-       u8 *buf = urb->transfer_buffer;
        __u16 pkt_len;
 
        if (!pegasus)
@@ -735,12 +744,16 @@ static inline void disable_net_traffic(pegasus_t *pegasus)
        set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
 }
 
-static inline void get_interrupt_interval(pegasus_t *pegasus)
+static inline int get_interrupt_interval(pegasus_t *pegasus)
 {
        u16 data;
        u8 interval;
+       int ret;
+
+       ret = read_eprom_word(pegasus, 4, &data);
+       if (ret < 0)
+               return ret;
 
-       read_eprom_word(pegasus, 4, &data);
        interval = data >> 8;
        if (pegasus->usb->speed != USB_SPEED_HIGH) {
                if (interval < 0x80) {
@@ -755,6 +768,8 @@ static inline void get_interrupt_interval(pegasus_t *pegasus)
                }
        }
        pegasus->intr_interval = interval;
+
+       return 0;
 }
 
 static void set_carrier(struct net_device *net)
@@ -820,7 +835,7 @@ static int pegasus_open(struct net_device *net)
        if (!pegasus->rx_skb)
                goto exit;
 
-       res = set_registers(pegasus, EthID, 6, net->dev_addr);
+       set_registers(pegasus, EthID, 6, net->dev_addr);
 
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
@@ -880,7 +895,6 @@ static void pegasus_get_drvinfo(struct net_device *dev,
        pegasus_t *pegasus = netdev_priv(dev);
 
        strlcpy(info->driver, driver_name, sizeof(info->driver));
-       strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
        usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
 }
 
@@ -998,8 +1012,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
                data[0] = pegasus->phy;
                fallthrough;
        case SIOCDEVPRIVATE + 1:
-               read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
-               res = 0;
+               res = read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
                break;
        case SIOCDEVPRIVATE + 2:
                if (!capable(CAP_NET_ADMIN))
@@ -1033,22 +1046,25 @@ static void pegasus_set_multicast(struct net_device *net)
 
 static __u8 mii_phy_probe(pegasus_t *pegasus)
 {
-       int i;
+       int i, ret;
        __u16 tmp;
 
        for (i = 0; i < 32; i++) {
-               read_mii_word(pegasus, i, MII_BMSR, &tmp);
+               ret = read_mii_word(pegasus, i, MII_BMSR, &tmp);
+               if (ret < 0)
+                       goto fail;
                if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0)
                        continue;
                else
                        return i;
        }
-
+fail:
        return 0xff;
 }
 
 static inline void setup_pegasus_II(pegasus_t *pegasus)
 {
+       int ret;
        __u8 data = 0xa5;
 
        set_register(pegasus, Reg1d, 0);
@@ -1060,7 +1076,9 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
                set_register(pegasus, Reg7b, 2);
 
        set_register(pegasus, 0x83, data);
-       get_registers(pegasus, 0x83, 1, &data);
+       ret = get_registers(pegasus, 0x83, 1, &data);
+       if (ret < 0)
+               goto fail;
 
        if (data == 0xa5)
                pegasus->chip = 0x8513;
@@ -1075,6 +1093,10 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
                set_register(pegasus, Reg81, 6);
        else
                set_register(pegasus, Reg81, 2);
+
+       return;
+fail:
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
 }
 
 static void check_carrier(struct work_struct *work)
@@ -1149,7 +1171,9 @@ static int pegasus_probe(struct usb_interface *intf,
                                | NETIF_MSG_PROBE | NETIF_MSG_LINK);
 
        pegasus->features = usb_dev_id[dev_index].private;
-       get_interrupt_interval(pegasus);
+       res = get_interrupt_interval(pegasus);
+       if (res)
+               goto out2;
        if (reset_mac(pegasus)) {
                dev_err(&intf->dev, "can't reset MAC\n");
                res = -EIO;
@@ -1296,7 +1320,7 @@ static void __init parse_id(char *id)
 
 static int __init pegasus_init(void)
 {
-       pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION);
+       pr_info("%s: " DRIVER_DESC "\n", driver_name);
        if (devid)
                parse_id(devid);
        return usb_register(&pegasus_driver);
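
Aside (illustration only): most of the pegasus changes follow one theme, checking the return value of every register accessor and bailing out through a single fail label instead of silently continuing with stale data. A self-contained sketch of that shape, with a fake read_reg() standing in for get_registers():

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical register read; returns 0 or a negative errno. */
    static int read_reg(int reg, unsigned char *val)
    {
        *val = (unsigned char)reg;
        return 0;
    }

    static int setup_device(void)
    {
        unsigned char a, b;
        int ret;

        ret = read_reg(0x10, &a);
        if (ret < 0)
            goto fail;

        ret = read_reg(0x11, &b);
        if (ret < 0)
            goto fail;

        printf("regs: %02x %02x\n", a, b);
        return 0;

    fail:
        fprintf(stderr, "setup_device failed: %d\n", ret);
        return ret;
    }

    int main(void)
    {
        return setup_device() ? 1 : 0;
    }
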
index e09b107..7983237 100644 (file)
@@ -3955,17 +3955,28 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
        case RTL_VER_06:
                ocp_write_byte(tp, type, PLA_BP_EN, 0);
                break;
+       case RTL_VER_14:
+               ocp_write_word(tp, type, USB_BP2_EN, 0);
+
+               ocp_write_word(tp, type, USB_BP_8, 0);
+               ocp_write_word(tp, type, USB_BP_9, 0);
+               ocp_write_word(tp, type, USB_BP_10, 0);
+               ocp_write_word(tp, type, USB_BP_11, 0);
+               ocp_write_word(tp, type, USB_BP_12, 0);
+               ocp_write_word(tp, type, USB_BP_13, 0);
+               ocp_write_word(tp, type, USB_BP_14, 0);
+               ocp_write_word(tp, type, USB_BP_15, 0);
+               break;
        case RTL_VER_08:
        case RTL_VER_09:
        case RTL_VER_10:
        case RTL_VER_11:
        case RTL_VER_12:
        case RTL_VER_13:
-       case RTL_VER_14:
        case RTL_VER_15:
        default:
                if (type == MCU_TYPE_USB) {
-                       ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
+                       ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
 
                        ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0);
                        ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0);
@@ -4331,7 +4342,6 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
                case RTL_VER_11:
                case RTL_VER_12:
                case RTL_VER_13:
-               case RTL_VER_14:
                case RTL_VER_15:
                        fw_reg = 0xf800;
                        bp_ba_addr = PLA_BP_BA;
@@ -4339,6 +4349,13 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
                        bp_start = PLA_BP_0;
                        max_bp = 8;
                        break;
+               case RTL_VER_14:
+                       fw_reg = 0xf800;
+                       bp_ba_addr = PLA_BP_BA;
+                       bp_en_addr = USB_BP2_EN;
+                       bp_start = PLA_BP_0;
+                       max_bp = 16;
+                       break;
                default:
                        goto out;
                }
index 56c3f85..eee4936 100644 (file)
@@ -63,7 +63,7 @@ static const unsigned long guest_offloads[] = {
        VIRTIO_NET_F_GUEST_CSUM
 };
 
-#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
                                (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
                                (1ULL << VIRTIO_NET_F_GUEST_UFO))
@@ -2515,7 +2515,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
                virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
                virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
-               NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
+               NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
                return -EOPNOTSUPP;
        }
 
@@ -2646,15 +2646,15 @@ static int virtnet_set_features(struct net_device *dev,
        u64 offloads;
        int err;
 
-       if ((dev->features ^ features) & NETIF_F_LRO) {
+       if ((dev->features ^ features) & NETIF_F_GRO_HW) {
                if (vi->xdp_enabled)
                        return -EBUSY;
 
-               if (features & NETIF_F_LRO)
+               if (features & NETIF_F_GRO_HW)
                        offloads = vi->guest_offloads_capable;
                else
                        offloads = vi->guest_offloads_capable &
-                                  ~GUEST_OFFLOAD_LRO_MASK;
+                                  ~GUEST_OFFLOAD_GRO_HW_MASK;
 
                err = virtnet_set_guest_offloads(vi, offloads);
                if (err)
@@ -3134,9 +3134,9 @@ static int virtnet_probe(struct virtio_device *vdev)
                dev->features |= NETIF_F_RXCSUM;
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
-               dev->features |= NETIF_F_LRO;
+               dev->features |= NETIF_F_GRO_HW;
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
-               dev->hw_features |= NETIF_F_LRO;
+               dev->hw_features |= NETIF_F_GRO_HW;
 
        dev->vlan_features = dev->features;
 
index 2b1b944..8bbe2a7 100644 (file)
@@ -1367,6 +1367,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
        bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
        bool is_ndisc = ipv6_ndisc_frame(skb);
 
+       nf_reset_ct(skb);
+
        /* loopback, multicast & non-ND link-local traffic; do not push through
         * packet taps again. Reset pkt_type for upper layers to process skb.
         * For strict packets with a source LLA, determine the dst using the
@@ -1429,6 +1431,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
        skb->skb_iif = vrf_dev->ifindex;
        IPCB(skb)->flags |= IPSKB_L3SLAVE;
 
+       nf_reset_ct(skb);
+
        if (ipv4_is_multicast(ip_hdr(skb)->daddr))
                goto out;
 
index 2403490..b4b1f75 100644 (file)
@@ -37,6 +37,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
        u32 sha1 = 0;
        u16 mac_type = 0, rf_id = 0;
        u8 *pnvm_data = NULL, *tmp;
+       bool hw_match = false;
        u32 size = 0;
        int ret;
 
@@ -83,6 +84,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                                break;
                        }
 
+                       if (hw_match)
+                               break;
+
                        mac_type = le16_to_cpup((__le16 *)data);
                        rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
 
@@ -90,15 +94,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                                     "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
                                     mac_type, rf_id);
 
-                       if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) ||
-                           rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
-                               IWL_DEBUG_FW(trans,
-                                            "HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n",
-                                            CSR_HW_REV_TYPE(trans->hw_rev), trans->hw_rf_id);
-                               ret = -ENOENT;
-                               goto out;
-                       }
-
+                       if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) &&
+                           rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id))
+                               hw_match = true;
                        break;
                case IWL_UCODE_TLV_SEC_RT: {
                        struct iwl_pnvm_section *section = (void *)data;
@@ -149,6 +147,15 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
        }
 
 done:
+       if (!hw_match) {
+               IWL_DEBUG_FW(trans,
+                            "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n",
+                            CSR_HW_REV_TYPE(trans->hw_rev),
+                            CSR_HW_RFID_TYPE(trans->hw_rf_id));
+               ret = -ENOENT;
+               goto out;
+       }
+
        if (!size) {
                IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n");
                ret = -ENOENT;
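
Aside (illustration only): the PNVM change stops failing on the first IWL_UCODE_TLV_HW_TYPE entry that does not match and instead records hw_match while parsing, rejecting the section only after the whole TLV stream has been walked. A rough sketch of that scan-then-decide pattern, with a plain array standing in for the TLV stream:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct hw_type { int mac_type; int rf_id; };

    /*
     * Walk every advertised hardware type; succeed if any entry matches
     * the running hardware instead of failing on the first mismatch.
     */
    static int check_hw_types(const struct hw_type *tbl, int n,
                              int mac_type, int rf_id)
    {
        bool hw_match = false;
        int i;

        for (i = 0; i < n; i++) {
            if (hw_match)
                break;
            if (tbl[i].mac_type == mac_type && tbl[i].rf_id == rf_id)
                hw_match = true;
        }

        return hw_match ? 0 : -ENOENT;
    }

    int main(void)
    {
        const struct hw_type tbl[] = { { 1, 10 }, { 2, 20 }, { 3, 30 } };

        printf("match: %d\n", check_hw_types(tbl, 3, 2, 20));
        printf("no match: %d\n", check_hw_types(tbl, 3, 9, 9));
        return 0;
    }
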
index 16baee3..0b8a0cd 100644 (file)
@@ -1110,12 +1110,80 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
                      iwl_cfg_bz_a0_mr_a0, iwl_ax211_name),
 
+/* SoF with JF2 */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+/* SoF with JF */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
 /* So with GF */
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
                      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
                      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
                      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-                     iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name)
+                     iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
+
+/* So with JF2 */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+/* So with JF */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
 
 #endif /* CONFIG_IWLMVM */
 };
index 863aa18..4396077 100644 (file)
@@ -111,7 +111,7 @@ mt7915_mcu_get_cipher(int cipher)
        case WLAN_CIPHER_SUITE_SMS4:
                return MCU_CIPHER_WAPI;
        default:
-               return MT_CIPHER_NONE;
+               return MCU_CIPHER_NONE;
        }
 }
 
index edd3ba3..e68a562 100644 (file)
@@ -1073,7 +1073,8 @@ enum {
 };
 
 enum mcu_cipher_type {
-       MCU_CIPHER_WEP40 = 1,
+       MCU_CIPHER_NONE = 0,
+       MCU_CIPHER_WEP40,
        MCU_CIPHER_WEP104,
        MCU_CIPHER_WEP128,
        MCU_CIPHER_TKIP,
index cd690c6..9fbaacc 100644 (file)
@@ -111,7 +111,7 @@ mt7921_mcu_get_cipher(int cipher)
        case WLAN_CIPHER_SUITE_SMS4:
                return MCU_CIPHER_WAPI;
        default:
-               return MT_CIPHER_NONE;
+               return MCU_CIPHER_NONE;
        }
 }
 
index d76cf8f..de3c091 100644 (file)
@@ -199,7 +199,8 @@ struct sta_rec_sec {
 } __packed;
 
 enum mcu_cipher_type {
-       MCU_CIPHER_WEP40 = 1,
+       MCU_CIPHER_NONE = 0,
+       MCU_CIPHER_WEP40,
        MCU_CIPHER_WEP104,
        MCU_CIPHER_WEP128,
        MCU_CIPHER_TKIP,
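
Aside (illustration only): both mt76 hunks add MCU_CIPHER_NONE = 0 so the default branch of the cipher lookup returns a sentinel from the correct enum rather than the unrelated MT_CIPHER_NONE value. The general idiom, as a small standalone sketch with illustrative names:

    #include <stdio.h>

    enum mcu_cipher_type {
        MCU_CIPHER_NONE = 0,    /* explicit "no cipher" sentinel */
        MCU_CIPHER_WEP40,
        MCU_CIPHER_TKIP,
        MCU_CIPHER_AES_CCMP,
    };

    /*
     * Map an arbitrary suite id to the firmware enum; unknown suites fall
     * back to the sentinel from the same enum rather than a foreign value.
     */
    static enum mcu_cipher_type get_cipher(int suite)
    {
        switch (suite) {
        case 1: return MCU_CIPHER_WEP40;
        case 2: return MCU_CIPHER_TKIP;
        case 4: return MCU_CIPHER_AES_CCMP;
        default: return MCU_CIPHER_NONE;
        }
    }

    int main(void)
    {
        printf("%d %d\n", get_cipher(4), get_cipher(99));
        return 0;
    }
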
index 804e6c4..519361e 100644 (file)
@@ -64,10 +64,9 @@ static struct ipc_chnl_cfg modem_cfg[] = {
 
 int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
 {
-       int array_size = ARRAY_SIZE(modem_cfg);
-
-       if (index >= array_size) {
-               pr_err("index: %d and array_size %d", index, array_size);
+       if (index >= ARRAY_SIZE(modem_cfg)) {
+               pr_err("index: %d and array size %zu", index,
+                      ARRAY_SIZE(modem_cfg));
                return -ECHRNG;
        }
 
index 45e6923..f861994 100644 (file)
 #define IOSM_CP_VERSION 0x0100UL
 
 /* DL dir Aggregation support mask */
-#define DL_AGGR BIT(23)
+#define DL_AGGR BIT(9)
 
 /* UL dir Aggregation support mask */
-#define UL_AGGR BIT(22)
+#define UL_AGGR BIT(8)
 
 /* UL flow credit support mask */
 #define UL_FLOW_CREDIT BIT(21)
index 562de27..bdb2d32 100644 (file)
@@ -320,7 +320,7 @@ static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
                return;
        }
 
-       ul_credits = fct->vfl.nr_of_bytes;
+       ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);
 
        dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
                if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
@@ -586,7 +586,7 @@ static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
                qlt->reserved[0] = 0;
                qlt->reserved[1] = 0;
 
-               qlt->vfl.nr_of_bytes = session->ul_list.qlen;
+               qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);
 
                /* Add QLT to the transfer list. */
                skb_queue_tail(&ipc_mux->channel->ul_list,
index 4a74e3c..aae83db 100644 (file)
@@ -106,7 +106,7 @@ struct mux_lite_cmdh {
  * @nr_of_bytes:       Number of bytes available to transmit in the queue.
  */
 struct mux_lite_vfl {
-       u32 nr_of_bytes;
+       __le32 nr_of_bytes;
 };
 
 /**
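
Aside (illustration only): the iosm hunks type the on-wire nr_of_bytes field as __le32 and convert at the boundary with cpu_to_le32()/le32_to_cpu(), so the structure means the same thing on big- and little-endian hosts. A user-space analogue of that boundary conversion using glibc's htole32()/le32toh() from endian.h:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* On-wire descriptor: the device always expects little-endian fields. */
    struct vfl {
        uint32_t nr_of_bytes;   /* stored little-endian */
    };

    static void vfl_set_bytes(struct vfl *v, uint32_t cpu_val)
    {
        v->nr_of_bytes = htole32(cpu_val);      /* CPU -> wire */
    }

    static uint32_t vfl_get_bytes(const struct vfl *v)
    {
        return le32toh(v->nr_of_bytes);         /* wire -> CPU */
    }

    int main(void)
    {
        struct vfl v;

        vfl_set_bytes(&v, 4096);
        printf("credits: %u\n", vfl_get_bytes(&v));
        return 0;
    }
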
index 91109e2..35d5907 100644 (file)
@@ -412,8 +412,8 @@ struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
        }
 
        if (p_td->buffer.address != IPC_CB(skb)->mapping) {
-               dev_err(ipc_protocol->dev, "invalid buf=%p or skb=%p",
-                       (void *)p_td->buffer.address, skb->data);
+               dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p",
+                       (unsigned long long)p_td->buffer.address, skb->data);
                ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
                skb = NULL;
                goto ret;
index b2357ad..b571d9c 100644 (file)
@@ -228,7 +228,7 @@ static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
 
        RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
        /* unregistering includes synchronize_net() */
-       unregister_netdevice(dev);
+       unregister_netdevice_queue(dev, head);
 
 unlock:
        mutex_unlock(&ipc_wwan->if_mutex);
index 1bc6b69..e4d0f69 100644 (file)
@@ -41,14 +41,14 @@ struct mhi_wwan_dev {
 /* Increment RX budget and schedule RX refill if necessary */
 static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
 {
-       spin_lock(&mhiwwan->rx_lock);
+       spin_lock_bh(&mhiwwan->rx_lock);
 
        mhiwwan->rx_budget++;
 
        if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
                schedule_work(&mhiwwan->rx_refill);
 
-       spin_unlock(&mhiwwan->rx_lock);
+       spin_unlock_bh(&mhiwwan->rx_lock);
 }
 
 /* Decrement RX budget if non-zero and return true on success */
@@ -56,7 +56,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
 {
        bool ret = false;
 
-       spin_lock(&mhiwwan->rx_lock);
+       spin_lock_bh(&mhiwwan->rx_lock);
 
        if (mhiwwan->rx_budget) {
                mhiwwan->rx_budget--;
@@ -64,7 +64,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
                        ret = true;
        }
 
-       spin_unlock(&mhiwwan->rx_lock);
+       spin_unlock_bh(&mhiwwan->rx_lock);
 
        return ret;
 }
@@ -130,9 +130,9 @@ static void mhi_wwan_ctrl_stop(struct wwan_port *port)
 {
        struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
 
-       spin_lock(&mhiwwan->rx_lock);
+       spin_lock_bh(&mhiwwan->rx_lock);
        clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
-       spin_unlock(&mhiwwan->rx_lock);
+       spin_unlock_bh(&mhiwwan->rx_lock);
 
        cancel_work_sync(&mhiwwan->rx_refill);
 
index 674a81d..35ece98 100644 (file)
@@ -164,11 +164,14 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
                goto done_unlock;
 
        id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
-       if (id < 0)
+       if (id < 0) {
+               wwandev = ERR_PTR(id);
                goto done_unlock;
+       }
 
        wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
        if (!wwandev) {
+               wwandev = ERR_PTR(-ENOMEM);
                ida_free(&wwan_dev_ids, id);
                goto done_unlock;
        }
@@ -182,7 +185,8 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
        err = device_register(&wwandev->dev);
        if (err) {
                put_device(&wwandev->dev);
-               wwandev = NULL;
+               wwandev = ERR_PTR(err);
+               goto done_unlock;
        }
 
 done_unlock:
@@ -1014,8 +1018,8 @@ int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
                return -EINVAL;
 
        wwandev = wwan_create_dev(parent);
-       if (!wwandev)
-               return -ENOMEM;
+       if (IS_ERR(wwandev))
+               return PTR_ERR(wwandev);
 
        if (WARN_ON(wwandev->ops)) {
                wwan_remove_dev(wwandev);
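
Aside (illustration only): the wwan core change makes wwan_create_dev() return an ERR_PTR-encoded errno instead of NULL, so wwan_register_ops() can propagate the real failure reason with PTR_ERR(). A tiny stand-alone re-implementation of that kernel idiom (the real helpers live in include/linux/err.h):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal userspace clones of the kernel's ERR_PTR/IS_ERR/PTR_ERR. */
    static inline void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-4095;
    }

    struct wwan_device { int id; };

    static struct wwan_device *create_dev(int fail)
    {
        struct wwan_device *dev;

        if (fail)
            return ERR_PTR(-ENODEV);    /* report the real errno */

        dev = calloc(1, sizeof(*dev));
        if (!dev)
            return ERR_PTR(-ENOMEM);
        return dev;
    }

    int main(void)
    {
        struct wwan_device *dev = create_dev(1);

        if (IS_ERR(dev)) {
            printf("create_dev failed: %ld\n", PTR_ERR(dev));
            return 1;
        }
        free(dev);
        return 0;
    }
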
index 2403b71..7454782 100644 (file)
@@ -2527,7 +2527,7 @@ static void deactivate_labels(void *region)
 
 static int init_active_labels(struct nd_region *nd_region)
 {
-       int i;
+       int i, rc = 0;
 
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
@@ -2546,13 +2546,14 @@ static int init_active_labels(struct nd_region *nd_region)
                        else if (test_bit(NDD_LABELING, &nvdimm->flags))
                                /* fail, labels needed to disambiguate dpa */;
                        else
-                               return 0;
+                               continue;
 
                        dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
                                        dev_name(&nd_mapping->nvdimm->dev),
                                        test_bit(NDD_LOCKED, &nvdimm->flags)
                                        ? "locked" : "disabled");
-                       return -ENXIO;
+                       rc = -ENXIO;
+                       goto out;
                }
                nd_mapping->ndd = ndd;
                atomic_inc(&nvdimm->busy);
@@ -2586,13 +2587,17 @@ static int init_active_labels(struct nd_region *nd_region)
                        break;
        }
 
-       if (i < nd_region->ndr_mappings) {
+       if (i < nd_region->ndr_mappings)
+               rc = -ENOMEM;
+
+out:
+       if (rc) {
                deactivate_labels(nd_region);
-               return -ENOMEM;
+               return rc;
        }
 
        return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
-                       nd_region);
+                                       nd_region);
 }
 
 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
index c3f3d77..dc0450c 100644 (file)
@@ -33,12 +33,12 @@ config NVME_HWMON
          in the system.
 
 config NVME_FABRICS
+       select NVME_CORE
        tristate
 
 config NVME_RDMA
        tristate "NVM Express over Fabrics RDMA host driver"
        depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
-       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
@@ -55,7 +55,6 @@ config NVME_FC
        tristate "NVM Express over Fabrics FC host driver"
        depends on BLOCK
        depends on HAS_DMA
-       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
@@ -72,7 +71,6 @@ config NVME_TCP
        tristate "NVM Express over Fabrics TCP host driver"
        depends on INET
        depends on BLOCK
-       select NVME_CORE
        select NVME_FABRICS
        select CRYPTO
        select CRYPTO_CRC32C
index cbc5097..dfaacd4 100644 (file)
@@ -12,7 +12,6 @@ obj-$(CONFIG_NVME_TCP)                        += nvme-tcp.o
 nvme-core-y                            := core.o ioctl.o
 nvme-core-$(CONFIG_TRACING)            += trace.o
 nvme-core-$(CONFIG_NVME_MULTIPATH)     += multipath.o
-nvme-core-$(CONFIG_NVM)                        += lightnvm.o
 nvme-core-$(CONFIG_BLK_DEV_ZONED)      += zns.o
 nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS)   += fault_inject.o
 nvme-core-$(CONFIG_NVME_HWMON)         += hwmon.o
index dfd9dec..8679a10 100644 (file)
@@ -587,9 +587,6 @@ static void nvme_free_ns(struct kref *kref)
 {
        struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
 
-       if (ns->ndev)
-               nvme_nvm_unregister(ns);
-
        put_disk(ns->disk);
        nvme_put_ns_head(ns->head);
        nvme_put_ctrl(ns->ctrl);
@@ -968,12 +965,11 @@ void nvme_cleanup_cmd(struct request *req)
 {
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
-               struct page *page = req->special_vec.bv_page;
 
-               if (page == ctrl->discard_page)
+               if (req->special_vec.bv_page == ctrl->discard_page)
                        clear_bit_unlock(0, &ctrl->discard_page_busy);
                else
-                       kfree(page_address(page) + req->special_vec.bv_offset);
+                       kfree(bvec_virt(&req->special_vec));
        }
 }
 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
@@ -1029,7 +1025,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
                return BLK_STS_IOERR;
        }
 
-       cmd->common.command_id = req->tag;
+       nvme_req(req)->genctr++;
+       cmd->common.command_id = nvme_cid(req);
        trace_nvme_setup_cmd(req, cmd);
        return ret;
 }
@@ -1822,7 +1819,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
 static inline bool nvme_first_scan(struct gendisk *disk)
 {
        /* nvme_alloc_ns() scans the disk prior to adding it */
-       return !(disk->flags & GENHD_FL_UP);
+       return !disk_live(disk);
 }
 
 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
@@ -1890,7 +1887,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
                nvme_update_disk_info(ns->head->disk, ns, id);
                blk_stack_limits(&ns->head->disk->queue->limits,
                                 &ns->queue->limits, 0);
-               blk_queue_update_readahead(ns->head->disk->queue);
+               disk_update_readahead(ns->head->disk);
                blk_mq_unfreeze_queue(ns->head->disk->queue);
        }
        return 0;
@@ -3218,9 +3215,6 @@ static const struct attribute_group nvme_ns_id_attr_group = {
 
 const struct attribute_group *nvme_ns_id_attr_groups[] = {
        &nvme_ns_id_attr_group,
-#ifdef CONFIG_NVM
-       &nvme_nvm_attr_group,
-#endif
        NULL,
 };
 
@@ -3729,9 +3723,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
        if (!ns)
                goto out_free_id;
 
-       ns->queue = blk_mq_init_queue(ctrl->tagset);
-       if (IS_ERR(ns->queue))
+       disk = blk_mq_alloc_disk(ctrl->tagset, ns);
+       if (IS_ERR(disk))
                goto out_free_ns;
+       disk->fops = &nvme_bdev_ops;
+       disk->private_data = ns;
+
+       ns->disk = disk;
+       ns->queue = disk->queue;
 
        if (ctrl->opts && ctrl->opts->data_digest)
                blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
@@ -3740,20 +3739,12 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
        if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
                blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
 
-       ns->queue->queuedata = ns;
        ns->ctrl = ctrl;
        kref_init(&ns->kref);
 
        if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
-               goto out_free_queue;
+               goto out_cleanup_disk;
 
-       disk = alloc_disk_node(0, node);
-       if (!disk)
-               goto out_unlink_ns;
-
-       disk->fops = &nvme_bdev_ops;
-       disk->private_data = ns;
-       disk->queue = ns->queue;
        /*
         * Without the multipath code enabled, multiple controller per
         * subsystems are visible as devices and thus we cannot use the
@@ -3762,17 +3753,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
        if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
                sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
                        ns->head->instance);
-       ns->disk = disk;
 
        if (nvme_update_ns_info(ns, id))
-               goto out_put_disk;
-
-       if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
-               if (nvme_nvm_register(ns, disk->disk_name, node)) {
-                       dev_warn(ctrl->device, "LightNVM init failure\n");
-                       goto out_put_disk;
-               }
-       }
+               goto out_unlink_ns;
 
        down_write(&ctrl->namespaces_rwsem);
        list_add_tail(&ns->list, &ctrl->namespaces);
@@ -3789,10 +3772,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
        kfree(id);
 
        return;
- out_put_disk:
-       /* prevent double queue cleanup */
-       ns->disk->queue = NULL;
-       put_disk(ns->disk);
+
  out_unlink_ns:
        mutex_lock(&ctrl->subsys->lock);
        list_del_rcu(&ns->siblings);
@@ -3800,8 +3780,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
                list_del_init(&ns->head->entry);
        mutex_unlock(&ctrl->subsys->lock);
        nvme_put_ns_head(ns->head);
- out_free_queue:
-       blk_cleanup_queue(ns->queue);
+ out_cleanup_disk:
+       blk_cleanup_disk(disk);
  out_free_ns:
        kfree(ns);
  out_free_id:
@@ -3826,14 +3806,12 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        nvme_mpath_clear_current_path(ns);
        synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
 
-       if (ns->disk->flags & GENHD_FL_UP) {
-               if (!nvme_ns_head_multipath(ns->head))
-                       nvme_cdev_del(&ns->cdev, &ns->cdev_device);
-               del_gendisk(ns->disk);
-               blk_cleanup_queue(ns->queue);
-               if (blk_get_integrity(ns->disk))
-                       blk_integrity_unregister(ns->disk);
-       }
+       if (!nvme_ns_head_multipath(ns->head))
+               nvme_cdev_del(&ns->cdev, &ns->cdev_device);
+       del_gendisk(ns->disk);
+       blk_cleanup_queue(ns->queue);
+       if (blk_get_integrity(ns->disk))
+               blk_integrity_unregister(ns->disk);
 
        down_write(&ns->ctrl->namespaces_rwsem);
        list_del_init(&ns->list);
index a5469fd..668c6bb 100644 (file)
@@ -719,7 +719,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -EINVAL;
                                goto out;
                        }
-                       nvmf_host_put(opts->host);
                        opts->host = nvmf_host_add(p);
                        kfree(p);
                        if (!opts->host) {
index 305ddd4..2231496 100644 (file)
@@ -342,9 +342,7 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
        case NVME_IOCTL_IO64_CMD:
                return nvme_user_cmd64(ns->ctrl, ns, argp);
        default:
-               if (!ns->ndev)
-                       return -ENOTTY;
-               return nvme_nvm_ioctl(ns, cmd, argp);
+               return -ENOTTY;
        }
 }
 
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
deleted file mode 100644 (file)
index e9d9ad4..0000000
+++ /dev/null
@@ -1,1274 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * nvme-lightnvm.c - LightNVM NVMe device
- *
- * Copyright (C) 2014-2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <mb@lightnvm.io>
- */
-
-#include "nvme.h"
-
-#include <linux/nvme.h>
-#include <linux/bitops.h>
-#include <linux/lightnvm.h>
-#include <linux/vmalloc.h>
-#include <linux/sched/sysctl.h>
-#include <uapi/linux/lightnvm.h>
-
-enum nvme_nvm_admin_opcode {
-       nvme_nvm_admin_identity         = 0xe2,
-       nvme_nvm_admin_get_bb_tbl       = 0xf2,
-       nvme_nvm_admin_set_bb_tbl       = 0xf1,
-};
-
-enum nvme_nvm_log_page {
-       NVME_NVM_LOG_REPORT_CHUNK       = 0xca,
-};
-
-struct nvme_nvm_ph_rw {
-       __u8                    opcode;
-       __u8                    flags;
-       __u16                   command_id;
-       __le32                  nsid;
-       __u64                   rsvd2;
-       __le64                  metadata;
-       __le64                  prp1;
-       __le64                  prp2;
-       __le64                  spba;
-       __le16                  length;
-       __le16                  control;
-       __le32                  dsmgmt;
-       __le64                  resv;
-};
-
-struct nvme_nvm_erase_blk {
-       __u8                    opcode;
-       __u8                    flags;
-       __u16                   command_id;
-       __le32                  nsid;
-       __u64                   rsvd[2];
-       __le64                  prp1;
-       __le64                  prp2;
-       __le64                  spba;
-       __le16                  length;
-       __le16                  control;
-       __le32                  dsmgmt;
-       __le64                  resv;
-};
-
-struct nvme_nvm_identity {
-       __u8                    opcode;
-       __u8                    flags;
-       __u16                   command_id;
-       __le32                  nsid;
-       __u64                   rsvd[2];
-       __le64                  prp1;
-       __le64                  prp2;
-       __u32                   rsvd11[6];
-};
-
-struct nvme_nvm_getbbtbl {
-       __u8                    opcode;
-       __u8                    flags;
-       __u16                   command_id;
-       __le32                  nsid;
-       __u64                   rsvd[2];
-       __le64                  prp1;
-       __le64                  prp2;
-       __le64                  spba;
-       __u32                   rsvd4[4];
-};
-
-struct nvme_nvm_setbbtbl {
-       __u8                    opcode;
-       __u8                    flags;
-       __u16                   command_id;
-       __le32                  nsid;
-       __le64                  rsvd[2];
-       __le64                  prp1;
-       __le64                  prp2;
-       __le64                  spba;
-       __le16                  nlb;
-       __u8                    value;
-       __u8                    rsvd3;
-       __u32                   rsvd4[3];
-};
-
-struct nvme_nvm_command {
-       union {
-               struct nvme_common_command common;
-               struct nvme_nvm_ph_rw ph_rw;
-               struct nvme_nvm_erase_blk erase;
-               struct nvme_nvm_identity identity;
-               struct nvme_nvm_getbbtbl get_bb;
-               struct nvme_nvm_setbbtbl set_bb;
-       };
-};
-
-struct nvme_nvm_id12_grp {
-       __u8                    mtype;
-       __u8                    fmtype;
-       __le16                  res16;
-       __u8                    num_ch;
-       __u8                    num_lun;
-       __u8                    num_pln;
-       __u8                    rsvd1;
-       __le16                  num_chk;
-       __le16                  num_pg;
-       __le16                  fpg_sz;
-       __le16                  csecs;
-       __le16                  sos;
-       __le16                  rsvd2;
-       __le32                  trdt;
-       __le32                  trdm;
-       __le32                  tprt;
-       __le32                  tprm;
-       __le32                  tbet;
-       __le32                  tbem;
-       __le32                  mpos;
-       __le32                  mccap;
-       __le16                  cpar;
-       __u8                    reserved[906];
-} __packed;
-
-struct nvme_nvm_id12_addrf {
-       __u8                    ch_offset;
-       __u8                    ch_len;
-       __u8                    lun_offset;
-       __u8                    lun_len;
-       __u8                    pln_offset;
-       __u8                    pln_len;
-       __u8                    blk_offset;
-       __u8                    blk_len;
-       __u8                    pg_offset;
-       __u8                    pg_len;
-       __u8                    sec_offset;
-       __u8                    sec_len;
-       __u8                    res[4];
-} __packed;
-
-struct nvme_nvm_id12 {
-       __u8                    ver_id;
-       __u8                    vmnt;
-       __u8                    cgrps;
-       __u8                    res;
-       __le32                  cap;
-       __le32                  dom;
-       struct nvme_nvm_id12_addrf ppaf;
-       __u8                    resv[228];
-       struct nvme_nvm_id12_grp grp;
-       __u8                    resv2[2880];
-} __packed;
-
-struct nvme_nvm_bb_tbl {
-       __u8    tblid[4];
-       __le16  verid;
-       __le16  revid;
-       __le32  rvsd1;
-       __le32  tblks;
-       __le32  tfact;
-       __le32  tgrown;
-       __le32  tdresv;
-       __le32  thresv;
-       __le32  rsvd2[8];
-       __u8    blk[];
-};
-
-struct nvme_nvm_id20_addrf {
-       __u8                    grp_len;
-       __u8                    pu_len;
-       __u8                    chk_len;
-       __u8                    lba_len;
-       __u8                    resv[4];
-};
-
-struct nvme_nvm_id20 {
-       __u8                    mjr;
-       __u8                    mnr;
-       __u8                    resv[6];
-
-       struct nvme_nvm_id20_addrf lbaf;
-
-       __le32                  mccap;
-       __u8                    resv2[12];
-
-       __u8                    wit;
-       __u8                    resv3[31];
-
-       /* Geometry */
-       __le16                  num_grp;
-       __le16                  num_pu;
-       __le32                  num_chk;
-       __le32                  clba;
-       __u8                    resv4[52];
-
-       /* Write data requirements */
-       __le32                  ws_min;
-       __le32                  ws_opt;
-       __le32                  mw_cunits;
-       __le32                  maxoc;
-       __le32                  maxocpu;
-       __u8                    resv5[44];
-
-       /* Performance related metrics */
-       __le32                  trdt;
-       __le32                  trdm;
-       __le32                  twrt;
-       __le32                  twrm;
-       __le32                  tcrst;
-       __le32                  tcrsm;
-       __u8                    resv6[40];
-
-       /* Reserved area */
-       __u8                    resv7[2816];
-
-       /* Vendor specific */
-       __u8                    vs[1024];
-};
-
-struct nvme_nvm_chk_meta {
-       __u8    state;
-       __u8    type;
-       __u8    wi;
-       __u8    rsvd[5];
-       __le64  slba;
-       __le64  cnlb;
-       __le64  wp;
-};
-
-/*
- * Check we didn't inadvertently grow the command struct
- */
-static inline void _nvme_nvm_check_size(void)
-{
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != 32);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) !=
-                                               sizeof(struct nvm_chk_meta));
-}
-
-static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst,
-                                struct nvme_nvm_id12_addrf *src)
-{
-       dst->ch_len = src->ch_len;
-       dst->lun_len = src->lun_len;
-       dst->blk_len = src->blk_len;
-       dst->pg_len = src->pg_len;
-       dst->pln_len = src->pln_len;
-       dst->sec_len = src->sec_len;
-
-       dst->ch_offset = src->ch_offset;
-       dst->lun_offset = src->lun_offset;
-       dst->blk_offset = src->blk_offset;
-       dst->pg_offset = src->pg_offset;
-       dst->pln_offset = src->pln_offset;
-       dst->sec_offset = src->sec_offset;
-
-       dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
-       dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
-       dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
-       dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
-       dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
-       dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
-}
-
-static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id,
-                            struct nvm_geo *geo)
-{
-       struct nvme_nvm_id12_grp *src;
-       int sec_per_pg, sec_per_pl, pg_per_blk;
-
-       if (id->cgrps != 1)
-               return -EINVAL;
-
-       src = &id->grp;
-
-       if (src->mtype != 0) {
-               pr_err("nvm: memory type not supported\n");
-               return -EINVAL;
-       }
-
-       /* 1.2 spec. only reports a single version id - unfold */
-       geo->major_ver_id = id->ver_id;
-       geo->minor_ver_id = 2;
-
-       /* Set compacted version for upper layers */
-       geo->version = NVM_OCSSD_SPEC_12;
-
-       geo->num_ch = src->num_ch;
-       geo->num_lun = src->num_lun;
-       geo->all_luns = geo->num_ch * geo->num_lun;
-
-       geo->num_chk = le16_to_cpu(src->num_chk);
-
-       geo->csecs = le16_to_cpu(src->csecs);
-       geo->sos = le16_to_cpu(src->sos);
-
-       pg_per_blk = le16_to_cpu(src->num_pg);
-       sec_per_pg = le16_to_cpu(src->fpg_sz) / geo->csecs;
-       sec_per_pl = sec_per_pg * src->num_pln;
-       geo->clba = sec_per_pl * pg_per_blk;
-
-       geo->all_chunks = geo->all_luns * geo->num_chk;
-       geo->total_secs = geo->clba * geo->all_chunks;
-
-       geo->ws_min = sec_per_pg;
-       geo->ws_opt = sec_per_pg;
-       geo->mw_cunits = geo->ws_opt << 3;      /* default to MLC safe values */
-
-       /* Do not impose values for maximum number of open blocks as it is
-        * unspecified in 1.2. Users of 1.2 must be aware of this and eventually
-        * specify these values through a quirk if restrictions apply.
-        */
-       geo->maxoc = geo->all_luns * geo->num_chk;
-       geo->maxocpu = geo->num_chk;
-
-       geo->mccap = le32_to_cpu(src->mccap);
-
-       geo->trdt = le32_to_cpu(src->trdt);
-       geo->trdm = le32_to_cpu(src->trdm);
-       geo->tprt = le32_to_cpu(src->tprt);
-       geo->tprm = le32_to_cpu(src->tprm);
-       geo->tbet = le32_to_cpu(src->tbet);
-       geo->tbem = le32_to_cpu(src->tbem);
-
-       /* 1.2 compatibility */
-       geo->vmnt = id->vmnt;
-       geo->cap = le32_to_cpu(id->cap);
-       geo->dom = le32_to_cpu(id->dom);
-
-       geo->mtype = src->mtype;
-       geo->fmtype = src->fmtype;
-
-       geo->cpar = le16_to_cpu(src->cpar);
-       geo->mpos = le32_to_cpu(src->mpos);
-
-       geo->pln_mode = NVM_PLANE_SINGLE;
-
-       if (geo->mpos & 0x020202) {
-               geo->pln_mode = NVM_PLANE_DOUBLE;
-               geo->ws_opt <<= 1;
-       } else if (geo->mpos & 0x040404) {
-               geo->pln_mode = NVM_PLANE_QUAD;
-               geo->ws_opt <<= 2;
-       }
-
-       geo->num_pln = src->num_pln;
-       geo->num_pg = le16_to_cpu(src->num_pg);
-       geo->fpg_sz = le16_to_cpu(src->fpg_sz);
-
-       nvme_nvm_set_addr_12((struct nvm_addrf_12 *)&geo->addrf, &id->ppaf);
-
-       return 0;
-}
-
-static void nvme_nvm_set_addr_20(struct nvm_addrf *dst,
-                                struct nvme_nvm_id20_addrf *src)
-{
-       dst->ch_len = src->grp_len;
-       dst->lun_len = src->pu_len;
-       dst->chk_len = src->chk_len;
-       dst->sec_len = src->lba_len;
-
-       dst->sec_offset = 0;
-       dst->chk_offset = dst->sec_len;
-       dst->lun_offset = dst->chk_offset + dst->chk_len;
-       dst->ch_offset = dst->lun_offset + dst->lun_len;
-
-       dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
-       dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
-       dst->chk_mask = ((1ULL << dst->chk_len) - 1) << dst->chk_offset;
-       dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
-}
-
-static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id,
-                            struct nvm_geo *geo)
-{
-       geo->major_ver_id = id->mjr;
-       geo->minor_ver_id = id->mnr;
-
-       /* Set compacted version for upper layers */
-       geo->version = NVM_OCSSD_SPEC_20;
-
-       geo->num_ch = le16_to_cpu(id->num_grp);
-       geo->num_lun = le16_to_cpu(id->num_pu);
-       geo->all_luns = geo->num_ch * geo->num_lun;
-
-       geo->num_chk = le32_to_cpu(id->num_chk);
-       geo->clba = le32_to_cpu(id->clba);
-
-       geo->all_chunks = geo->all_luns * geo->num_chk;
-       geo->total_secs = geo->clba * geo->all_chunks;
-
-       geo->ws_min = le32_to_cpu(id->ws_min);
-       geo->ws_opt = le32_to_cpu(id->ws_opt);
-       geo->mw_cunits = le32_to_cpu(id->mw_cunits);
-       geo->maxoc = le32_to_cpu(id->maxoc);
-       geo->maxocpu = le32_to_cpu(id->maxocpu);
-
-       geo->trdt = le32_to_cpu(id->trdt);
-       geo->trdm = le32_to_cpu(id->trdm);
-       geo->tprt = le32_to_cpu(id->twrt);
-       geo->tprm = le32_to_cpu(id->twrm);
-       geo->tbet = le32_to_cpu(id->tcrst);
-       geo->tbem = le32_to_cpu(id->tcrsm);
-
-       nvme_nvm_set_addr_20(&geo->addrf, &id->lbaf);
-
-       return 0;
-}
-
-static int nvme_nvm_identity(struct nvm_dev *nvmdev)
-{
-       struct nvme_ns *ns = nvmdev->q->queuedata;
-       struct nvme_nvm_id12 *id;
-       struct nvme_nvm_command c = {};
-       int ret;
-
-       c.identity.opcode = nvme_nvm_admin_identity;
-       c.identity.nsid = cpu_to_le32(ns->head->ns_id);
-
-       id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
-       if (!id)
-               return -ENOMEM;
-
-       ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
-                               id, sizeof(struct nvme_nvm_id12));
-       if (ret) {
-               ret = -EIO;
-               goto out;
-       }
-
-       /*
-        * The 1.2 and 2.0 specifications share the first byte in their geometry
-        * command to make it possible to know what version a device implements.
-        */
-       switch (id->ver_id) {
-       case 1:
-               ret = nvme_nvm_setup_12(id, &nvmdev->geo);
-               break;
-       case 2:
-               ret = nvme_nvm_setup_20((struct nvme_nvm_id20 *)id,
-                                                       &nvmdev->geo);
-               break;
-       default:
-               dev_err(ns->ctrl->device, "OCSSD revision not supported (%d)\n",
-                                                       id->ver_id);
-               ret = -EINVAL;
-       }
-
-out:
-       kfree(id);
-       return ret;
-}
-
-static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
-                                                               u8 *blks)
-{
-       struct request_queue *q = nvmdev->q;
-       struct nvm_geo *geo = &nvmdev->geo;
-       struct nvme_ns *ns = q->queuedata;
-       struct nvme_ctrl *ctrl = ns->ctrl;
-       struct nvme_nvm_command c = {};
-       struct nvme_nvm_bb_tbl *bb_tbl;
-       int nr_blks = geo->num_chk * geo->num_pln;
-       int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
-       int ret = 0;
-
-       c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
-       c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
-       c.get_bb.spba = cpu_to_le64(ppa.ppa);
-
-       bb_tbl = kzalloc(tblsz, GFP_KERNEL);
-       if (!bb_tbl)
-               return -ENOMEM;
-
-       ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
-                                                               bb_tbl, tblsz);
-       if (ret) {
-               dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
-               ret = -EIO;
-               goto out;
-       }
-
-       if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
-               bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
-               dev_err(ctrl->device, "bbt format mismatch\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (le16_to_cpu(bb_tbl->verid) != 1) {
-               ret = -EINVAL;
-               dev_err(ctrl->device, "bbt version not supported\n");
-               goto out;
-       }
-
-       if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
-               ret = -EINVAL;
-               dev_err(ctrl->device,
-                               "bbt unsuspected blocks returned (%u!=%u)",
-                               le32_to_cpu(bb_tbl->tblks), nr_blks);
-               goto out;
-       }
-
-       memcpy(blks, bb_tbl->blk, geo->num_chk * geo->num_pln);
-out:
-       kfree(bb_tbl);
-       return ret;
-}
-
-static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
-                                                       int nr_ppas, int type)
-{
-       struct nvme_ns *ns = nvmdev->q->queuedata;
-       struct nvme_nvm_command c = {};
-       int ret = 0;
-
-       c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
-       c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
-       c.set_bb.spba = cpu_to_le64(ppas->ppa);
-       c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
-       c.set_bb.value = type;
-
-       ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
-                                                               NULL, 0);
-       if (ret)
-               dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
-                                                                       ret);
-       return ret;
-}
-
-/*
- * Expect the lba in device format
- */
-static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
-                                sector_t slba, int nchks,
-                                struct nvm_chk_meta *meta)
-{
-       struct nvm_geo *geo = &ndev->geo;
-       struct nvme_ns *ns = ndev->q->queuedata;
-       struct nvme_ctrl *ctrl = ns->ctrl;
-       struct nvme_nvm_chk_meta *dev_meta, *dev_meta_off;
-       struct ppa_addr ppa;
-       size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
-       size_t log_pos, offset, len;
-       int i, max_len;
-       int ret = 0;
-
-       /*
-        * limit requests to maximum 256K to avoid issuing arbitrary large
-        * requests when the device does not specific a maximum transfer size.
-        */
-       max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);
-
-       dev_meta = kmalloc(max_len, GFP_KERNEL);
-       if (!dev_meta)
-               return -ENOMEM;
-
-       /* Normalize lba address space to obtain log offset */
-       ppa.ppa = slba;
-       ppa = dev_to_generic_addr(ndev, ppa);
-
-       log_pos = ppa.m.chk;
-       log_pos += ppa.m.pu * geo->num_chk;
-       log_pos += ppa.m.grp * geo->num_lun * geo->num_chk;
-
-       offset = log_pos * sizeof(struct nvme_nvm_chk_meta);
-
-       while (left) {
-               len = min_t(unsigned int, left, max_len);
-
-               memset(dev_meta, 0, max_len);
-               dev_meta_off = dev_meta;
-
-               ret = nvme_get_log(ctrl, ns->head->ns_id,
-                               NVME_NVM_LOG_REPORT_CHUNK, 0, NVME_CSI_NVM,
-                               dev_meta, len, offset);
-               if (ret) {
-                       dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
-                       break;
-               }
-
-               for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
-                       meta->state = dev_meta_off->state;
-                       meta->type = dev_meta_off->type;
-                       meta->wi = dev_meta_off->wi;
-                       meta->slba = le64_to_cpu(dev_meta_off->slba);
-                       meta->cnlb = le64_to_cpu(dev_meta_off->cnlb);
-                       meta->wp = le64_to_cpu(dev_meta_off->wp);
-
-                       meta++;
-                       dev_meta_off++;
-               }
-
-               offset += len;
-               left -= len;
-       }
-
-       kfree(dev_meta);
-
-       return ret;
-}
-
-static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
-                                   struct nvme_nvm_command *c)
-{
-       c->ph_rw.opcode = rqd->opcode;
-       c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
-       c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
-       c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
-       c->ph_rw.control = cpu_to_le16(rqd->flags);
-       c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
-}
-
-static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
-{
-       struct nvm_rq *rqd = rq->end_io_data;
-
-       rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
-       rqd->error = nvme_req(rq)->status;
-       nvm_end_io(rqd);
-
-       kfree(nvme_req(rq)->cmd);
-       blk_mq_free_request(rq);
-}
-
-static struct request *nvme_nvm_alloc_request(struct request_queue *q,
-                                             struct nvm_rq *rqd,
-                                             struct nvme_nvm_command *cmd)
-{
-       struct nvme_ns *ns = q->queuedata;
-       struct request *rq;
-
-       nvme_nvm_rqtocmd(rqd, ns, cmd);
-
-       rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0);
-       if (IS_ERR(rq))
-               return rq;
-
-       rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
-
-       if (rqd->bio)
-               blk_rq_append_bio(rq, rqd->bio);
-       else
-               rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
-
-       return rq;
-}
-
-static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
-                             void *buf)
-{
-       struct nvm_geo *geo = &dev->geo;
-       struct request_queue *q = dev->q;
-       struct nvme_nvm_command *cmd;
-       struct request *rq;
-       int ret;
-
-       cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
-
-       rq = nvme_nvm_alloc_request(q, rqd, cmd);
-       if (IS_ERR(rq)) {
-               ret = PTR_ERR(rq);
-               goto err_free_cmd;
-       }
-
-       if (buf) {
-               ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
-                               GFP_KERNEL);
-               if (ret)
-                       goto err_free_cmd;
-       }
-
-       rq->end_io_data = rqd;
-
-       blk_execute_rq_nowait(NULL, rq, 0, nvme_nvm_end_io);
-
-       return 0;
-
-err_free_cmd:
-       kfree(cmd);
-       return ret;
-}
-
-static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
-                                       int size)
-{
-       struct nvme_ns *ns = nvmdev->q->queuedata;
-
-       return dma_pool_create(name, ns->ctrl->dev, size, PAGE_SIZE, 0);
-}
-
-static void nvme_nvm_destroy_dma_pool(void *pool)
-{
-       struct dma_pool *dma_pool = pool;
-
-       dma_pool_destroy(dma_pool);
-}
-
-static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
-                                   gfp_t mem_flags, dma_addr_t *dma_handler)
-{
-       return dma_pool_alloc(pool, mem_flags, dma_handler);
-}
-
-static void nvme_nvm_dev_dma_free(void *pool, void *addr,
-                                                       dma_addr_t dma_handler)
-{
-       dma_pool_free(pool, addr, dma_handler);
-}
-
-static struct nvm_dev_ops nvme_nvm_dev_ops = {
-       .identity               = nvme_nvm_identity,
-
-       .get_bb_tbl             = nvme_nvm_get_bb_tbl,
-       .set_bb_tbl             = nvme_nvm_set_bb_tbl,
-
-       .get_chk_meta           = nvme_nvm_get_chk_meta,
-
-       .submit_io              = nvme_nvm_submit_io,
-
-       .create_dma_pool        = nvme_nvm_create_dma_pool,
-       .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
-       .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
-       .dev_dma_free           = nvme_nvm_dev_dma_free,
-};
-
-static int nvme_nvm_submit_user_cmd(struct request_queue *q,
-                               struct nvme_ns *ns,
-                               struct nvme_nvm_command *vcmd,
-                               void __user *ubuf, unsigned int bufflen,
-                               void __user *meta_buf, unsigned int meta_len,
-                               void __user *ppa_buf, unsigned int ppa_len,
-                               u32 *result, u64 *status, unsigned int timeout)
-{
-       bool write = nvme_is_write((struct nvme_command *)vcmd);
-       struct nvm_dev *dev = ns->ndev;
-       struct request *rq;
-       struct bio *bio = NULL;
-       __le64 *ppa_list = NULL;
-       dma_addr_t ppa_dma;
-       __le64 *metadata = NULL;
-       dma_addr_t metadata_dma;
-       DECLARE_COMPLETION_ONSTACK(wait);
-       int ret = 0;
-
-       rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0);
-       if (IS_ERR(rq)) {
-               ret = -ENOMEM;
-               goto err_cmd;
-       }
-
-       if (timeout)
-               rq->timeout = timeout;
-
-       if (ppa_buf && ppa_len) {
-               ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
-               if (!ppa_list) {
-                       ret = -ENOMEM;
-                       goto err_rq;
-               }
-               if (copy_from_user(ppa_list, (void __user *)ppa_buf,
-                                               sizeof(u64) * (ppa_len + 1))) {
-                       ret = -EFAULT;
-                       goto err_ppa;
-               }
-               vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
-       } else {
-               vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
-       }
-
-       if (ubuf && bufflen) {
-               ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
-               if (ret)
-                       goto err_ppa;
-               bio = rq->bio;
-
-               if (meta_buf && meta_len) {
-                       metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
-                                                               &metadata_dma);
-                       if (!metadata) {
-                               ret = -ENOMEM;
-                               goto err_map;
-                       }
-
-                       if (write) {
-                               if (copy_from_user(metadata,
-                                               (void __user *)meta_buf,
-                                               meta_len)) {
-                                       ret = -EFAULT;
-                                       goto err_meta;
-                               }
-                       }
-                       vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
-               }
-
-               bio_set_dev(bio, ns->disk->part0);
-       }
-
-       blk_execute_rq(NULL, rq, 0);
-
-       if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
-               ret = -EINTR;
-       else if (nvme_req(rq)->status & 0x7ff)
-               ret = -EIO;
-       if (result)
-               *result = nvme_req(rq)->status & 0x7ff;
-       if (status)
-               *status = le64_to_cpu(nvme_req(rq)->result.u64);
-
-       if (metadata && !ret && !write) {
-               if (copy_to_user(meta_buf, (void *)metadata, meta_len))
-                       ret = -EFAULT;
-       }
-err_meta:
-       if (meta_buf && meta_len)
-               dma_pool_free(dev->dma_pool, metadata, metadata_dma);
-err_map:
-       if (bio)
-               blk_rq_unmap_user(bio);
-err_ppa:
-       if (ppa_buf && ppa_len)
-               dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
-err_rq:
-       blk_mq_free_request(rq);
-err_cmd:
-       return ret;
-}
-
-static int nvme_nvm_submit_vio(struct nvme_ns *ns,
-                                       struct nvm_user_vio __user *uvio)
-{
-       struct nvm_user_vio vio;
-       struct nvme_nvm_command c;
-       unsigned int length;
-       int ret;
-
-       if (copy_from_user(&vio, uvio, sizeof(vio)))
-               return -EFAULT;
-       if (vio.flags)
-               return -EINVAL;
-
-       memset(&c, 0, sizeof(c));
-       c.ph_rw.opcode = vio.opcode;
-       c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
-       c.ph_rw.control = cpu_to_le16(vio.control);
-       c.ph_rw.length = cpu_to_le16(vio.nppas);
-
-       length = (vio.nppas + 1) << ns->lba_shift;
-
-       ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
-                       (void __user *)(uintptr_t)vio.addr, length,
-                       (void __user *)(uintptr_t)vio.metadata,
-                                                       vio.metadata_len,
-                       (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
-                       &vio.result, &vio.status, 0);
-
-       if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
-               return -EFAULT;
-
-       return ret;
-}
-
-static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
-                                       struct nvm_passthru_vio __user *uvcmd)
-{
-       struct nvm_passthru_vio vcmd;
-       struct nvme_nvm_command c;
-       struct request_queue *q;
-       unsigned int timeout = 0;
-       int ret;
-
-       if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
-               return -EFAULT;
-       if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
-               return -EACCES;
-       if (vcmd.flags)
-               return -EINVAL;
-
-       memset(&c, 0, sizeof(c));
-       c.common.opcode = vcmd.opcode;
-       c.common.nsid = cpu_to_le32(ns->head->ns_id);
-       c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
-       c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
-       /* cdw11-12 */
-       c.ph_rw.length = cpu_to_le16(vcmd.nppas);
-       c.ph_rw.control  = cpu_to_le16(vcmd.control);
-       c.common.cdw13 = cpu_to_le32(vcmd.cdw13);
-       c.common.cdw14 = cpu_to_le32(vcmd.cdw14);
-       c.common.cdw15 = cpu_to_le32(vcmd.cdw15);
-
-       if (vcmd.timeout_ms)
-               timeout = msecs_to_jiffies(vcmd.timeout_ms);
-
-       q = admin ? ns->ctrl->admin_q : ns->queue;
-
-       ret = nvme_nvm_submit_user_cmd(q, ns,
-                       (struct nvme_nvm_command *)&c,
-                       (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
-                       (void __user *)(uintptr_t)vcmd.metadata,
-                                                       vcmd.metadata_len,
-                       (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
-                       &vcmd.result, &vcmd.status, timeout);
-
-       if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
-               return -EFAULT;
-
-       return ret;
-}
-
-int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp)
-{
-       switch (cmd) {
-       case NVME_NVM_IOCTL_ADMIN_VIO:
-               return nvme_nvm_user_vcmd(ns, 1, argp);
-       case NVME_NVM_IOCTL_IO_VIO:
-               return nvme_nvm_user_vcmd(ns, 0, argp);
-       case NVME_NVM_IOCTL_SUBMIT_VIO:
-               return nvme_nvm_submit_vio(ns, argp);
-       default:
-               return -ENOTTY;
-       }
-}
-
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
-{
-       struct request_queue *q = ns->queue;
-       struct nvm_dev *dev;
-       struct nvm_geo *geo;
-
-       _nvme_nvm_check_size();
-
-       dev = nvm_alloc_dev(node);
-       if (!dev)
-               return -ENOMEM;
-
-       /* Note that csecs and sos will be overridden if it is a 1.2 drive. */
-       geo = &dev->geo;
-       geo->csecs = 1 << ns->lba_shift;
-       geo->sos = ns->ms;
-       if (ns->features & NVME_NS_EXT_LBAS)
-               geo->ext = true;
-       else
-               geo->ext = false;
-       geo->mdts = ns->ctrl->max_hw_sectors;
-
-       dev->q = q;
-       memcpy(dev->name, disk_name, DISK_NAME_LEN);
-       dev->ops = &nvme_nvm_dev_ops;
-       dev->private_data = ns;
-       ns->ndev = dev;
-
-       return nvm_register(dev);
-}
-
-void nvme_nvm_unregister(struct nvme_ns *ns)
-{
-       nvm_unregister(ns->ndev);
-}
-
-static ssize_t nvm_dev_attr_show(struct device *dev,
-               struct device_attribute *dattr, char *page)
-{
-       struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
-       struct nvm_dev *ndev = ns->ndev;
-       struct nvm_geo *geo = &ndev->geo;
-       struct attribute *attr;
-
-       if (!ndev)
-               return 0;
-
-       attr = &dattr->attr;
-
-       if (strcmp(attr->name, "version") == 0) {
-               if (geo->major_ver_id == 1)
-                       return scnprintf(page, PAGE_SIZE, "%u\n",
-                                               geo->major_ver_id);
-               else
-                       return scnprintf(page, PAGE_SIZE, "%u.%u\n",
-                                               geo->major_ver_id,
-                                               geo->minor_ver_id);
-       } else if (strcmp(attr->name, "capabilities") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap);
-       } else if (strcmp(attr->name, "read_typ") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdt);
-       } else if (strcmp(attr->name, "read_max") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdm);
-       } else {
-               return scnprintf(page,
-                                PAGE_SIZE,
-                                "Unhandled attr(%s) in `%s`\n",
-                                attr->name, __func__);
-       }
-}
-
-static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page)
-{
-       return scnprintf(page, PAGE_SIZE,
-               "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-                               ppaf->ch_offset, ppaf->ch_len,
-                               ppaf->lun_offset, ppaf->lun_len,
-                               ppaf->pln_offset, ppaf->pln_len,
-                               ppaf->blk_offset, ppaf->blk_len,
-                               ppaf->pg_offset, ppaf->pg_len,
-                               ppaf->sec_offset, ppaf->sec_len);
-}
-
-static ssize_t nvm_dev_attr_show_12(struct device *dev,
-               struct device_attribute *dattr, char *page)
-{
-       struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
-       struct nvm_dev *ndev = ns->ndev;
-       struct nvm_geo *geo = &ndev->geo;
-       struct attribute *attr;
-
-       if (!ndev)
-               return 0;
-
-       attr = &dattr->attr;
-
-       if (strcmp(attr->name, "vendor_opcode") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->vmnt);
-       } else if (strcmp(attr->name, "device_mode") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->dom);
-       /* kept for compatibility */
-       } else if (strcmp(attr->name, "media_manager") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
-       } else if (strcmp(attr->name, "ppa_format") == 0) {
-               return nvm_dev_attr_show_ppaf((void *)&geo->addrf, page);
-       } else if (strcmp(attr->name, "media_type") == 0) {     /* u8 */
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->mtype);
-       } else if (strcmp(attr->name, "flash_media_type") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype);
-       } else if (strcmp(attr->name, "num_channels") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
-       } else if (strcmp(attr->name, "num_luns") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
-       } else if (strcmp(attr->name, "num_planes") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln);
-       } else if (strcmp(attr->name, "num_blocks") == 0) {     /* u16 */
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
-       } else if (strcmp(attr->name, "num_pages") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);
-       } else if (strcmp(attr->name, "page_size") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->fpg_sz);
-       } else if (strcmp(attr->name, "hw_sector_size") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->csecs);
-       } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->sos);
-       } else if (strcmp(attr->name, "prog_typ") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
-       } else if (strcmp(attr->name, "prog_max") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
-       } else if (strcmp(attr->name, "erase_typ") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
-       } else if (strcmp(attr->name, "erase_max") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
-       } else if (strcmp(attr->name, "multiplane_modes") == 0) {
-               return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mpos);
-       } else if (strcmp(attr->name, "media_capabilities") == 0) {
-               return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mccap);
-       } else if (strcmp(attr->name, "max_phys_secs") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA);
-       } else {
-               return scnprintf(page, PAGE_SIZE,
-                       "Unhandled attr(%s) in `%s`\n",
-                       attr->name, __func__);
-       }
-}
-
-static ssize_t nvm_dev_attr_show_20(struct device *dev,
-               struct device_attribute *dattr, char *page)
-{
-       struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
-       struct nvm_dev *ndev = ns->ndev;
-       struct nvm_geo *geo = &ndev->geo;
-       struct attribute *attr;
-
-       if (!ndev)
-               return 0;
-
-       attr = &dattr->attr;
-
-       if (strcmp(attr->name, "groups") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
-       } else if (strcmp(attr->name, "punits") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
-       } else if (strcmp(attr->name, "chunks") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
-       } else if (strcmp(attr->name, "clba") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba);
-       } else if (strcmp(attr->name, "ws_min") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min);
-       } else if (strcmp(attr->name, "ws_opt") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt);
-       } else if (strcmp(attr->name, "maxoc") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxoc);
-       } else if (strcmp(attr->name, "maxocpu") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxocpu);
-       } else if (strcmp(attr->name, "mw_cunits") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits);
-       } else if (strcmp(attr->name, "write_typ") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
-       } else if (strcmp(attr->name, "write_max") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
-       } else if (strcmp(attr->name, "reset_typ") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
-       } else if (strcmp(attr->name, "reset_max") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
-       } else {
-               return scnprintf(page, PAGE_SIZE,
-                       "Unhandled attr(%s) in `%s`\n",
-                       attr->name, __func__);
-       }
-}
-
-#define NVM_DEV_ATTR_RO(_name)                                 \
-       DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
-#define NVM_DEV_ATTR_12_RO(_name)                                      \
-       DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_12, NULL)
-#define NVM_DEV_ATTR_20_RO(_name)                                      \
-       DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_20, NULL)
-
-/* general attributes */
-static NVM_DEV_ATTR_RO(version);
-static NVM_DEV_ATTR_RO(capabilities);
-
-static NVM_DEV_ATTR_RO(read_typ);
-static NVM_DEV_ATTR_RO(read_max);
-
-/* 1.2 values */
-static NVM_DEV_ATTR_12_RO(vendor_opcode);
-static NVM_DEV_ATTR_12_RO(device_mode);
-static NVM_DEV_ATTR_12_RO(ppa_format);
-static NVM_DEV_ATTR_12_RO(media_manager);
-static NVM_DEV_ATTR_12_RO(media_type);
-static NVM_DEV_ATTR_12_RO(flash_media_type);
-static NVM_DEV_ATTR_12_RO(num_channels);
-static NVM_DEV_ATTR_12_RO(num_luns);
-static NVM_DEV_ATTR_12_RO(num_planes);
-static NVM_DEV_ATTR_12_RO(num_blocks);
-static NVM_DEV_ATTR_12_RO(num_pages);
-static NVM_DEV_ATTR_12_RO(page_size);
-static NVM_DEV_ATTR_12_RO(hw_sector_size);
-static NVM_DEV_ATTR_12_RO(oob_sector_size);
-static NVM_DEV_ATTR_12_RO(prog_typ);
-static NVM_DEV_ATTR_12_RO(prog_max);
-static NVM_DEV_ATTR_12_RO(erase_typ);
-static NVM_DEV_ATTR_12_RO(erase_max);
-static NVM_DEV_ATTR_12_RO(multiplane_modes);
-static NVM_DEV_ATTR_12_RO(media_capabilities);
-static NVM_DEV_ATTR_12_RO(max_phys_secs);
-
-/* 2.0 values */
-static NVM_DEV_ATTR_20_RO(groups);
-static NVM_DEV_ATTR_20_RO(punits);
-static NVM_DEV_ATTR_20_RO(chunks);
-static NVM_DEV_ATTR_20_RO(clba);
-static NVM_DEV_ATTR_20_RO(ws_min);
-static NVM_DEV_ATTR_20_RO(ws_opt);
-static NVM_DEV_ATTR_20_RO(maxoc);
-static NVM_DEV_ATTR_20_RO(maxocpu);
-static NVM_DEV_ATTR_20_RO(mw_cunits);
-static NVM_DEV_ATTR_20_RO(write_typ);
-static NVM_DEV_ATTR_20_RO(write_max);
-static NVM_DEV_ATTR_20_RO(reset_typ);
-static NVM_DEV_ATTR_20_RO(reset_max);
-
-static struct attribute *nvm_dev_attrs[] = {
-       /* version agnostic attrs */
-       &dev_attr_version.attr,
-       &dev_attr_capabilities.attr,
-       &dev_attr_read_typ.attr,
-       &dev_attr_read_max.attr,
-
-       /* 1.2 attrs */
-       &dev_attr_vendor_opcode.attr,
-       &dev_attr_device_mode.attr,
-       &dev_attr_media_manager.attr,
-       &dev_attr_ppa_format.attr,
-       &dev_attr_media_type.attr,
-       &dev_attr_flash_media_type.attr,
-       &dev_attr_num_channels.attr,
-       &dev_attr_num_luns.attr,
-       &dev_attr_num_planes.attr,
-       &dev_attr_num_blocks.attr,
-       &dev_attr_num_pages.attr,
-       &dev_attr_page_size.attr,
-       &dev_attr_hw_sector_size.attr,
-       &dev_attr_oob_sector_size.attr,
-       &dev_attr_prog_typ.attr,
-       &dev_attr_prog_max.attr,
-       &dev_attr_erase_typ.attr,
-       &dev_attr_erase_max.attr,
-       &dev_attr_multiplane_modes.attr,
-       &dev_attr_media_capabilities.attr,
-       &dev_attr_max_phys_secs.attr,
-
-       /* 2.0 attrs */
-       &dev_attr_groups.attr,
-       &dev_attr_punits.attr,
-       &dev_attr_chunks.attr,
-       &dev_attr_clba.attr,
-       &dev_attr_ws_min.attr,
-       &dev_attr_ws_opt.attr,
-       &dev_attr_maxoc.attr,
-       &dev_attr_maxocpu.attr,
-       &dev_attr_mw_cunits.attr,
-
-       &dev_attr_write_typ.attr,
-       &dev_attr_write_max.attr,
-       &dev_attr_reset_typ.attr,
-       &dev_attr_reset_max.attr,
-
-       NULL,
-};
-
-static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
-                                    struct attribute *attr, int index)
-{
-       struct device *dev = kobj_to_dev(kobj);
-       struct gendisk *disk = dev_to_disk(dev);
-       struct nvme_ns *ns = disk->private_data;
-       struct nvm_dev *ndev = ns->ndev;
-       struct device_attribute *dev_attr =
-               container_of(attr, typeof(*dev_attr), attr);
-
-       if (!ndev)
-               return 0;
-
-       if (dev_attr->show == nvm_dev_attr_show)
-               return attr->mode;
-
-       switch (ndev->geo.major_ver_id) {
-       case 1:
-               if (dev_attr->show == nvm_dev_attr_show_12)
-                       return attr->mode;
-               break;
-       case 2:
-               if (dev_attr->show == nvm_dev_attr_show_20)
-                       return attr->mode;
-               break;
-       }
-
-       return 0;
-}
-
-const struct attribute_group nvme_nvm_attr_group = {
-       .name           = "lightnvm",
-       .attrs          = nvm_dev_attrs,
-       .is_visible     = nvm_dev_attrs_visible,
-};
index 3f32c5e..37ce3e8 100644
@@ -765,7 +765,7 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
        if (!head->disk)
                return;
        kblockd_schedule_work(&head->requeue_work);
-       if (head->disk->flags & GENHD_FL_UP) {
+       if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
                nvme_cdev_del(&head->cdev, &head->cdev_device);
                del_gendisk(head->disk);
        }
index 5cd1fa3..a2e1f29 100644
@@ -11,7 +11,6 @@
 #include <linux/pci.h>
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
-#include <linux/lightnvm.h>
 #include <linux/sed-opal.h>
 #include <linux/fault-inject.h>
 #include <linux/rcupdate.h>
@@ -48,11 +47,6 @@ extern struct workqueue_struct *nvme_wq;
 extern struct workqueue_struct *nvme_reset_wq;
 extern struct workqueue_struct *nvme_delete_wq;
 
-enum {
-       NVME_NS_LBA             = 0,
-       NVME_NS_LIGHTNVM        = 1,
-};
-
 /*
  * List of workarounds for devices that required behavior not specified in
  * the standard.
@@ -92,11 +86,6 @@ enum nvme_quirks {
         */
        NVME_QUIRK_NO_DEEPEST_PS                = (1 << 5),
 
-       /*
-        * Supports the LighNVM command set if indicated in vs[1].
-        */
-       NVME_QUIRK_LIGHTNVM                     = (1 << 6),
-
        /*
         * Set MEDIUM priority on SQ creation
         */
@@ -158,6 +147,7 @@ enum nvme_quirks {
 struct nvme_request {
        struct nvme_command     *cmd;
        union nvme_result       result;
+       u8                      genctr;
        u8                      retries;
        u8                      flags;
        u16                     status;
@@ -449,7 +439,6 @@ struct nvme_ns {
        u32 ana_grpid;
 #endif
        struct list_head siblings;
-       struct nvm_dev *ndev;
        struct kref kref;
        struct nvme_ns_head *head;
 
@@ -497,6 +486,49 @@ struct nvme_ctrl_ops {
        int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 };
 
+/*
+ * nvme command_id is constructed as such:
+ * | xxxx | xxxxxxxxxxxx |
+ *   gen    request tag
+ */
+#define nvme_genctr_mask(gen)                  (gen & 0xf)
+#define nvme_cid_install_genctr(gen)           (nvme_genctr_mask(gen) << 12)
+#define nvme_genctr_from_cid(cid)              ((cid & 0xf000) >> 12)
+#define nvme_tag_from_cid(cid)                 (cid & 0xfff)
+
+static inline u16 nvme_cid(struct request *rq)
+{
+       return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
+}
+
+static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
+               u16 command_id)
+{
+       u8 genctr = nvme_genctr_from_cid(command_id);
+       u16 tag = nvme_tag_from_cid(command_id);
+       struct request *rq;
+
+       rq = blk_mq_tag_to_rq(tags, tag);
+       if (unlikely(!rq)) {
+               pr_err("could not locate request for tag %#x\n",
+                       tag);
+               return NULL;
+       }
+       if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
+               dev_err(nvme_req(rq)->ctrl->device,
+                       "request %#x genctr mismatch (got %#x expected %#x)\n",
+                       tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
+               return NULL;
+       }
+       return rq;
+}
+
+static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
+                u16 command_id)
+{
+       return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
+}
+
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
                            const char *dev_name);
@@ -594,7 +626,8 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
 
 static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
 {
-       return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
+       return !qid &&
+               nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
 }
 
 void nvme_complete_rq(struct request *req);
@@ -823,26 +856,6 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
 }
 #endif
 
-#ifdef CONFIG_NVM
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
-void nvme_nvm_unregister(struct nvme_ns *ns);
-extern const struct attribute_group nvme_nvm_attr_group;
-int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp);
-#else
-static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
-                                   int node)
-{
-       return 0;
-}
-
-static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
-               void __user *argp)
-{
-       return -ENOTTY;
-}
-#endif /* CONFIG_NVM */
-
 static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 {
        return dev_to_disk(dev)->private_data;
index 5185208..b82492c 100644
@@ -60,6 +60,8 @@ MODULE_PARM_DESC(sgl_threshold,
                "Use SGLs when average request segment size is larger or equal to "
                "this size. Use 0 to disable SGLs.");
 
+#define NVME_PCI_MIN_QUEUE_SIZE 2
+#define NVME_PCI_MAX_QUEUE_SIZE 4095
 static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
 static const struct kernel_param_ops io_queue_depth_ops = {
        .set = io_queue_depth_set,
@@ -68,7 +70,7 @@ static const struct kernel_param_ops io_queue_depth_ops = {
 
 static unsigned int io_queue_depth = 1024;
 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
-MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096");
 
 static int io_queue_count_set(const char *val, const struct kernel_param *kp)
 {
@@ -135,6 +137,7 @@ struct nvme_dev {
        u32 cmbloc;
        struct nvme_ctrl ctrl;
        u32 last_ps;
+       bool hmb;
 
        mempool_t *iod_mempool;
 
@@ -153,18 +156,14 @@ struct nvme_dev {
        unsigned int nr_allocated_queues;
        unsigned int nr_write_queues;
        unsigned int nr_poll_queues;
+
+       bool attrs_added;
 };
 
 static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
 {
-       int ret;
-       u32 n;
-
-       ret = kstrtou32(val, 10, &n);
-       if (ret != 0 || n < 2)
-               return -EINVAL;
-
-       return param_set_uint(val, kp);
+       return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE,
+                       NVME_PCI_MAX_QUEUE_SIZE);
 }
 
 static inline unsigned int sq_idx(unsigned int qid, u32 stride)
@@ -1014,7 +1013,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
                return;
        }
 
-       req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
+       req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
        if (unlikely(!req)) {
                dev_warn(nvmeq->dev->ctrl.device,
                        "invalid id %d completed on queue %d\n",
@@ -1808,17 +1807,6 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
        return ret >= 0 ? 0 : ret;
 }
 
-static ssize_t nvme_cmb_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
-{
-       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
-
-       return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
-                      ndev->cmbloc, ndev->cmbsz);
-}
-static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
-
 static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
 {
        u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
@@ -1887,20 +1875,6 @@ static void nvme_map_cmb(struct nvme_dev *dev)
        if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
                        (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
                pci_p2pmem_publish(pdev, true);
-
-       if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
-                                   &dev_attr_cmb.attr, NULL))
-               dev_warn(dev->ctrl.device,
-                        "failed to add sysfs attribute for CMB\n");
-}
-
-static inline void nvme_release_cmb(struct nvme_dev *dev)
-{
-       if (dev->cmb_size) {
-               sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
-                                            &dev_attr_cmb.attr, NULL);
-               dev->cmb_size = 0;
-       }
 }
 
 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
@@ -1923,7 +1897,9 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
                dev_warn(dev->ctrl.device,
                         "failed to set host mem (err %d, flags %#x).\n",
                         ret, bits);
-       }
+       } else
+               dev->hmb = bits & NVME_HOST_MEM_ENABLE;
+
        return ret;
 }
 
@@ -2080,6 +2056,102 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
        return ret;
 }
 
+static ssize_t cmb_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz  : x%08x\n",
+                      ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR_RO(cmb);
+
+static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "%u\n", ndev->cmbloc);
+}
+static DEVICE_ATTR_RO(cmbloc);
+
+static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "%u\n", ndev->cmbsz);
+}
+static DEVICE_ATTR_RO(cmbsz);
+
+static ssize_t hmb_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "%d\n", ndev->hmb);
+}
+
+static ssize_t hmb_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+       bool new;
+       int ret;
+
+       if (strtobool(buf, &new) < 0)
+               return -EINVAL;
+
+       if (new == ndev->hmb)
+               return count;
+
+       if (new) {
+               ret = nvme_setup_host_mem(ndev);
+       } else {
+               ret = nvme_set_host_mem(ndev, 0);
+               if (!ret)
+                       nvme_free_host_mem(ndev);
+       }
+
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+static DEVICE_ATTR_RW(hmb);
+
+static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj,
+               struct attribute *a, int n)
+{
+       struct nvme_ctrl *ctrl =
+               dev_get_drvdata(container_of(kobj, struct device, kobj));
+       struct nvme_dev *dev = to_nvme_dev(ctrl);
+
+       if (a == &dev_attr_cmb.attr ||
+           a == &dev_attr_cmbloc.attr ||
+           a == &dev_attr_cmbsz.attr) {
+               if (!dev->cmbsz)
+                       return 0;
+       }
+       if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
+               return 0;
+
+       return a->mode;
+}
+
+static struct attribute *nvme_pci_attrs[] = {
+       &dev_attr_cmb.attr,
+       &dev_attr_cmbloc.attr,
+       &dev_attr_cmbsz.attr,
+       &dev_attr_hmb.attr,
+       NULL,
+};
+
+static const struct attribute_group nvme_pci_attr_group = {
+       .attrs          = nvme_pci_attrs,
+       .is_visible     = nvme_pci_attrs_are_visible,
+};
+
 /*
  * nirqs is the number of interrupts available for write and read
  * queues. The core already reserved an interrupt for the admin queue.
@@ -2751,6 +2823,10 @@ static void nvme_reset_work(struct work_struct *work)
                goto out;
        }
 
+       if (!dev->attrs_added && !sysfs_create_group(&dev->ctrl.device->kobj,
+                       &nvme_pci_attr_group))
+               dev->attrs_added = true;
+
        nvme_start_ctrl(&dev->ctrl);
        return;
 
@@ -2999,6 +3075,13 @@ static void nvme_shutdown(struct pci_dev *pdev)
        nvme_disable_prepare_reset(dev, true);
 }
 
+static void nvme_remove_attrs(struct nvme_dev *dev)
+{
+       if (dev->attrs_added)
+               sysfs_remove_group(&dev->ctrl.device->kobj,
+                                  &nvme_pci_attr_group);
+}
+
 /*
  * The driver's remove may be called on a device in a partially initialized
  * state. This function must not have any dependencies on the device state in
@@ -3020,7 +3103,7 @@ static void nvme_remove(struct pci_dev *pdev)
        nvme_stop_ctrl(&dev->ctrl);
        nvme_remove_namespaces(&dev->ctrl);
        nvme_dev_disable(dev, true);
-       nvme_release_cmb(dev);
+       nvme_remove_attrs(dev);
        nvme_free_host_mem(dev);
        nvme_dev_remove_admin(dev);
        nvme_free_queues(dev, 0);
@@ -3047,8 +3130,13 @@ static int nvme_resume(struct device *dev)
 
        if (ndev->last_ps == U32_MAX ||
            nvme_set_power_state(ctrl, ndev->last_ps) != 0)
-               return nvme_try_sched_reset(&ndev->ctrl);
+               goto reset;
+       if (ctrl->hmpre && nvme_setup_host_mem(ndev))
+               goto reset;
+
        return 0;
+reset:
+       return nvme_try_sched_reset(ctrl);
 }
 
 static int nvme_suspend(struct device *dev)
@@ -3072,15 +3160,9 @@ static int nvme_suspend(struct device *dev)
         * the PCI bus layer to put it into D3 in order to take the PCIe link
         * down, so as to allow the platform to achieve its minimum low-power
         * state (which may not be possible if the link is up).
-        *
-        * If a host memory buffer is enabled, shut down the device as the NVMe
-        * specification allows the device to access the host memory buffer in
-        * host DRAM from all power states, but hosts will fail access to DRAM
-        * during S3.
         */
        if (pm_suspend_via_firmware() || !ctrl->npss ||
            !pcie_aspm_enabled(pdev) ||
-           ndev->nr_host_mem_descs ||
            (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
                return nvme_disable_prepare_reset(ndev, true);
 
@@ -3091,6 +3173,17 @@ static int nvme_suspend(struct device *dev)
        if (ctrl->state != NVME_CTRL_LIVE)
                goto unfreeze;
 
+       /*
+        * Host memory access may not be successful in a system suspend state,
+        * but the specification allows the controller to access memory in a
+        * non-operational power state.
+        */
+       if (ndev->hmb) {
+               ret = nvme_set_host_mem(ndev, 0);
+               if (ret < 0)
+                       goto unfreeze;
+       }
+
        ret = nvme_get_power_state(ctrl, &ndev->last_ps);
        if (ret < 0)
                goto unfreeze;
@@ -3243,12 +3336,6 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE(0x1b4b, 0x1092),   /* Lexar 256 GB SSD */
                .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
-       { PCI_DEVICE(0x1d1d, 0x1f1f),   /* LighNVM qemu device */
-               .driver_data = NVME_QUIRK_LIGHTNVM, },
-       { PCI_DEVICE(0x1d1d, 0x2807),   /* CNEX WL */
-               .driver_data = NVME_QUIRK_LIGHTNVM, },
-       { PCI_DEVICE(0x1d1d, 0x2601),   /* CNEX Granby */
-               .driver_data = NVME_QUIRK_LIGHTNVM, },
        { PCI_DEVICE(0x10ec, 0x5762),   /* ADATA SX6000LNP */
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
index 7f6b3a9..a68704e 100644
@@ -735,13 +735,13 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
        if (ret)
                return ret;
 
-       ctrl->ctrl.queue_count = nr_io_queues + 1;
-       if (ctrl->ctrl.queue_count < 2) {
+       if (nr_io_queues == 0) {
                dev_err(ctrl->ctrl.device,
                        "unable to set any I/O queues\n");
                return -ENOMEM;
        }
 
+       ctrl->ctrl.queue_count = nr_io_queues + 1;
        dev_info(ctrl->ctrl.device,
                "creating %d I/O queues.\n", nr_io_queues);
 
@@ -1730,10 +1730,10 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
        struct request *rq;
        struct nvme_rdma_request *req;
 
-       rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
+       rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
-                       "tag 0x%x on QP %#x not found\n",
+                       "got bad command_id %#x on QP %#x\n",
                        cqe->command_id, queue->qp->qp_num);
                nvme_rdma_error_recovery(queue->ctrl);
                return;
index 8cb15ee..6450256 100644
@@ -487,11 +487,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
 {
        struct request *rq;
 
-       rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
+       rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
-                       "queue %d tag 0x%x not found\n",
-                       nvme_tcp_queue_id(queue), cqe->command_id);
+                       "got bad cqe.command_id %#x on queue %d\n",
+                       cqe->command_id, nvme_tcp_queue_id(queue));
                nvme_tcp_error_recovery(&queue->ctrl->ctrl);
                return -EINVAL;
        }
@@ -508,11 +508,11 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
 {
        struct request *rq;
 
-       rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+       rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
-                       "queue %d tag %#x not found\n",
-                       nvme_tcp_queue_id(queue), pdu->command_id);
+                       "got bad c2hdata.command_id %#x on queue %d\n",
+                       pdu->command_id, nvme_tcp_queue_id(queue));
                return -ENOENT;
        }
 
@@ -606,7 +606,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
        data->hdr.plen =
                cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
        data->ttag = pdu->ttag;
-       data->command_id = rq->tag;
+       data->command_id = nvme_cid(rq);
        data->data_offset = cpu_to_le32(req->data_sent);
        data->data_length = cpu_to_le32(req->pdu_len);
        return 0;
@@ -619,11 +619,11 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
        struct request *rq;
        int ret;
 
-       rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+       rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
-                       "queue %d tag %#x not found\n",
-                       nvme_tcp_queue_id(queue), pdu->command_id);
+                       "got bad r2t.command_id %#x on queue %d\n",
+                       pdu->command_id, nvme_tcp_queue_id(queue));
                return -ENOENT;
        }
        req = blk_mq_rq_to_pdu(rq);
@@ -702,17 +702,9 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
                              unsigned int *offset, size_t *len)
 {
        struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
-       struct nvme_tcp_request *req;
-       struct request *rq;
-
-       rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
-       if (!rq) {
-               dev_err(queue->ctrl->ctrl.device,
-                       "queue %d tag %#x not found\n",
-                       nvme_tcp_queue_id(queue), pdu->command_id);
-               return -ENOENT;
-       }
-       req = blk_mq_rq_to_pdu(rq);
+       struct request *rq =
+               nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+       struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 
        while (true) {
                int recv_len, ret;
@@ -804,8 +796,8 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
        }
 
        if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
-               struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
-                                               pdu->command_id);
+               struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+                                       pdu->command_id);
 
                nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
                queue->nr_cqe++;
@@ -1228,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 
        sock_release(queue->sock);
        kfree(queue->pdu);
+       mutex_destroy(&queue->send_mutex);
        mutex_destroy(&queue->queue_lock);
 }
 
@@ -1533,6 +1526,7 @@ err_sock:
        sock_release(queue->sock);
        queue->sock = NULL;
 err_destroy_mutex:
+       mutex_destroy(&queue->send_mutex);
        mutex_destroy(&queue->queue_lock);
        return ret;
 }
@@ -1769,13 +1763,13 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
        if (ret)
                return ret;
 
-       ctrl->queue_count = nr_io_queues + 1;
-       if (ctrl->queue_count < 2) {
+       if (nr_io_queues == 0) {
                dev_err(ctrl->device,
                        "unable to set any I/O queues\n");
                return -ENOMEM;
        }
 
+       ctrl->queue_count = nr_io_queues + 1;
        dev_info(ctrl->device,
                "creating %d I/O queues.\n", nr_io_queues);
 
index 6543015..2a89c5a 100644 (file)
@@ -72,6 +72,20 @@ static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
        return ret;
 }
 
+static const char *nvme_trace_admin_set_features(struct trace_seq *p,
+                                                u8 *cdw10)
+{
+       const char *ret = trace_seq_buffer_ptr(p);
+       u8 fid = cdw10[0];
+       u8 sv = cdw10[3] & 0x8;
+       u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+       trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
+       trace_seq_putc(p, 0);
+
+       return ret;
+}
+
 static const char *nvme_trace_admin_get_features(struct trace_seq *p,
                                                 u8 *cdw10)
 {
@@ -80,7 +94,7 @@ static const char *nvme_trace_admin_get_features(struct trace_seq *p,
        u8 sel = cdw10[1] & 0x7;
        u32 cdw11 = get_unaligned_le32(cdw10 + 4);
 
-       trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
+       trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
        trace_seq_putc(p, 0);
 
        return ret;
@@ -201,6 +215,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
                return nvme_trace_create_cq(p, cdw10);
        case nvme_admin_identify:
                return nvme_trace_admin_identify(p, cdw10);
+       case nvme_admin_set_features:
+               return nvme_trace_admin_set_features(p, cdw10);
        case nvme_admin_get_features:
                return nvme_trace_admin_get_features(p, cdw10);
        case nvme_admin_get_lba_status:
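
The new Set Features decoder pulls the Feature Identifier out of the low byte of CDW10 and reads the whole of CDW11 from the next four little-endian bytes of the command, mirroring the existing Get Features decoder. A self-contained sketch of that byte arithmetic (the helper and the sample values are illustrative; the save-bit handling is left out):

    #include <stdint.h>
    #include <stdio.h>

    /* Read a 32-bit little-endian value from an unaligned byte buffer. */
    static uint32_t get_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
            /* CDW10 followed by CDW11, as the bytes appear in the SQE. */
            uint8_t cdw10[8] = { 0x07, 0x00, 0x00, 0x00,    /* FID 0x07: Number of Queues */
                                 0x03, 0x00, 0x03, 0x00 };  /* CDW11 = 0x00030003 */

            printf("fid=0x%x, cdw11=0x%x\n", cdw10[0], (unsigned)get_le32(cdw10 + 4));
            return 0;
    }
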
index 4be2ece..973561c 100644 (file)
@@ -31,7 +31,6 @@ config NVME_TARGET_PASSTHRU
 config NVME_TARGET_LOOP
        tristate "NVMe loopback device support"
        depends on NVME_TARGET
-       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
@@ -65,7 +64,6 @@ config NVME_TARGET_FC
 config NVME_TARGET_FCLOOP
        tristate "NVMe over Fabrics FC Transport Loopback Test driver"
        depends on NVME_TARGET
-       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        depends on NVME_FC
index ac7210a..66d05ee 100644 (file)
@@ -802,6 +802,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
                 * controller teardown as a result of a keep-alive expiration.
                 */
                ctrl->reset_tbkas = true;
+               sq->ctrl->sqs[sq->qid] = NULL;
                nvmet_ctrl_put(ctrl);
                sq->ctrl = NULL; /* allows reusing the queue later */
        }
index 7d0f352..7d0454c 100644 (file)
@@ -109,21 +109,38 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
        u16 qid = le16_to_cpu(c->qid);
        u16 sqsize = le16_to_cpu(c->sqsize);
        struct nvmet_ctrl *old;
+       u16 mqes = NVME_CAP_MQES(ctrl->cap);
        u16 ret;
 
-       old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
-       if (old) {
-               pr_warn("queue already connected!\n");
-               req->error_loc = offsetof(struct nvmf_connect_command, opcode);
-               return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
-       }
        if (!sqsize) {
                pr_warn("queue size zero!\n");
                req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
+               req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
                ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
                goto err;
        }
 
+       if (ctrl->sqs[qid] != NULL) {
+               pr_warn("qid %u has already been created\n", qid);
+               req->error_loc = offsetof(struct nvmf_connect_command, qid);
+               return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+       }
+
+       if (sqsize > mqes) {
+               pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
+                               sqsize, mqes, ctrl->cntlid);
+               req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
+               req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
+               return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+       }
+
+       old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
+       if (old) {
+               pr_warn("queue already connected!\n");
+               req->error_loc = offsetof(struct nvmf_connect_command, opcode);
+               return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+       }
+
        /* note: convert queue size from 0's-based value to 1's-based value */
        nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
        nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
@@ -138,6 +155,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
                if (ret) {
                        pr_err("failed to install queue %d cntlid %d ret %x\n",
                                qid, ctrl->cntlid, ret);
+                       ctrl->sqs[qid] = NULL;
                        goto err;
                }
        }
@@ -260,11 +278,11 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
        }
 
        status = nvmet_install_queue(ctrl, req);
-       if (status) {
-               /* pass back cntlid that had the issue of installing queue */
-               req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+       if (status)
                goto out_ctrl_put;
-       }
+
+       /* pass back cntlid for successful completion */
+       req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
 
        pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
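
nvmet_install_queue() now validates the connect request before publishing the queue: a qid that already has an SQ is rejected as a sequencing error, and an sqsize above what the controller advertises in CAP.MQES is rejected as an invalid parameter. Both sqsize and MQES are 0's-based, which is why the accepted queue is set up with sqsize + 1 entries. A small worked example of that arithmetic (self-contained, values chosen for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned int mqes   = 127;   /* CAP.MQES, 0's-based: 128 entries supported */
            unsigned int sqsize = 127;   /* from the connect command, also 0's-based */

            if (sqsize > mqes) {
                    printf("rejected: sqsize %u exceeds MQES %u\n", sqsize, mqes);
                    return 1;
            }

            /* convert the 0's-based value into the real number of entries */
            printf("queue installed with %u entries\n", sqsize + 1);
            return 0;
    }
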
 
index 3a17a7e..0285ccc 100644 (file)
@@ -107,10 +107,10 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
        } else {
                struct request *rq;
 
-               rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
+               rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
                if (!rq) {
                        dev_err(queue->ctrl->ctrl.device,
-                               "tag 0x%x on queue %d not found\n",
+                               "got bad command_id %#x on queue %d\n",
                                cqe->command_id, nvme_loop_queue_idx(queue));
                        return;
                }
index 1373a3c..bff454d 100644 (file)
@@ -27,7 +27,7 @@ static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
        u8 sel = cdw10[1] & 0x7;
        u32 cdw11 = get_unaligned_le32(cdw10 + 4);
 
-       trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
+       trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
        trace_seq_putc(p, 0);
 
        return ret;
@@ -49,6 +49,20 @@ static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
        return ret;
 }
 
+static const char *nvmet_trace_admin_set_features(struct trace_seq *p,
+                                                u8 *cdw10)
+{
+       const char *ret = trace_seq_buffer_ptr(p);
+       u8 fid = cdw10[0];
+       u8 sv = cdw10[3] & 0x8;
+       u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+       trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
+       trace_seq_putc(p, 0);
+
+       return ret;
+}
+
 static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
 {
        const char *ret = trace_seq_buffer_ptr(p);
@@ -94,6 +108,8 @@ const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
        switch (opcode) {
        case nvme_admin_identify:
                return nvmet_trace_admin_identify(p, cdw10);
+       case nvme_admin_set_features:
+               return nvmet_trace_admin_set_features(p, cdw10);
        case nvme_admin_get_features:
                return nvmet_trace_admin_get_features(p, cdw10);
        case nvme_admin_get_lba_status:
index 17f8b7a..46bc30f 100644 (file)
@@ -115,14 +115,11 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
        }
 
        status = nvmet_req_find_ns(req);
-       if (status) {
-               status = NVME_SC_INTERNAL;
+       if (status)
                goto done;
-       }
 
        if (!bdev_is_zoned(req->ns->bdev)) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
-               status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto done;
        }
 
index b335c07..04b4691 100644 (file)
@@ -893,6 +893,10 @@ static int _set_required_opps(struct device *dev,
        if (!required_opp_tables)
                return 0;
 
+       /* required-opps not fully initialized yet */
+       if (lazy_linking_pending(opp_table))
+               return -EBUSY;
+
        /*
         * We only support genpd's OPPs in the "required-opps" for now, as we
         * don't know much about other use cases. Error out if the required OPP
@@ -903,10 +907,6 @@ static int _set_required_opps(struct device *dev,
                return -ENOENT;
        }
 
-       /* required-opps not fully initialized yet */
-       if (lazy_linking_pending(opp_table))
-               return -EBUSY;
-
        /* Single genpd case */
        if (!genpd_virt_devs)
                return _set_required_opp(dev, dev, opp, 0);
@@ -1856,9 +1856,6 @@ void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        kfree(opp_table->supported_hw);
        opp_table->supported_hw = NULL;
        opp_table->supported_hw_count = 0;
@@ -1944,9 +1941,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        kfree(opp_table->prop_name);
        opp_table->prop_name = NULL;
 
@@ -2056,9 +2050,6 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
        if (!opp_table->regulators)
                goto put_opp_table;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        if (opp_table->enabled) {
                for (i = opp_table->regulator_count - 1; i >= 0; i--)
                        regulator_disable(opp_table->regulators[i]);
@@ -2178,9 +2169,6 @@ void dev_pm_opp_put_clkname(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        clk_put(opp_table->clk);
        opp_table->clk = ERR_PTR(-EINVAL);
 
@@ -2279,9 +2267,6 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        opp_table->set_opp = NULL;
 
        mutex_lock(&opp_table->lock);
index d298e38..2a97c65 100644 (file)
@@ -95,15 +95,7 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
 static struct device_node *of_parse_required_opp(struct device_node *np,
                                                 int index)
 {
-       struct device_node *required_np;
-
-       required_np = of_parse_phandle(np, "required-opps", index);
-       if (unlikely(!required_np)) {
-               pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
-                      __func__, np, index);
-       }
-
-       return required_np;
+       return of_parse_phandle(np, "required-opps", index);
 }
 
 /* The caller must call dev_pm_opp_put_opp_table() after the table is used */
@@ -964,8 +956,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
                }
        }
 
-       /* There should be one of more OPP defined */
-       if (WARN_ON(!count)) {
+       /* There should be one or more OPPs defined */
+       if (!count) {
+               dev_err(dev, "%s: no supported OPPs", __func__);
                ret = -ENOENT;
                goto remove_static_opp;
        }
@@ -1327,7 +1320,7 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
 
        required_np = of_parse_required_opp(np, index);
        if (!required_np)
-               return -EINVAL;
+               return -ENODEV;
 
        opp_table = _find_table_of_opp_np(required_np);
        if (IS_ERR(opp_table)) {
index 896a45b..654ac4a 100644 (file)
@@ -145,7 +145,7 @@ static int ixp4xx_pci_check_master_abort(struct ixp4xx_pci *p)
        return 0;
 }
 
-static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
+static int ixp4xx_pci_read_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
 {
        ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
 
@@ -170,7 +170,7 @@ static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
        return ixp4xx_pci_check_master_abort(p);
 }
 
-static int ixp4xx_pci_write(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data)
+static int ixp4xx_pci_write_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data)
 {
        ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
 
@@ -308,7 +308,7 @@ static int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
        dev_dbg(p->dev, "read_config from %d size %d dev %d:%d:%d address: %08x cmd: %08x\n",
                where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
 
-       ret = ixp4xx_pci_read(p, addr, cmd, &val);
+       ret = ixp4xx_pci_read_indirect(p, addr, cmd, &val);
        if (ret)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
@@ -356,7 +356,7 @@ static int ixp4xx_pci_write_config(struct pci_bus *bus,  unsigned int devfn,
        dev_dbg(p->dev, "write_config_byte %#x to %d size %d dev %d:%d:%d addr: %08x cmd %08x\n",
                value, where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
 
-       ret = ixp4xx_pci_write(p, addr, cmd, val);
+       ret = ixp4xx_pci_write_indirect(p, addr, cmd, val);
        if (ret)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
index 9232255..0099a00 100644 (file)
@@ -129,93 +129,95 @@ void __weak arch_restore_msi_irqs(struct pci_dev *dev)
        return default_restore_msi_irqs(dev);
 }
 
-static inline __attribute_const__ u32 msi_mask(unsigned x)
-{
-       /* Don't shift by >= width of type */
-       if (x >= 5)
-               return 0xffffffff;
-       return (1 << (1 << x)) - 1;
-}
-
 /*
  * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
  * mask all MSI interrupts by clearing the MSI enable bit does not work
  * reliably as devices without an INTx disable bit will then generate a
  * level IRQ which will never be cleared.
  */
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
 {
-       u32 mask_bits = desc->masked;
+       /* Don't shift by >= width of type */
+       if (desc->msi_attrib.multi_cap >= 5)
+               return 0xffffffff;
+       return (1 << (1 << desc->msi_attrib.multi_cap)) - 1;
+}
 
-       if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
-               return 0;
+static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
+{
+       raw_spinlock_t *lock = &desc->dev->msi_lock;
+       unsigned long flags;
 
-       mask_bits &= ~mask;
-       mask_bits |= flag;
+       raw_spin_lock_irqsave(lock, flags);
+       desc->msi_mask &= ~clear;
+       desc->msi_mask |= set;
        pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
-                              mask_bits);
-
-       return mask_bits;
+                              desc->msi_mask);
+       raw_spin_unlock_irqrestore(lock, flags);
 }
 
-static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+static inline void pci_msi_mask(struct msi_desc *desc, u32 mask)
 {
-       desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
+       pci_msi_update_mask(desc, 0, mask);
 }
 
-static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
+static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
 {
-       if (desc->msi_attrib.is_virtual)
-               return NULL;
+       pci_msi_update_mask(desc, mask, 0);
+}
 
-       return desc->mask_base +
-               desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
+{
+       return desc->mask_base + desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
 }
 
 /*
- * This internal function does not flush PCI writes to the device.
- * All users must ensure that they read from the device before either
- * assuming that the device state is up to date, or returning out of this
- * file.  This saves a few milliseconds when initialising devices with lots
- * of MSI-X interrupts.
+ * This internal function does not flush PCI writes to the device.  All
+ * users must ensure that they read from the device before either assuming
+ * that the device state is up to date, or returning out of this file.
+ * It does not affect the msi_desc::msix_ctrl cache either. Use with care!
  */
-u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
+static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
 {
-       u32 mask_bits = desc->masked;
-       void __iomem *desc_addr;
+       void __iomem *desc_addr = pci_msix_desc_addr(desc);
 
-       if (pci_msi_ignore_mask)
-               return 0;
-
-       desc_addr = pci_msix_desc_addr(desc);
-       if (!desc_addr)
-               return 0;
-
-       mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
-       if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
-               mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+       writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+}
 
-       writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+static inline void pci_msix_mask(struct msi_desc *desc)
+{
+       desc->msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+       pci_msix_write_vector_ctrl(desc, desc->msix_ctrl);
+       /* Flush write to device */
+       readl(desc->mask_base);
+}
 
-       return mask_bits;
+static inline void pci_msix_unmask(struct msi_desc *desc)
+{
+       desc->msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+       pci_msix_write_vector_ctrl(desc, desc->msix_ctrl);
 }
 
-static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
 {
-       desc->masked = __pci_msix_desc_mask_irq(desc, flag);
+       if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
+               return;
+
+       if (desc->msi_attrib.is_msix)
+               pci_msix_mask(desc);
+       else if (desc->msi_attrib.maskbit)
+               pci_msi_mask(desc, mask);
 }
 
-static void msi_set_mask_bit(struct irq_data *data, u32 flag)
+static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
 {
-       struct msi_desc *desc = irq_data_get_msi_desc(data);
+       if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
+               return;
 
-       if (desc->msi_attrib.is_msix) {
-               msix_mask_irq(desc, flag);
-               readl(desc->mask_base);         /* Flush write to device */
-       } else {
-               unsigned offset = data->irq - desc->irq;
-               msi_mask_irq(desc, 1 << offset, flag << offset);
-       }
+       if (desc->msi_attrib.is_msix)
+               pci_msix_unmask(desc);
+       else if (desc->msi_attrib.maskbit)
+               pci_msi_unmask(desc, mask);
 }
 
 /**
@@ -224,7 +226,9 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag)
  */
 void pci_msi_mask_irq(struct irq_data *data)
 {
-       msi_set_mask_bit(data, 1);
+       struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+       __pci_msi_mask_desc(desc, BIT(data->irq - desc->irq));
 }
 EXPORT_SYMBOL_GPL(pci_msi_mask_irq);
 
@@ -234,7 +238,9 @@ EXPORT_SYMBOL_GPL(pci_msi_mask_irq);
  */
 void pci_msi_unmask_irq(struct irq_data *data)
 {
-       msi_set_mask_bit(data, 0);
+       struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+       __pci_msi_unmask_desc(desc, BIT(data->irq - desc->irq));
 }
 EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);
 
@@ -255,10 +261,8 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
        if (entry->msi_attrib.is_msix) {
                void __iomem *base = pci_msix_desc_addr(entry);
 
-               if (!base) {
-                       WARN_ON(1);
+               if (WARN_ON_ONCE(entry->msi_attrib.is_virtual))
                        return;
-               }
 
                msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
                msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
@@ -289,13 +293,32 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
                /* Don't touch the hardware now */
        } else if (entry->msi_attrib.is_msix) {
                void __iomem *base = pci_msix_desc_addr(entry);
+               u32 ctrl = entry->msix_ctrl;
+               bool unmasked = !(ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);
 
-               if (!base)
+               if (entry->msi_attrib.is_virtual)
                        goto skip;
 
+               /*
+                * The specification mandates that the entry is masked
+                * when the message is modified:
+                *
+                * "If software changes the Address or Data value of an
+                * entry while the entry is unmasked, the result is
+                * undefined."
+                */
+               if (unmasked)
+                       pci_msix_write_vector_ctrl(entry, ctrl | PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
                writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
                writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
                writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+
+               if (unmasked)
+                       pci_msix_write_vector_ctrl(entry, ctrl);
+
+               /* Ensure that the writes are visible in the device */
+               readl(base + PCI_MSIX_ENTRY_DATA);
        } else {
                int pos = dev->msi_cap;
                u16 msgctl;
@@ -316,6 +339,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
                        pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
                                              msg->data);
                }
+               /* Ensure that the writes are visible in the device */
+               pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
        }
 
 skip:
@@ -338,9 +363,7 @@ static void free_msi_irqs(struct pci_dev *dev)
 {
        struct list_head *msi_list = dev_to_msi_list(&dev->dev);
        struct msi_desc *entry, *tmp;
-       struct attribute **msi_attrs;
-       struct device_attribute *dev_attr;
-       int i, count = 0;
+       int i;
 
        for_each_pci_msi_entry(entry, dev)
                if (entry->irq)
@@ -360,18 +383,7 @@ static void free_msi_irqs(struct pci_dev *dev)
        }
 
        if (dev->msi_irq_groups) {
-               sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
-               msi_attrs = dev->msi_irq_groups[0]->attrs;
-               while (msi_attrs[count]) {
-                       dev_attr = container_of(msi_attrs[count],
-                                               struct device_attribute, attr);
-                       kfree(dev_attr->attr.name);
-                       kfree(dev_attr);
-                       ++count;
-               }
-               kfree(msi_attrs);
-               kfree(dev->msi_irq_groups[0]);
-               kfree(dev->msi_irq_groups);
+               msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups);
                dev->msi_irq_groups = NULL;
        }
 }
@@ -408,8 +420,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
        arch_restore_msi_irqs(dev);
 
        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
-       msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
-                    entry->masked);
+       pci_msi_update_mask(entry, 0, 0);
        control &= ~PCI_MSI_FLAGS_QSIZE;
        control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
        pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
@@ -440,7 +451,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 
        arch_restore_msi_irqs(dev);
        for_each_pci_msi_entry(entry, dev)
-               msix_mask_irq(entry, entry->masked);
+               pci_msix_write_vector_ctrl(entry, entry->msix_ctrl);
 
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
@@ -452,102 +463,6 @@ void pci_restore_msi_state(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_restore_msi_state);
 
-static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
-                            char *buf)
-{
-       struct msi_desc *entry;
-       unsigned long irq;
-       int retval;
-
-       retval = kstrtoul(attr->attr.name, 10, &irq);
-       if (retval)
-               return retval;
-
-       entry = irq_get_msi_desc(irq);
-       if (!entry)
-               return -ENODEV;
-
-       return sysfs_emit(buf, "%s\n",
-                         entry->msi_attrib.is_msix ? "msix" : "msi");
-}
-
-static int populate_msi_sysfs(struct pci_dev *pdev)
-{
-       struct attribute **msi_attrs;
-       struct attribute *msi_attr;
-       struct device_attribute *msi_dev_attr;
-       struct attribute_group *msi_irq_group;
-       const struct attribute_group **msi_irq_groups;
-       struct msi_desc *entry;
-       int ret = -ENOMEM;
-       int num_msi = 0;
-       int count = 0;
-       int i;
-
-       /* Determine how many msi entries we have */
-       for_each_pci_msi_entry(entry, pdev)
-               num_msi += entry->nvec_used;
-       if (!num_msi)
-               return 0;
-
-       /* Dynamically create the MSI attributes for the PCI device */
-       msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL);
-       if (!msi_attrs)
-               return -ENOMEM;
-       for_each_pci_msi_entry(entry, pdev) {
-               for (i = 0; i < entry->nvec_used; i++) {
-                       msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
-                       if (!msi_dev_attr)
-                               goto error_attrs;
-                       msi_attrs[count] = &msi_dev_attr->attr;
-
-                       sysfs_attr_init(&msi_dev_attr->attr);
-                       msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
-                                                           entry->irq + i);
-                       if (!msi_dev_attr->attr.name)
-                               goto error_attrs;
-                       msi_dev_attr->attr.mode = S_IRUGO;
-                       msi_dev_attr->show = msi_mode_show;
-                       ++count;
-               }
-       }
-
-       msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
-       if (!msi_irq_group)
-               goto error_attrs;
-       msi_irq_group->name = "msi_irqs";
-       msi_irq_group->attrs = msi_attrs;
-
-       msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL);
-       if (!msi_irq_groups)
-               goto error_irq_group;
-       msi_irq_groups[0] = msi_irq_group;
-
-       ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
-       if (ret)
-               goto error_irq_groups;
-       pdev->msi_irq_groups = msi_irq_groups;
-
-       return 0;
-
-error_irq_groups:
-       kfree(msi_irq_groups);
-error_irq_group:
-       kfree(msi_irq_group);
-error_attrs:
-       count = 0;
-       msi_attr = msi_attrs[count];
-       while (msi_attr) {
-               msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
-               kfree(msi_attr->name);
-               kfree(msi_dev_attr);
-               ++count;
-               msi_attr = msi_attrs[count];
-       }
-       kfree(msi_attrs);
-       return ret;
-}
-
 static struct msi_desc *
 msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
 {
@@ -581,7 +496,7 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
 
        /* Save the initial mask status */
        if (entry->msi_attrib.maskbit)
-               pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
+               pci_read_config_dword(dev, entry->mask_pos, &entry->msi_mask);
 
 out:
        kfree(masks);
@@ -592,8 +507,11 @@ static int msi_verify_entries(struct pci_dev *dev)
 {
        struct msi_desc *entry;
 
+       if (!dev->no_64bit_msi)
+               return 0;
+
        for_each_pci_msi_entry(entry, dev) {
-               if (entry->msg.address_hi && dev->no_64bit_msi) {
+               if (entry->msg.address_hi) {
                        pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
                                entry->msg.address_hi, entry->msg.address_lo);
                        return -EIO;
@@ -619,7 +537,6 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
 {
        struct msi_desc *entry;
        int ret;
-       unsigned mask;
 
        pci_msi_set_enable(dev, 0);     /* Disable MSI during set up */
 
@@ -628,31 +545,23 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
                return -ENOMEM;
 
        /* All MSIs are unmasked by default; mask them all */
-       mask = msi_mask(entry->msi_attrib.multi_cap);
-       msi_mask_irq(entry, mask, mask);
+       pci_msi_mask(entry, msi_multi_mask(entry));
 
        list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
 
        /* Configure MSI capability structure */
        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
-       if (ret) {
-               msi_mask_irq(entry, mask, ~mask);
-               free_msi_irqs(dev);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        ret = msi_verify_entries(dev);
-       if (ret) {
-               msi_mask_irq(entry, mask, ~mask);
-               free_msi_irqs(dev);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
-       ret = populate_msi_sysfs(dev);
-       if (ret) {
-               msi_mask_irq(entry, mask, ~mask);
-               free_msi_irqs(dev);
-               return ret;
+       dev->msi_irq_groups = msi_populate_sysfs(&dev->dev);
+       if (IS_ERR(dev->msi_irq_groups)) {
+               ret = PTR_ERR(dev->msi_irq_groups);
+               goto err;
        }
 
        /* Set MSI enabled bits */
@@ -663,6 +572,11 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
        pcibios_free_irq(dev);
        dev->irq = entry->irq;
        return 0;
+
+err:
+       pci_msi_unmask(entry, msi_multi_mask(entry));
+       free_msi_irqs(dev);
+       return ret;
 }
 
 static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
@@ -691,6 +605,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 {
        struct irq_affinity_desc *curmsk, *masks = NULL;
        struct msi_desc *entry;
+       void __iomem *addr;
        int ret, i;
        int vec_count = pci_msix_vec_count(dev);
 
@@ -711,6 +626,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 
                entry->msi_attrib.is_msix       = 1;
                entry->msi_attrib.is_64         = 1;
+
                if (entries)
                        entry->msi_attrib.entry_nr = entries[i].entry;
                else
@@ -722,6 +638,11 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
                entry->msi_attrib.default_irq   = dev->irq;
                entry->mask_base                = base;
 
+               if (!entry->msi_attrib.is_virtual) {
+                       addr = pci_msix_desc_addr(entry);
+                       entry->msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+               }
+
                list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
                if (masks)
                        curmsk++;
@@ -732,26 +653,28 @@ out:
        return ret;
 }
 
-static void msix_program_entries(struct pci_dev *dev,
-                                struct msix_entry *entries)
+static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
 {
        struct msi_desc *entry;
-       int i = 0;
-       void __iomem *desc_addr;
 
        for_each_pci_msi_entry(entry, dev) {
-               if (entries)
-                       entries[i++].vector = entry->irq;
+               if (entries) {
+                       entries->vector = entry->irq;
+                       entries++;
+               }
+       }
+}
 
-               desc_addr = pci_msix_desc_addr(entry);
-               if (desc_addr)
-                       entry->masked = readl(desc_addr +
-                                             PCI_MSIX_ENTRY_VECTOR_CTRL);
-               else
-                       entry->masked = 0;
+static void msix_mask_all(void __iomem *base, int tsize)
+{
+       u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+       int i;
 
-               msix_mask_irq(entry, 1);
-       }
+       if (pci_msi_ignore_mask)
+               return;
+
+       for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
+               writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
 
 /**
@@ -768,22 +691,33 @@ static void msix_program_entries(struct pci_dev *dev,
 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
                                int nvec, struct irq_affinity *affd)
 {
-       int ret;
-       u16 control;
        void __iomem *base;
+       int ret, tsize;
+       u16 control;
 
-       /* Ensure MSI-X is disabled while it is set up */
-       pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+       /*
+        * Some devices require MSI-X to be enabled before the MSI-X
+        * registers can be accessed.  Mask all the vectors to prevent
+        * interrupts coming in before they're fully set up.
+        */
+       pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
+                                   PCI_MSIX_FLAGS_ENABLE);
 
        pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
        /* Request & Map MSI-X table region */
-       base = msix_map_region(dev, msix_table_size(control));
-       if (!base)
-               return -ENOMEM;
+       tsize = msix_table_size(control);
+       base = msix_map_region(dev, tsize);
+       if (!base) {
+               ret = -ENOMEM;
+               goto out_disable;
+       }
+
+       /* Ensure that all table entries are masked. */
+       msix_mask_all(base, tsize);
 
        ret = msix_setup_entries(dev, base, entries, nvec, affd);
        if (ret)
-               return ret;
+               goto out_disable;
 
        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
        if (ret)
@@ -794,19 +728,13 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
        if (ret)
                goto out_free;
 
-       /*
-        * Some devices require MSI-X to be enabled before we can touch the
-        * MSI-X registers.  We need to mask all the vectors to prevent
-        * interrupts coming in before they're fully set up.
-        */
-       pci_msix_clear_and_set_ctrl(dev, 0,
-                               PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
-
-       msix_program_entries(dev, entries);
+       msix_update_entries(dev, entries);
 
-       ret = populate_msi_sysfs(dev);
-       if (ret)
+       dev->msi_irq_groups = msi_populate_sysfs(&dev->dev);
+       if (IS_ERR(dev->msi_irq_groups)) {
+               ret = PTR_ERR(dev->msi_irq_groups);
                goto out_free;
+       }
 
        /* Set MSI-X enabled bits and unmask the function */
        pci_intx_for_msi(dev, 0);
@@ -836,6 +764,9 @@ out_avail:
 out_free:
        free_msi_irqs(dev);
 
+out_disable:
+       pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+
        return ret;
 }
 
@@ -916,7 +847,6 @@ EXPORT_SYMBOL(pci_msi_vec_count);
 static void pci_msi_shutdown(struct pci_dev *dev)
 {
        struct msi_desc *desc;
-       u32 mask;
 
        if (!pci_msi_enable || !dev || !dev->msi_enabled)
                return;
@@ -929,9 +859,7 @@ static void pci_msi_shutdown(struct pci_dev *dev)
        dev->msi_enabled = 0;
 
        /* Return the device with MSI unmasked as initial states */
-       mask = msi_mask(desc->msi_attrib.multi_cap);
-       /* Keep cached state to be restored */
-       __pci_msi_desc_mask_irq(desc, mask, ~mask);
+       pci_msi_unmask(desc, msi_multi_mask(desc));
 
        /* Restore dev->irq to its default pin-assertion IRQ */
        dev->irq = desc->msi_attrib.default_irq;
@@ -1016,10 +944,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
        }
 
        /* Return the device with MSI-X masked as initial states */
-       for_each_pci_msi_entry(entry, dev) {
-               /* Keep cached states to be restored */
-               __pci_msix_desc_mask_irq(entry, 1);
-       }
+       for_each_pci_msi_entry(entry, dev)
+               pci_msix_mask(entry);
 
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
        pci_intx_for_msi(dev, 1);
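
Two ideas carry this rewrite: the per-device MSI mask register becomes a locked read-modify-write of a cached value (pci_msi_update_mask() under msi_lock), and an MSI-X message is only ever rewritten while the vector is masked, as the spec quote in the hunk requires, with a read-back to flush the posted writes. A condensed, compilable sketch of the second pattern; the register accessors below are stand-ins for writel()/readl(), not the driver's code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ENTRY_CTRL_MASKBIT 0x1u

    static uint32_t vec_ctrl;   /* models the per-vector control register */

    static void write_vector_ctrl(uint32_t v) { vec_ctrl = v; }

    static void write_msg(uint64_t addr, uint32_t data)
    {
            printf("programming addr=%#llx data=%#x while masked=%u\n",
                   (unsigned long long)addr, (unsigned)data,
                   (unsigned)(vec_ctrl & ENTRY_CTRL_MASKBIT));
    }

    static void update_msix_message(uint32_t cached_ctrl, uint64_t addr, uint32_t data)
    {
            bool unmasked = !(cached_ctrl & ENTRY_CTRL_MASKBIT);

            /* the spec forbids changing address/data of an unmasked entry */
            if (unmasked)
                    write_vector_ctrl(cached_ctrl | ENTRY_CTRL_MASKBIT);

            write_msg(addr, data);

            if (unmasked)
                    write_vector_ctrl(cached_ctrl);  /* restore the caller's mask state */

            /* a readl() of the entry would follow here to flush the posted writes */
    }

    int main(void)
    {
            update_msix_message(0, 0xfee00000u, 0x4041u);
            return 0;
    }
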
index 5d63df7..7bbf267 100644 (file)
@@ -978,7 +978,7 @@ void pci_create_legacy_files(struct pci_bus *b)
        b->legacy_mem->size = 1024*1024;
        b->legacy_mem->attr.mode = 0600;
        b->legacy_mem->mmap = pci_mmap_legacy_mem;
-       b->legacy_io->mapping = iomem_get_mapping();
+       b->legacy_mem->mapping = iomem_get_mapping();
        pci_adjust_legacy_attr(b, pci_mmap_mem);
        error = device_create_bin_file(&b->dev, b->legacy_mem);
        if (error)
index aacf575..a5e6759 100644 (file)
@@ -1906,11 +1906,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
         * so that things like MSI message writing will behave as expected
         * (e.g. if the device really is in D0 at enable time).
         */
-       if (dev->pm_cap) {
-               u16 pmcsr;
-               pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
-               dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
-       }
+       pci_update_current_state(dev, dev->current_state);
 
        if (atomic_inc_return(&dev->enable_cnt) > 1)
                return 0;               /* already enabled */
@@ -2495,7 +2491,14 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable
        if (enable) {
                int error;
 
-               if (pci_pme_capable(dev, state))
+               /*
+                * Enable PME signaling if the device can signal PME from
+                * D3cold regardless of whether or not it can signal PME from
+                * the current target state, because that will allow it to
+                * signal PME when the hierarchy above it goes into D3cold and
+                * the device itself ends up in D3cold as a result of that.
+                */
+               if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
                        pci_pme_active(dev, true);
                else
                        ret = 1;
@@ -2599,16 +2602,20 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
        if (dev->current_state == PCI_D3cold)
                target_state = PCI_D3cold;
 
-       if (wakeup) {
+       if (wakeup && dev->pme_support) {
+               pci_power_t state = target_state;
+
                /*
                 * Find the deepest state from which the device can generate
                 * PME#.
                 */
-               if (dev->pme_support) {
-                       while (target_state
-                             && !(dev->pme_support & (1 << target_state)))
-                               target_state--;
-               }
+               while (state && !(dev->pme_support & (1 << state)))
+                       state--;
+
+               if (state)
+                       return state;
+               else if (dev->pme_support & 1)
+                       return PCI_D0;
        }
 
        return target_state;
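
The reshuffled pci_target_state() walks down from the wakeup target until it finds a state the device can signal PME# from, and falls back to D0 only when bit 0 of pme_support is set; otherwise the original target is kept. A self-contained walk-through of that loop with example values (D0..D3cold encoded as 0..4, matching the PCI power state numbering):

    #include <stdio.h>

    enum { D0, D1, D2, D3HOT, D3COLD };

    /* Deepest state not deeper than target from which PME# can be signalled. */
    static int deepest_pme_state(unsigned int pme_support, int target)
    {
            int state = target;

            while (state && !(pme_support & (1u << state)))
                    state--;

            if (state)
                    return state;
            if (pme_support & 1u)
                    return D0;
            return target;   /* no usable PME state: keep the target */
    }

    int main(void)
    {
            /* device reports PME# from D0 and D3hot only (bits 0 and 3 set) */
            unsigned int pme_support = (1u << D0) | (1u << D3HOT);

            printf("target D3cold -> %d\n", deepest_pme_state(pme_support, D3COLD)); /* 3 */
            printf("target D2     -> %d\n", deepest_pme_state(pme_support, D2));     /* 0 */
            return 0;
    }
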
index 6d74386..ab3de15 100644 (file)
@@ -1900,6 +1900,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
 
 #ifdef CONFIG_X86_IO_APIC
 static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
index 85887d8..192c904 100644 (file)
@@ -112,6 +112,7 @@ static int i82092aa_pci_probe(struct pci_dev *dev,
        for (i = 0; i < socket_count; i++) {
                sockets[i].card_state = 1; /* 1 = present but empty */
                sockets[i].io_base = pci_resource_start(dev, 0);
+               sockets[i].dev = dev;
                sockets[i].socket.features |= SS_CAP_PCCARD;
                sockets[i].socket.map_size = 0x1000;
                sockets[i].socket.irq_mask = 0;
index c8b3e39..781f220 100644 (file)
@@ -833,7 +833,7 @@ static void owl_gpio_irq_handler(struct irq_desc *desc)
        unsigned int parent = irq_desc_get_irq(desc);
        const struct owl_gpio_port *port;
        void __iomem *base;
-       unsigned int pin, irq, offset = 0, i;
+       unsigned int pin, offset = 0, i;
        unsigned long pending_irq;
 
        chained_irq_enter(chip, desc);
@@ -849,8 +849,7 @@ static void owl_gpio_irq_handler(struct irq_desc *desc)
                pending_irq = readl_relaxed(base + port->intc_pd);
 
                for_each_set_bit(pin, &pending_irq, port->pins) {
-                       irq = irq_find_mapping(domain, offset + pin);
-                       generic_handle_irq(irq);
+                       generic_handle_domain_irq(domain, offset + pin);
 
                        /* clear pending interrupt */
                        owl_gpio_update_reg(base + port->intc_pd, pin, true);
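
This and the following pinctrl/GPIO hunks are the same mechanical conversion: rather than translating the hardware irq number to a Linux virq with irq_find_mapping()/irq_linear_revmap() and feeding that to generic_handle_irq(), the chained handler passes the domain and the hardware number straight to generic_handle_domain_irq(), which performs the lookup itself and copes with a missing mapping. Schematically (a kernel-style fragment, not a complete handler):

    /* before */
    for_each_set_bit(pin, &pending, ngpio) {
            virq = irq_find_mapping(domain, pin);
            generic_handle_irq(virq);
    }

    /* after */
    for_each_set_bit(pin, &pending, ngpio)
            generic_handle_domain_irq(domain, pin);
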
index 2c87af1..8b34d2c 100644 (file)
@@ -395,8 +395,8 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
        events &= pc->enabled_irq_map[bank];
        for_each_set_bit(offset, &events, 32) {
                gpio = (32 * bank) + offset;
-               generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irq.domain,
-                                                    gpio));
+               generic_handle_domain_irq(pc->gpio_chip.irq.domain,
+                                         gpio);
        }
 }
 
index dc511b9..a7a0dd6 100644 (file)
@@ -176,7 +176,6 @@ static void iproc_gpio_irq_handler(struct irq_desc *desc)
 
                for_each_set_bit(bit, &val, NGPIOS_PER_BANK) {
                        unsigned pin = NGPIOS_PER_BANK * i + bit;
-                       int child_irq = irq_find_mapping(gc->irq.domain, pin);
 
                        /*
                         * Clear the interrupt before invoking the
@@ -185,7 +184,7 @@ static void iproc_gpio_irq_handler(struct irq_desc *desc)
                        writel(BIT(bit), chip->base + (i * GPIO_BANK_SIZE) +
                               IPROC_GPIO_INT_CLR_OFFSET);
 
-                       generic_handle_irq(child_irq);
+                       generic_handle_domain_irq(gc->irq.domain, pin);
                }
        }
 
index a00a42a..e031428 100644 (file)
@@ -155,8 +155,7 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
                int_bits = level | event;
 
                for_each_set_bit(bit, &int_bits, gc->ngpio)
-                       generic_handle_irq(
-                               irq_linear_revmap(gc->irq.domain, bit));
+                       generic_handle_domain_irq(gc->irq.domain, bit);
        }
 
        return  int_bits ? IRQ_HANDLED : IRQ_NONE;
index 394a421..8f23d12 100644 (file)
@@ -1444,7 +1444,6 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
        u32 base, pin;
        void __iomem *reg;
        unsigned long pending;
-       unsigned int virq;
 
        /* check from GPIO controller which pin triggered the interrupt */
        for (base = 0; base < vg->chip.ngpio; base += 32) {
@@ -1460,10 +1459,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
                raw_spin_lock(&byt_lock);
                pending = readl(reg);
                raw_spin_unlock(&byt_lock);
-               for_each_set_bit(pin, &pending, 32) {
-                       virq = irq_find_mapping(vg->chip.irq.domain, base + pin);
-                       generic_handle_irq(virq);
-               }
+               for_each_set_bit(pin, &pending, 32)
+                       generic_handle_domain_irq(vg->chip.irq.domain, base + pin);
        }
        chip->irq_eoi(data);
 }
index 2ed17cd..9800990 100644 (file)
@@ -1409,11 +1409,10 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 
        for_each_set_bit(intr_line, &pending, community->nirqs) {
-               unsigned int irq, offset;
+               unsigned int offset;
 
                offset = cctx->intr_lines[intr_line];
-               irq = irq_find_mapping(gc->irq.domain, offset);
-               generic_handle_irq(irq);
+               generic_handle_domain_irq(gc->irq.domain, offset);
        }
 
        chained_irq_exit(chip, desc);
index 0a48ca4..561fa32 100644 (file)
@@ -653,12 +653,8 @@ static void lp_gpio_irq_handler(struct irq_desc *desc)
                /* Only interrupts that are enabled */
                pending = ioread32(reg) & ioread32(ena);
 
-               for_each_set_bit(pin, &pending, 32) {
-                       unsigned int irq;
-
-                       irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
-                       generic_handle_irq(irq);
-               }
+               for_each_set_bit(pin, &pending, 32)
+                       generic_handle_domain_irq(lg->chip.irq.domain, base + pin);
        }
        chip->irq_eoi(data);
 }
index 3e4ef2b..0bcd195 100644 (file)
@@ -701,32 +701,32 @@ static const struct pinctrl_pin_desc tglh_pins[] = {
 
 static const struct intel_padgroup tglh_community0_gpps[] = {
        TGL_GPP(0, 0, 24, 0),                           /* GPP_A */
-       TGL_GPP(1, 25, 44, 128),                        /* GPP_R */
-       TGL_GPP(2, 45, 70, 32),                         /* GPP_B */
-       TGL_GPP(3, 71, 78, INTEL_GPIO_BASE_NOMAP),      /* vGPIO_0 */
+       TGL_GPP(1, 25, 44, 32),                         /* GPP_R */
+       TGL_GPP(2, 45, 70, 64),                         /* GPP_B */
+       TGL_GPP(3, 71, 78, 96),                         /* vGPIO_0 */
 };
 
 static const struct intel_padgroup tglh_community1_gpps[] = {
-       TGL_GPP(0, 79, 104, 96),                        /* GPP_D */
-       TGL_GPP(1, 105, 128, 64),                       /* GPP_C */
-       TGL_GPP(2, 129, 136, 160),                      /* GPP_S */
-       TGL_GPP(3, 137, 153, 192),                      /* GPP_G */
-       TGL_GPP(4, 154, 180, 224),                      /* vGPIO */
+       TGL_GPP(0, 79, 104, 128),                       /* GPP_D */
+       TGL_GPP(1, 105, 128, 160),                      /* GPP_C */
+       TGL_GPP(2, 129, 136, 192),                      /* GPP_S */
+       TGL_GPP(3, 137, 153, 224),                      /* GPP_G */
+       TGL_GPP(4, 154, 180, 256),                      /* vGPIO */
 };
 
 static const struct intel_padgroup tglh_community3_gpps[] = {
-       TGL_GPP(0, 181, 193, 256),                      /* GPP_E */
-       TGL_GPP(1, 194, 217, 288),                      /* GPP_F */
+       TGL_GPP(0, 181, 193, 288),                      /* GPP_E */
+       TGL_GPP(1, 194, 217, 320),                      /* GPP_F */
 };
 
 static const struct intel_padgroup tglh_community4_gpps[] = {
-       TGL_GPP(0, 218, 241, 320),                      /* GPP_H */
+       TGL_GPP(0, 218, 241, 352),                      /* GPP_H */
        TGL_GPP(1, 242, 251, 384),                      /* GPP_J */
-       TGL_GPP(2, 252, 266, 352),                      /* GPP_K */
+       TGL_GPP(2, 252, 266, 416),                      /* GPP_K */
 };
 
 static const struct intel_padgroup tglh_community5_gpps[] = {
-       TGL_GPP(0, 267, 281, 416),                      /* GPP_I */
+       TGL_GPP(0, 267, 281, 448),                      /* GPP_I */
        TGL_GPP(1, 282, 290, INTEL_GPIO_BASE_NOMAP),    /* JTAG */
 };
 
index 3b9b5db..f7b54a5 100644 (file)
@@ -319,7 +319,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct mtk_eint *eint = irq_desc_get_handler_data(desc);
        unsigned int status, eint_num;
-       int offset, mask_offset, index, virq;
+       int offset, mask_offset, index;
        void __iomem *reg =  mtk_eint_get_offset(eint, 0, eint->regs->stat);
        int dual_edge, start_level, curr_level;
 
@@ -331,7 +331,6 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
                        offset = __ffs(status);
                        mask_offset = eint_num >> 5;
                        index = eint_num + offset;
-                       virq = irq_find_mapping(eint->domain, index);
                        status &= ~BIT(offset);
 
                        /*
@@ -361,7 +360,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
                                                                 index);
                        }
 
-                       generic_handle_irq(virq);
+                       generic_handle_domain_irq(eint->domain, index);
 
                        if (dual_edge) {
                                curr_level = mtk_eint_flip_edge(eint, index);
index 5b3b048..45ebdeb 100644 (file)
@@ -925,12 +925,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
                        err = hw->soc->bias_set(hw, desc, pullup);
                        if (err)
                                return err;
-               } else if (hw->soc->bias_set_combo) {
-                       err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
-                       if (err)
-                               return err;
                } else {
-                       return -ENOTSUPP;
+                       err = mtk_pinconf_bias_set_rev1(hw, desc, pullup);
+                       if (err)
+                               err = mtk_pinconf_bias_set(hw, desc, pullup);
                }
        }
 
index abfe11c..39828e9 100644 (file)
@@ -815,7 +815,7 @@ static void nmk_gpio_irq_handler(struct irq_desc *desc)
        while (status) {
                int bit = __ffs(status);
 
-               generic_handle_irq(irq_find_mapping(chip->irq.domain, bit));
+               generic_handle_domain_irq(chip->irq.domain, bit);
                status &= ~BIT(bit);
        }
 
index bb1ea47..4d81908 100644 (file)
@@ -231,7 +231,7 @@ static void npcmgpio_irq_handler(struct irq_desc *desc)
 
        sts &= en;
        for_each_set_bit(bit, (const void *)&sts, NPCM7XX_GPIO_PER_BANK)
-               generic_handle_irq(irq_linear_revmap(gc->irq.domain, bit));
+               generic_handle_domain_irq(gc->irq.domain, bit);
        chained_irq_exit(chip, desc);
 }
 
index a76be6c..c001f2e 100644 (file)
@@ -444,8 +444,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
        unsigned long flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
-       u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
-                       BIT(WAKE_CNTRL_OFF_S4);
+       u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
 
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
@@ -621,14 +620,12 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
                        if (!(regval & PIN_IRQ_PENDING) ||
                            !(regval & BIT(INTERRUPT_MASK_OFF)))
                                continue;
-                       irq = irq_find_mapping(gc->irq.domain, irqnr + i);
-                       if (irq != 0)
-                               generic_handle_irq(irq);
+                       generic_handle_domain_irq(gc->irq.domain, irqnr + i);
 
                        /* Clear interrupt.
                         * We must read the pin register again, in case the
                         * value was changed while executing
-                        * generic_handle_irq() above.
+                        * generic_handle_domain_irq() above.
                         * If we didn't find a mapping for the interrupt,
                         * disable it in order to avoid a system hang caused
                         * by an interrupt storm.
index 72e6df7..6022496 100644 (file)
@@ -1712,10 +1712,8 @@ static void gpio_irq_handler(struct irq_desc *desc)
                        continue;
                }
 
-               for_each_set_bit(n, &isr, BITS_PER_LONG) {
-                       generic_handle_irq(irq_find_mapping(
-                                          gpio_chip->irq.domain, n));
-               }
+               for_each_set_bit(n, &isr, BITS_PER_LONG)
+                       generic_handle_domain_irq(gpio_chip->irq.domain, n);
        }
        chained_irq_exit(chip, desc);
        /* now it may re-trigger */
index 38cc20f..fb713f9 100644 (file)
@@ -155,7 +155,7 @@ static void eqbr_irq_handler(struct irq_desc *desc)
        pins = readl(gctrl->membase + GPIO_IRNCR);
 
        for_each_set_bit(offset, &pins, gc->ngpio)
-               generic_handle_irq(irq_find_mapping(gc->irq.domain, offset));
+               generic_handle_domain_irq(gc->irq.domain, offset);
 
        chained_irq_exit(ic, desc);
 }
index 983ba98..ce9cc71 100644 (file)
@@ -3080,7 +3080,7 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
                flag = ingenic_gpio_read_reg(jzgc, JZ4730_GPIO_GPFR);
 
        for_each_set_bit(i, &flag, 32)
-               generic_handle_irq(irq_linear_revmap(gc->irq.domain, i));
+               generic_handle_domain_irq(gc->irq.domain, i);
        chained_irq_exit(irq_chip, desc);
 }
 
index f831526..49e3268 100644 (file)
@@ -950,23 +950,37 @@ static int k210_fpioa_probe(struct platform_device *pdev)
                return ret;
 
        pdata->pclk = devm_clk_get_optional(dev, "pclk");
-       if (!IS_ERR(pdata->pclk))
-               clk_prepare_enable(pdata->pclk);
+       if (!IS_ERR(pdata->pclk)) {
+               ret = clk_prepare_enable(pdata->pclk);
+               if (ret)
+                       goto disable_clk;
+       }
 
        pdata->sysctl_map =
                syscon_regmap_lookup_by_phandle_args(np,
                                                "canaan,k210-sysctl-power",
                                                1, &pdata->power_offset);
-       if (IS_ERR(pdata->sysctl_map))
-               return PTR_ERR(pdata->sysctl_map);
+       if (IS_ERR(pdata->sysctl_map)) {
+               ret = PTR_ERR(pdata->sysctl_map);
+               goto disable_pclk;
+       }
 
        k210_fpioa_init_ties(pdata);
 
        pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata);
-       if (IS_ERR(pdata->pctl))
-               return PTR_ERR(pdata->pctl);
+       if (IS_ERR(pdata->pctl)) {
+               ret = PTR_ERR(pdata->pctl);
+               goto disable_pclk;
+       }
 
        return 0;
+
+disable_pclk:
+       clk_disable_unprepare(pdata->pclk);
+disable_clk:
+       clk_disable_unprepare(pdata->clk);
+
+       return ret;
 }
 
 static const struct of_device_id k210_fpioa_dt_ids[] = {
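The k210 hunk above converts bare clk_prepare_enable() calls into a goto-based unwind so a failure after the clocks have been prepared no longer leaks them. A condensed sketch of the idiom, with assumed names and an assumed post-clock setup step standing in for the regmap lookup and pinctrl registration in the real probe:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Assumed placeholder for whatever acquisition follows the clock enables. */
static int example_setup(struct device *dev)
{
        return 0;
}

static int example_enable_clocks(struct device *dev, struct clk *clk)
{
        struct clk *pclk;
        int ret;

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        pclk = devm_clk_get_optional(dev, "pclk");
        if (IS_ERR(pclk)) {
                ret = PTR_ERR(pclk);
                goto disable_clk;
        }

        ret = clk_prepare_enable(pclk);         /* NULL pclk is a no-op */
        if (ret)
                goto disable_clk;

        ret = example_setup(dev);
        if (ret)
                goto disable_pclk;

        return 0;

disable_pclk:
        clk_disable_unprepare(pclk);
disable_clk:
        clk_disable_unprepare(clk);
        return ret;
}

Unwinding in the reverse order of acquisition keeps each label responsible for exactly one resource, which is why the hunk introduces two labels rather than one.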
index 165cb7a..072bccd 100644 (file)
@@ -673,7 +673,7 @@ static void sgpio_irq_handler(struct irq_desc *desc)
 
                for_each_set_bit(port, &val, SGPIO_BITS_PER_WORD) {
                        gpio = sgpio_addr_to_pin(priv, port, bit);
-                       generic_handle_irq(irq_linear_revmap(chip->irq.domain, gpio));
+                       generic_handle_domain_irq(chip->irq.domain, gpio);
                }
 
                chained_irq_exit(parent_chip, desc);
index e470c16..0a36ec8 100644 (file)
@@ -1290,8 +1290,7 @@ static void ocelot_irq_handler(struct irq_desc *desc)
 
                for_each_set_bit(irq, &irqs,
                                 min(32U, info->desc->npins - 32 * i))
-                       generic_handle_irq(irq_linear_revmap(chip->irq.domain,
-                                                            irq + 32 * i));
+                       generic_handle_domain_irq(chip->irq.domain, irq + 32 * i);
 
                chained_irq_exit(parent_chip, desc);
        }
index 5a31227..cebd810 100644 (file)
@@ -1055,7 +1055,7 @@ static void oxnas_gpio_irq_handler(struct irq_desc *desc)
        stat = readl(bank->reg_base + IRQ_PENDING);
 
        for_each_set_bit(pin, &stat, BITS_PER_LONG)
-               generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
+               generic_handle_domain_irq(gc->irq.domain, pin);
 
        chained_irq_exit(chip, desc);
 }
index a6e2a4a..748dabd 100644 (file)
@@ -2101,7 +2101,7 @@ static void pic32_gpio_irq_handler(struct irq_desc *desc)
        pending = pic32_gpio_get_pending(gc, stat);
 
        for_each_set_bit(pin, &pending, BITS_PER_LONG)
-               generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
+               generic_handle_domain_irq(gc->irq.domain, pin);
 
        chained_irq_exit(chip, desc);
 }
index ec761ba..8d271c6 100644 (file)
@@ -1306,7 +1306,7 @@ static void pistachio_gpio_irq_handler(struct irq_desc *desc)
        pending = gpio_readl(bank, GPIO_INTERRUPT_STATUS) &
                gpio_readl(bank, GPIO_INTERRUPT_EN);
        for_each_set_bit(pin, &pending, 16)
-               generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
+               generic_handle_domain_irq(gc->irq.domain, pin);
        chained_irq_exit(chip, desc);
 }
 
index 067fc42..ae33e37 100644 (file)
@@ -21,8 +21,8 @@
 #include <linux/io.h>
 #include <linux/bitops.h>
 #include <linux/gpio/driver.h>
-#include <linux/of_device.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinconf.h>
 
 #include "core.h"
 #include "pinconf.h"
-
-/* GPIO control registers */
-#define GPIO_SWPORT_DR         0x00
-#define GPIO_SWPORT_DDR                0x04
-#define GPIO_INTEN             0x30
-#define GPIO_INTMASK           0x34
-#define GPIO_INTTYPE_LEVEL     0x38
-#define GPIO_INT_POLARITY      0x3c
-#define GPIO_INT_STATUS                0x40
-#define GPIO_INT_RAWSTATUS     0x44
-#define GPIO_DEBOUNCE          0x48
-#define GPIO_PORTS_EOI         0x4c
-#define GPIO_EXT_PORT          0x50
-#define GPIO_LS_SYNC           0x60
-
-enum rockchip_pinctrl_type {
-       PX30,
-       RV1108,
-       RK2928,
-       RK3066B,
-       RK3128,
-       RK3188,
-       RK3288,
-       RK3308,
-       RK3368,
-       RK3399,
-       RK3568,
-};
-
+#include "pinctrl-rockchip.h"
 
 /**
  * Generate a bitmask for setting a value (v) with a write mask bit in hiword
@@ -84,103 +56,6 @@ enum rockchip_pinctrl_type {
 #define IOMUX_WIDTH_3BIT       BIT(4)
 #define IOMUX_WIDTH_2BIT       BIT(5)
 
-/**
- * struct rockchip_iomux
- * @type: iomux variant using IOMUX_* constants
- * @offset: if initialized to -1 it will be autocalculated, by specifying
- *         an initial offset value the relevant source offset can be reset
- *         to a new value for autocalculating the following iomux registers.
- */
-struct rockchip_iomux {
-       int                             type;
-       int                             offset;
-};
-
-/*
- * enum type index corresponding to rockchip_perpin_drv_list arrays index.
- */
-enum rockchip_pin_drv_type {
-       DRV_TYPE_IO_DEFAULT = 0,
-       DRV_TYPE_IO_1V8_OR_3V0,
-       DRV_TYPE_IO_1V8_ONLY,
-       DRV_TYPE_IO_1V8_3V0_AUTO,
-       DRV_TYPE_IO_3V3_ONLY,
-       DRV_TYPE_MAX
-};
-
-/*
- * enum type index corresponding to rockchip_pull_list arrays index.
- */
-enum rockchip_pin_pull_type {
-       PULL_TYPE_IO_DEFAULT = 0,
-       PULL_TYPE_IO_1V8_ONLY,
-       PULL_TYPE_MAX
-};
-
-/**
- * struct rockchip_drv
- * @drv_type: drive strength variant using rockchip_perpin_drv_type
- * @offset: if initialized to -1 it will be autocalculated, by specifying
- *         an initial offset value the relevant source offset can be reset
- *         to a new value for autocalculating the following drive strength
- *         registers. if used chips own cal_drv func instead to calculate
- *         registers offset, the variant could be ignored.
- */
-struct rockchip_drv {
-       enum rockchip_pin_drv_type      drv_type;
-       int                             offset;
-};
-
-/**
- * struct rockchip_pin_bank
- * @reg_base: register base of the gpio bank
- * @regmap_pull: optional separate register for additional pull settings
- * @clk: clock of the gpio bank
- * @irq: interrupt of the gpio bank
- * @saved_masks: Saved content of GPIO_INTEN at suspend time.
- * @pin_base: first pin number
- * @nr_pins: number of pins in this bank
- * @name: name of the bank
- * @bank_num: number of the bank, to account for holes
- * @iomux: array describing the 4 iomux sources of the bank
- * @drv: array describing the 4 drive strength sources of the bank
- * @pull_type: array describing the 4 pull type sources of the bank
- * @valid: is all necessary information present
- * @of_node: dt node of this bank
- * @drvdata: common pinctrl basedata
- * @domain: irqdomain of the gpio bank
- * @gpio_chip: gpiolib chip
- * @grange: gpio range
- * @slock: spinlock for the gpio bank
- * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
- * @recalced_mask: bit mask to indicate a need to recalulate the mask
- * @route_mask: bits describing the routing pins of per bank
- */
-struct rockchip_pin_bank {
-       void __iomem                    *reg_base;
-       struct regmap                   *regmap_pull;
-       struct clk                      *clk;
-       int                             irq;
-       u32                             saved_masks;
-       u32                             pin_base;
-       u8                              nr_pins;
-       char                            *name;
-       u8                              bank_num;
-       struct rockchip_iomux           iomux[4];
-       struct rockchip_drv             drv[4];
-       enum rockchip_pin_pull_type     pull_type[4];
-       bool                            valid;
-       struct device_node              *of_node;
-       struct rockchip_pinctrl         *drvdata;
-       struct irq_domain               *domain;
-       struct gpio_chip                gpio_chip;
-       struct pinctrl_gpio_range       grange;
-       raw_spinlock_t                  slock;
-       u32                             toggle_edge_mode;
-       u32                             recalced_mask;
-       u32                             route_mask;
-};
-
 #define PIN_BANK(id, pins, label)                      \
        {                                               \
                .bank_num       = id,                   \
@@ -320,119 +195,6 @@ struct rockchip_pin_bank {
 #define RK_MUXROUTE_PMU(ID, PIN, FUNC, REG, VAL)       \
        PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_PMU)
 
-/**
- * struct rockchip_mux_recalced_data: represent a pin iomux data.
- * @num: bank number.
- * @pin: pin number.
- * @bit: index at register.
- * @reg: register offset.
- * @mask: mask bit
- */
-struct rockchip_mux_recalced_data {
-       u8 num;
-       u8 pin;
-       u32 reg;
-       u8 bit;
-       u8 mask;
-};
-
-enum rockchip_mux_route_location {
-       ROCKCHIP_ROUTE_SAME = 0,
-       ROCKCHIP_ROUTE_PMU,
-       ROCKCHIP_ROUTE_GRF,
-};
-
-/**
- * struct rockchip_mux_recalced_data: represent a pin iomux data.
- * @bank_num: bank number.
- * @pin: index at register or used to calc index.
- * @func: the min pin.
- * @route_location: the mux route location (same, pmu, grf).
- * @route_offset: the max pin.
- * @route_val: the register offset.
- */
-struct rockchip_mux_route_data {
-       u8 bank_num;
-       u8 pin;
-       u8 func;
-       enum rockchip_mux_route_location route_location;
-       u32 route_offset;
-       u32 route_val;
-};
-
-struct rockchip_pin_ctrl {
-       struct rockchip_pin_bank        *pin_banks;
-       u32                             nr_banks;
-       u32                             nr_pins;
-       char                            *label;
-       enum rockchip_pinctrl_type      type;
-       int                             grf_mux_offset;
-       int                             pmu_mux_offset;
-       int                             grf_drv_offset;
-       int                             pmu_drv_offset;
-       struct rockchip_mux_recalced_data *iomux_recalced;
-       u32                             niomux_recalced;
-       struct rockchip_mux_route_data *iomux_routes;
-       u32                             niomux_routes;
-
-       void    (*pull_calc_reg)(struct rockchip_pin_bank *bank,
-                                   int pin_num, struct regmap **regmap,
-                                   int *reg, u8 *bit);
-       void    (*drv_calc_reg)(struct rockchip_pin_bank *bank,
-                                   int pin_num, struct regmap **regmap,
-                                   int *reg, u8 *bit);
-       int     (*schmitt_calc_reg)(struct rockchip_pin_bank *bank,
-                                   int pin_num, struct regmap **regmap,
-                                   int *reg, u8 *bit);
-};
-
-struct rockchip_pin_config {
-       unsigned int            func;
-       unsigned long           *configs;
-       unsigned int            nconfigs;
-};
-
-/**
- * struct rockchip_pin_group: represent group of pins of a pinmux function.
- * @name: name of the pin group, used to lookup the group.
- * @pins: the pins included in this group.
- * @npins: number of pins included in this group.
- * @data: local pin configuration
- */
-struct rockchip_pin_group {
-       const char                      *name;
-       unsigned int                    npins;
-       unsigned int                    *pins;
-       struct rockchip_pin_config      *data;
-};
-
-/**
- * struct rockchip_pmx_func: represent a pin function.
- * @name: name of the pin function, used to lookup the function.
- * @groups: one or more names of pin groups that provide this function.
- * @ngroups: number of groups included in @groups.
- */
-struct rockchip_pmx_func {
-       const char              *name;
-       const char              **groups;
-       u8                      ngroups;
-};
-
-struct rockchip_pinctrl {
-       struct regmap                   *regmap_base;
-       int                             reg_size;
-       struct regmap                   *regmap_pull;
-       struct regmap                   *regmap_pmu;
-       struct device                   *dev;
-       struct rockchip_pin_ctrl        *ctrl;
-       struct pinctrl_desc             pctl;
-       struct pinctrl_dev              *pctl_dev;
-       struct rockchip_pin_group       *groups;
-       unsigned int                    ngroups;
-       struct rockchip_pmx_func        *functions;
-       unsigned int                    nfunctions;
-};
-
 static struct regmap_config rockchip_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
@@ -2295,86 +2057,11 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
        return 0;
 }
 
-static int rockchip_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
-{
-       struct rockchip_pin_bank *bank = gpiochip_get_data(chip);
-       u32 data;
-       int ret;
-
-       ret = clk_enable(bank->clk);
-       if (ret < 0) {
-               dev_err(bank->drvdata->dev,
-                       "failed to enable clock for bank %s\n", bank->name);
-               return ret;
-       }
-       data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
-       clk_disable(bank->clk);
-
-       if (data & BIT(offset))
-               return GPIO_LINE_DIRECTION_OUT;
-
-       return GPIO_LINE_DIRECTION_IN;
-}
-
-/*
- * The calls to gpio_direction_output() and gpio_direction_input()
- * leads to this function call (via the pinctrl_gpio_direction_{input|output}()
- * function called from the gpiolib interface).
- */
-static int _rockchip_pmx_gpio_set_direction(struct gpio_chip *chip,
-                                           int pin, bool input)
-{
-       struct rockchip_pin_bank *bank;
-       int ret;
-       unsigned long flags;
-       u32 data;
-
-       bank = gpiochip_get_data(chip);
-
-       ret = rockchip_set_mux(bank, pin, RK_FUNC_GPIO);
-       if (ret < 0)
-               return ret;
-
-       clk_enable(bank->clk);
-       raw_spin_lock_irqsave(&bank->slock, flags);
-
-       data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
-       /* set bit to 1 for output, 0 for input */
-       if (!input)
-               data |= BIT(pin);
-       else
-               data &= ~BIT(pin);
-       writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
-
-       raw_spin_unlock_irqrestore(&bank->slock, flags);
-       clk_disable(bank->clk);
-
-       return 0;
-}
-
-static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
-                                             struct pinctrl_gpio_range *range,
-                                             unsigned offset, bool input)
-{
-       struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
-       struct gpio_chip *chip;
-       int pin;
-
-       chip = range->gc;
-       pin = offset - chip->base;
-       dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
-                offset, range->name, pin, input ? "input" : "output");
-
-       return _rockchip_pmx_gpio_set_direction(chip, offset - chip->base,
-                                               input);
-}
-
 static const struct pinmux_ops rockchip_pmx_ops = {
        .get_functions_count    = rockchip_pmx_get_funcs_count,
        .get_function_name      = rockchip_pmx_get_func_name,
        .get_function_groups    = rockchip_pmx_get_groups,
        .set_mux                = rockchip_pmx_set,
-       .gpio_set_direction     = rockchip_pmx_gpio_set_direction,
 };
 
 /*
@@ -2405,15 +2092,13 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
        return false;
 }
 
-static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value);
-static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset);
-
 /* set the pin config settings for a specified pin */
 static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                                unsigned long *configs, unsigned num_configs)
 {
        struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
        struct rockchip_pin_bank *bank = pin_to_bank(info, pin);
+       struct gpio_chip *gpio = &bank->gpio_chip;
        enum pin_config_param param;
        u32 arg;
        int i;
@@ -2446,10 +2131,13 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                                return rc;
                        break;
                case PIN_CONFIG_OUTPUT:
-                       rockchip_gpio_set(&bank->gpio_chip,
-                                         pin - bank->pin_base, arg);
-                       rc = _rockchip_pmx_gpio_set_direction(&bank->gpio_chip,
-                                         pin - bank->pin_base, false);
+                       rc = rockchip_set_mux(bank, pin - bank->pin_base,
+                                             RK_FUNC_GPIO);
+                       if (rc != RK_FUNC_GPIO)
+                               return -EINVAL;
+
+                       rc = gpio->direction_output(gpio, pin - bank->pin_base,
+                                                   arg);
                        if (rc)
                                return rc;
                        break;
@@ -2487,6 +2175,7 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
 {
        struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
        struct rockchip_pin_bank *bank = pin_to_bank(info, pin);
+       struct gpio_chip *gpio = &bank->gpio_chip;
        enum pin_config_param param = pinconf_to_config_param(*config);
        u16 arg;
        int rc;
@@ -2515,7 +2204,7 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
                if (rc != RK_FUNC_GPIO)
                        return -EINVAL;
 
-               rc = rockchip_gpio_get(&bank->gpio_chip, pin - bank->pin_base);
+               rc = gpio->get(gpio, pin - bank->pin_base);
                if (rc < 0)
                        return rc;
 
@@ -2753,7 +2442,7 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
        ctrldesc->npins = info->ctrl->nr_pins;
 
        pdesc = pindesc;
-       for (bank = 0 , k = 0; bank < info->ctrl->nr_banks; bank++) {
+       for (bank = 0, k = 0; bank < info->ctrl->nr_banks; bank++) {
                pin_bank = &info->ctrl->pin_banks[bank];
                for (pin = 0; pin < pin_bank->nr_pins; pin++, k++) {
                        pdesc->number = k;
@@ -2773,553 +2462,9 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
                return PTR_ERR(info->pctl_dev);
        }
 
-       for (bank = 0; bank < info->ctrl->nr_banks; ++bank) {
-               pin_bank = &info->ctrl->pin_banks[bank];
-               pin_bank->grange.name = pin_bank->name;
-               pin_bank->grange.id = bank;
-               pin_bank->grange.pin_base = pin_bank->pin_base;
-               pin_bank->grange.base = pin_bank->gpio_chip.base;
-               pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
-               pin_bank->grange.gc = &pin_bank->gpio_chip;
-               pinctrl_add_gpio_range(info->pctl_dev, &pin_bank->grange);
-       }
-
        return 0;
 }
 
-/*
- * GPIO handling
- */
-
-static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
-{
-       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
-       void __iomem *reg = bank->reg_base + GPIO_SWPORT_DR;
-       unsigned long flags;
-       u32 data;
-
-       clk_enable(bank->clk);
-       raw_spin_lock_irqsave(&bank->slock, flags);
-
-       data = readl(reg);
-       data &= ~BIT(offset);
-       if (value)
-               data |= BIT(offset);
-       writel(data, reg);
-
-       raw_spin_unlock_irqrestore(&bank->slock, flags);
-       clk_disable(bank->clk);
-}
-
-/*
- * Returns the level of the pin for input direction and setting of the DR
- * register for output gpios.
- */
-static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset)
-{
-       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
-       u32 data;
-
-       clk_enable(bank->clk);
-       data = readl(bank->reg_base + GPIO_EXT_PORT);
-       clk_disable(bank->clk);
-       data >>= offset;
-       data &= 1;
-       return data;
-}
-
-/*
- * gpiolib gpio_direction_input callback function. The setting of the pin
- * mux function as 'gpio input' will be handled by the pinctrl subsystem
- * interface.
- */
-static int rockchip_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
-{
-       return pinctrl_gpio_direction_input(gc->base + offset);
-}
-
-/*
- * gpiolib gpio_direction_output callback function. The setting of the pin
- * mux function as 'gpio output' will be handled by the pinctrl subsystem
- * interface.
- */
-static int rockchip_gpio_direction_output(struct gpio_chip *gc,
-                                         unsigned offset, int value)
-{
-       rockchip_gpio_set(gc, offset, value);
-       return pinctrl_gpio_direction_output(gc->base + offset);
-}
-
-static void rockchip_gpio_set_debounce(struct gpio_chip *gc,
-                                      unsigned int offset, bool enable)
-{
-       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
-       void __iomem *reg = bank->reg_base + GPIO_DEBOUNCE;
-       unsigned long flags;
-       u32 data;
-
-       clk_enable(bank->clk);
-       raw_spin_lock_irqsave(&bank->slock, flags);
-
-       data = readl(reg);
-       if (enable)
-               data |= BIT(offset);
-       else
-               data &= ~BIT(offset);
-       writel(data, reg);
-
-       raw_spin_unlock_irqrestore(&bank->slock, flags);
-       clk_disable(bank->clk);
-}
-
-/*
- * gpiolib set_config callback function. The setting of the pin
- * mux function as 'gpio output' will be handled by the pinctrl subsystem
- * interface.
- */
-static int rockchip_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
-                                 unsigned long config)
-{
-       enum pin_config_param param = pinconf_to_config_param(config);
-
-       switch (param) {
-       case PIN_CONFIG_INPUT_DEBOUNCE:
-               rockchip_gpio_set_debounce(gc, offset, true);
-               /*
-                * Rockchip's gpio could only support up to one period
-                * of the debounce clock(pclk), which is far away from
-                * satisftying the requirement, as pclk is usually near
-                * 100MHz shared by all peripherals. So the fact is it
-                * has crippled debounce capability could only be useful
-                * to prevent any spurious glitches from waking up the system
-                * if the gpio is conguired as wakeup interrupt source. Let's
-                * still return -ENOTSUPP as before, to make sure the caller
-                * of gpiod_set_debounce won't change its behaviour.
-                */
-               return -ENOTSUPP;
-       default:
-               return -ENOTSUPP;
-       }
-}
-
-/*
- * gpiolib gpio_to_irq callback function. Creates a mapping between a GPIO pin
- * and a virtual IRQ, if not already present.
- */
-static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
-{
-       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
-       unsigned int virq;
-
-       if (!bank->domain)
-               return -ENXIO;
-
-       clk_enable(bank->clk);
-       virq = irq_create_mapping(bank->domain, offset);
-       clk_disable(bank->clk);
-
-       return (virq) ? : -ENXIO;
-}
-
-static const struct gpio_chip rockchip_gpiolib_chip = {
-       .request = gpiochip_generic_request,
-       .free = gpiochip_generic_free,
-       .set = rockchip_gpio_set,
-       .get = rockchip_gpio_get,
-       .get_direction  = rockchip_gpio_get_direction,
-       .direction_input = rockchip_gpio_direction_input,
-       .direction_output = rockchip_gpio_direction_output,
-       .set_config = rockchip_gpio_set_config,
-       .to_irq = rockchip_gpio_to_irq,
-       .owner = THIS_MODULE,
-};
-
-/*
- * Interrupt handling
- */
-
-static void rockchip_irq_demux(struct irq_desc *desc)
-{
-       struct irq_chip *chip = irq_desc_get_chip(desc);
-       struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
-       u32 pend;
-
-       dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
-
-       chained_irq_enter(chip, desc);
-
-       pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
-
-       while (pend) {
-               unsigned int irq, virq;
-
-               irq = __ffs(pend);
-               pend &= ~BIT(irq);
-               virq = irq_find_mapping(bank->domain, irq);
-
-               if (!virq) {
-                       dev_err(bank->drvdata->dev, "unmapped irq %d\n", irq);
-                       continue;
-               }
-
-               dev_dbg(bank->drvdata->dev, "handling irq %d\n", irq);
-
-               /*
-                * Triggering IRQ on both rising and falling edge
-                * needs manual intervention.
-                */
-               if (bank->toggle_edge_mode & BIT(irq)) {
-                       u32 data, data_old, polarity;
-                       unsigned long flags;
-
-                       data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
-                       do {
-                               raw_spin_lock_irqsave(&bank->slock, flags);
-
-                               polarity = readl_relaxed(bank->reg_base +
-                                                        GPIO_INT_POLARITY);
-                               if (data & BIT(irq))
-                                       polarity &= ~BIT(irq);
-                               else
-                                       polarity |= BIT(irq);
-                               writel(polarity,
-                                      bank->reg_base + GPIO_INT_POLARITY);
-
-                               raw_spin_unlock_irqrestore(&bank->slock, flags);
-
-                               data_old = data;
-                               data = readl_relaxed(bank->reg_base +
-                                                    GPIO_EXT_PORT);
-                       } while ((data & BIT(irq)) != (data_old & BIT(irq)));
-               }
-
-               generic_handle_irq(virq);
-       }
-
-       chained_irq_exit(chip, desc);
-}
-
-static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
-{
-       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-       struct rockchip_pin_bank *bank = gc->private;
-       u32 mask = BIT(d->hwirq);
-       u32 polarity;
-       u32 level;
-       u32 data;
-       unsigned long flags;
-       int ret;
-
-       /* make sure the pin is configured as gpio input */
-       ret = rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO);
-       if (ret < 0)
-               return ret;
-
-       clk_enable(bank->clk);
-       raw_spin_lock_irqsave(&bank->slock, flags);
-
-       data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
-       data &= ~mask;
-       writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
-
-       raw_spin_unlock_irqrestore(&bank->slock, flags);
-
-       if (type & IRQ_TYPE_EDGE_BOTH)
-               irq_set_handler_locked(d, handle_edge_irq);
-       else
-               irq_set_handler_locked(d, handle_level_irq);
-
-       raw_spin_lock_irqsave(&bank->slock, flags);
-       irq_gc_lock(gc);
-
-       level = readl_relaxed(gc->reg_base + GPIO_INTTYPE_LEVEL);
-       polarity = readl_relaxed(gc->reg_base + GPIO_INT_POLARITY);
-
-       switch (type) {
-       case IRQ_TYPE_EDGE_BOTH:
-               bank->toggle_edge_mode |= mask;
-               level |= mask;
-
-               /*
-                * Determine gpio state. If 1 next interrupt should be falling
-                * otherwise rising.
-                */
-               data = readl(bank->reg_base + GPIO_EXT_PORT);
-               if (data & mask)
-                       polarity &= ~mask;
-               else
-                       polarity |= mask;
-               break;
-       case IRQ_TYPE_EDGE_RISING:
-               bank->toggle_edge_mode &= ~mask;
-               level |= mask;
-               polarity |= mask;
-               break;
-       case IRQ_TYPE_EDGE_FALLING:
-               bank->toggle_edge_mode &= ~mask;
-               level |= mask;
-               polarity &= ~mask;
-               break;
-       case IRQ_TYPE_LEVEL_HIGH:
-               bank->toggle_edge_mode &= ~mask;
-               level &= ~mask;
-               polarity |= mask;
-               break;
-       case IRQ_TYPE_LEVEL_LOW:
-               bank->toggle_edge_mode &= ~mask;
-               level &= ~mask;
-               polarity &= ~mask;
-               break;
-       default:
-               irq_gc_unlock(gc);
-               raw_spin_unlock_irqrestore(&bank->slock, flags);
-               clk_disable(bank->clk);
-               return -EINVAL;
-       }
-
-       writel_relaxed(level, gc->reg_base + GPIO_INTTYPE_LEVEL);
-       writel_relaxed(polarity, gc->reg_base + GPIO_INT_POLARITY);
-
-       irq_gc_unlock(gc);
-       raw_spin_unlock_irqrestore(&bank->slock, flags);
-       clk_disable(bank->clk);
-
-       return 0;
-}
-
-static void rockchip_irq_suspend(struct irq_data *d)
-{
-       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-       struct rockchip_pin_bank *bank = gc->private;
-
-       clk_enable(bank->clk);
-       bank->saved_masks = irq_reg_readl(gc, GPIO_INTMASK);
-       irq_reg_writel(gc, ~gc->wake_active, GPIO_INTMASK);
-       clk_disable(bank->clk);
-}
-
-static void rockchip_irq_resume(struct irq_data *d)
-{
-       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-       struct rockchip_pin_bank *bank = gc->private;
-
-       clk_enable(bank->clk);
-       irq_reg_writel(gc, bank->saved_masks, GPIO_INTMASK);
-       clk_disable(bank->clk);
-}
-
-static void rockchip_irq_enable(struct irq_data *d)
-{
-       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-       struct rockchip_pin_bank *bank = gc->private;
-
-       clk_enable(bank->clk);
-       irq_gc_mask_clr_bit(d);
-}
-
-static void rockchip_irq_disable(struct irq_data *d)
-{
-       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-       struct rockchip_pin_bank *bank = gc->private;
-
-       irq_gc_mask_set_bit(d);
-       clk_disable(bank->clk);
-}
-
-static int rockchip_interrupts_register(struct platform_device *pdev,
-                                               struct rockchip_pinctrl *info)
-{
-       struct rockchip_pin_ctrl *ctrl = info->ctrl;
-       struct rockchip_pin_bank *bank = ctrl->pin_banks;
-       unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
-       struct irq_chip_generic *gc;
-       int ret;
-       int i;
-
-       for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
-               if (!bank->valid) {
-                       dev_warn(&pdev->dev, "bank %s is not valid\n",
-                                bank->name);
-                       continue;
-               }
-
-               ret = clk_enable(bank->clk);
-               if (ret) {
-                       dev_err(&pdev->dev, "failed to enable clock for bank %s\n",
-                               bank->name);
-                       continue;
-               }
-
-               bank->domain = irq_domain_add_linear(bank->of_node, 32,
-                                               &irq_generic_chip_ops, NULL);
-               if (!bank->domain) {
-                       dev_warn(&pdev->dev, "could not initialize irq domain for bank %s\n",
-                                bank->name);
-                       clk_disable(bank->clk);
-                       continue;
-               }
-
-               ret = irq_alloc_domain_generic_chips(bank->domain, 32, 1,
-                                        "rockchip_gpio_irq", handle_level_irq,
-                                        clr, 0, 0);
-               if (ret) {
-                       dev_err(&pdev->dev, "could not alloc generic chips for bank %s\n",
-                               bank->name);
-                       irq_domain_remove(bank->domain);
-                       clk_disable(bank->clk);
-                       continue;
-               }
-
-               gc = irq_get_domain_generic_chip(bank->domain, 0);
-               gc->reg_base = bank->reg_base;
-               gc->private = bank;
-               gc->chip_types[0].regs.mask = GPIO_INTMASK;
-               gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
-               gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
-               gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
-               gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
-               gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
-               gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
-               gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
-               gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
-               gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
-               gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
-               gc->wake_enabled = IRQ_MSK(bank->nr_pins);
-
-               /*
-                * Linux assumes that all interrupts start out disabled/masked.
-                * Our driver only uses the concept of masked and always keeps
-                * things enabled, so for us that's all masked and all enabled.
-                */
-               writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTMASK);
-               writel_relaxed(0xffffffff, bank->reg_base + GPIO_PORTS_EOI);
-               writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTEN);
-               gc->mask_cache = 0xffffffff;
-
-               irq_set_chained_handler_and_data(bank->irq,
-                                                rockchip_irq_demux, bank);
-               clk_disable(bank->clk);
-       }
-
-       return 0;
-}
-
-static int rockchip_gpiolib_register(struct platform_device *pdev,
-                                               struct rockchip_pinctrl *info)
-{
-       struct rockchip_pin_ctrl *ctrl = info->ctrl;
-       struct rockchip_pin_bank *bank = ctrl->pin_banks;
-       struct gpio_chip *gc;
-       int ret;
-       int i;
-
-       for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
-               if (!bank->valid) {
-                       dev_warn(&pdev->dev, "bank %s is not valid\n",
-                                bank->name);
-                       continue;
-               }
-
-               bank->gpio_chip = rockchip_gpiolib_chip;
-
-               gc = &bank->gpio_chip;
-               gc->base = bank->pin_base;
-               gc->ngpio = bank->nr_pins;
-               gc->parent = &pdev->dev;
-               gc->of_node = bank->of_node;
-               gc->label = bank->name;
-
-               ret = gpiochip_add_data(gc, bank);
-               if (ret) {
-                       dev_err(&pdev->dev, "failed to register gpio_chip %s, error code: %d\n",
-                                                       gc->label, ret);
-                       goto fail;
-               }
-       }
-
-       rockchip_interrupts_register(pdev, info);
-
-       return 0;
-
-fail:
-       for (--i, --bank; i >= 0; --i, --bank) {
-               if (!bank->valid)
-                       continue;
-               gpiochip_remove(&bank->gpio_chip);
-       }
-       return ret;
-}
-
-static int rockchip_gpiolib_unregister(struct platform_device *pdev,
-                                               struct rockchip_pinctrl *info)
-{
-       struct rockchip_pin_ctrl *ctrl = info->ctrl;
-       struct rockchip_pin_bank *bank = ctrl->pin_banks;
-       int i;
-
-       for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
-               if (!bank->valid)
-                       continue;
-               gpiochip_remove(&bank->gpio_chip);
-       }
-
-       return 0;
-}
-
-static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
-                                 struct rockchip_pinctrl *info)
-{
-       struct resource res;
-       void __iomem *base;
-
-       if (of_address_to_resource(bank->of_node, 0, &res)) {
-               dev_err(info->dev, "cannot find IO resource for bank\n");
-               return -ENOENT;
-       }
-
-       bank->reg_base = devm_ioremap_resource(info->dev, &res);
-       if (IS_ERR(bank->reg_base))
-               return PTR_ERR(bank->reg_base);
-
-       /*
-        * special case, where parts of the pull setting-registers are
-        * part of the PMU register space
-        */
-       if (of_device_is_compatible(bank->of_node,
-                                   "rockchip,rk3188-gpio-bank0")) {
-               struct device_node *node;
-
-               node = of_parse_phandle(bank->of_node->parent,
-                                       "rockchip,pmu", 0);
-               if (!node) {
-                       if (of_address_to_resource(bank->of_node, 1, &res)) {
-                               dev_err(info->dev, "cannot find IO resource for bank\n");
-                               return -ENOENT;
-                       }
-
-                       base = devm_ioremap_resource(info->dev, &res);
-                       if (IS_ERR(base))
-                               return PTR_ERR(base);
-                       rockchip_regmap_config.max_register =
-                                                   resource_size(&res) - 4;
-                       rockchip_regmap_config.name =
-                                           "rockchip,rk3188-gpio-bank0-pull";
-                       bank->regmap_pull = devm_regmap_init_mmio(info->dev,
-                                                   base,
-                                                   &rockchip_regmap_config);
-               }
-               of_node_put(node);
-       }
-
-       bank->irq = irq_of_parse_and_map(bank->of_node, 0);
-
-       bank->clk = of_clk_get(bank->of_node, 0);
-       if (IS_ERR(bank->clk))
-               return PTR_ERR(bank->clk);
-
-       return clk_prepare(bank->clk);
-}
-
 static const struct of_device_id rockchip_pinctrl_dt_match[];
 
 /* retrieve the soc specific data */
@@ -3329,7 +2474,6 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
 {
        const struct of_device_id *match;
        struct device_node *node = pdev->dev.of_node;
-       struct device_node *np;
        struct rockchip_pin_ctrl *ctrl;
        struct rockchip_pin_bank *bank;
        int grf_offs, pmu_offs, drv_grf_offs, drv_pmu_offs, i, j;
@@ -3337,23 +2481,6 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
        match = of_match_node(rockchip_pinctrl_dt_match, node);
        ctrl = (struct rockchip_pin_ctrl *)match->data;
 
-       for_each_child_of_node(node, np) {
-               if (!of_find_property(np, "gpio-controller", NULL))
-                       continue;
-
-               bank = ctrl->pin_banks;
-               for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
-                       if (!strcmp(bank->name, np->name)) {
-                               bank->of_node = np;
-
-                               if (!rockchip_get_bank_data(bank, d))
-                                       bank->valid = true;
-
-                               break;
-                       }
-               }
-       }
-
        grf_offs = ctrl->grf_mux_offset;
        pmu_offs = ctrl->pmu_mux_offset;
        drv_pmu_offs = ctrl->pmu_drv_offset;
@@ -3574,18 +2701,18 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
                        return PTR_ERR(info->regmap_pmu);
        }
 
-       ret = rockchip_gpiolib_register(pdev, info);
+       ret = rockchip_pinctrl_register(pdev, info);
        if (ret)
                return ret;
 
-       ret = rockchip_pinctrl_register(pdev, info);
+       platform_set_drvdata(pdev, info);
+
+       ret = of_platform_populate(np, rockchip_bank_match, NULL, NULL);
        if (ret) {
-               rockchip_gpiolib_unregister(pdev, info);
+               dev_err(&pdev->dev, "failed to register gpio device\n");
                return ret;
        }
 
-       platform_set_drvdata(pdev, info);
-
        return 0;
 }
 
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
new file mode 100644 (file)
index 0000000..589d4d2
--- /dev/null
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021 Rockchip Electronics Co. Ltd.
+ *
+ * Copyright (c) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * With some ideas taken from pinctrl-samsung:
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ * Copyright (c) 2012 Linaro Ltd
+ *             https://www.linaro.org
+ *
+ * and pinctrl-at91:
+ * Copyright (C) 2011-2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ */
+
+#ifndef _PINCTRL_ROCKCHIP_H
+#define _PINCTRL_ROCKCHIP_H
+
+enum rockchip_pinctrl_type {
+       PX30,
+       RV1108,
+       RK2928,
+       RK3066B,
+       RK3128,
+       RK3188,
+       RK3288,
+       RK3308,
+       RK3368,
+       RK3399,
+       RK3568,
+};
+
+/**
+ * struct rockchip_gpio_regs
+ * @port_dr: data register
+ * @port_ddr: data direction register
+ * @int_en: interrupt enable
+ * @int_mask: interrupt mask
+ * @int_type: interrupt trigger type, such as high, low or edge trigger type.
+ * @int_polarity: interrupt polarity enable register
+ * @int_bothedge: interrupt bothedge enable register
+ * @int_status: interrupt status register
+ * @int_rawstatus: int_status = int_rawstatus & int_mask
+ * @debounce: enable debounce for interrupt signal
+ * @dbclk_div_en: enable divider for debounce clock
+ * @dbclk_div_con: setting for divider of debounce clock
+ * @port_eoi: end of interrupt of the port
+ * @ext_port: port data from external
+ * @version_id: controller version register
+ */
+struct rockchip_gpio_regs {
+       u32 port_dr;
+       u32 port_ddr;
+       u32 int_en;
+       u32 int_mask;
+       u32 int_type;
+       u32 int_polarity;
+       u32 int_bothedge;
+       u32 int_status;
+       u32 int_rawstatus;
+       u32 debounce;
+       u32 dbclk_div_en;
+       u32 dbclk_div_con;
+       u32 port_eoi;
+       u32 ext_port;
+       u32 version_id;
+};
+
+/**
+ * struct rockchip_iomux
+ * @type: iomux variant using IOMUX_* constants
+ * @offset: if initialized to -1 it will be autocalculated, by specifying
+ *         an initial offset value the relevant source offset can be reset
+ *         to a new value for autocalculating the following iomux registers.
+ */
+struct rockchip_iomux {
+       int type;
+       int offset;
+};
+
+/*
+ * enum type index corresponding to rockchip_perpin_drv_list arrays index.
+ */
+enum rockchip_pin_drv_type {
+       DRV_TYPE_IO_DEFAULT = 0,
+       DRV_TYPE_IO_1V8_OR_3V0,
+       DRV_TYPE_IO_1V8_ONLY,
+       DRV_TYPE_IO_1V8_3V0_AUTO,
+       DRV_TYPE_IO_3V3_ONLY,
+       DRV_TYPE_MAX
+};
+
+/*
+ * enum type index corresponding to rockchip_pull_list arrays index.
+ */
+enum rockchip_pin_pull_type {
+       PULL_TYPE_IO_DEFAULT = 0,
+       PULL_TYPE_IO_1V8_ONLY,
+       PULL_TYPE_MAX
+};
+
+/**
+ * struct rockchip_drv
+ * @drv_type: drive strength variant using rockchip_perpin_drv_type
+ * @offset: if initialized to -1 it will be autocalculated, by specifying
+ *         an initial offset value the relevant source offset can be reset
+ *         to a new value for autocalculating the following drive strength
+ *         registers. If the chip uses its own callback to calculate the
+ *         register offset instead, this variant can be ignored.
+ */
+struct rockchip_drv {
+       enum rockchip_pin_drv_type      drv_type;
+       int                             offset;
+};
+
+/**
+ * struct rockchip_pin_bank
+ * @dev: the pinctrl device bound to the bank
+ * @reg_base: register base of the gpio bank
+ * @regmap_pull: optional separate register for additional pull settings
+ * @clk: clock of the gpio bank
+ * @db_clk: clock of the gpio debounce
+ * @irq: interrupt of the gpio bank
+ * @saved_masks: Saved content of GPIO_INTEN at suspend time.
+ * @pin_base: first pin number
+ * @nr_pins: number of pins in this bank
+ * @name: name of the bank
+ * @bank_num: number of the bank, to account for holes
+ * @iomux: array describing the 4 iomux sources of the bank
+ * @drv: array describing the 4 drive strength sources of the bank
+ * @pull_type: array describing the 4 pull type sources of the bank
+ * @valid: is all necessary information present
+ * @of_node: dt node of this bank
+ * @drvdata: common pinctrl basedata
+ * @domain: irqdomain of the gpio bank
+ * @gpio_chip: gpiolib chip
+ * @grange: gpio range
+ * @slock: spinlock for the gpio bank
+ * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
+ * @recalced_mask: bit mask to indicate a need to recalculate the mask
+ * @route_mask: bits describing the routing pins per bank
+ */
+struct rockchip_pin_bank {
+       struct device                   *dev;
+       void __iomem                    *reg_base;
+       struct regmap                   *regmap_pull;
+       struct clk                      *clk;
+       struct clk                      *db_clk;
+       int                             irq;
+       u32                             saved_masks;
+       u32                             pin_base;
+       u8                              nr_pins;
+       char                            *name;
+       u8                              bank_num;
+       struct rockchip_iomux           iomux[4];
+       struct rockchip_drv             drv[4];
+       enum rockchip_pin_pull_type     pull_type[4];
+       bool                            valid;
+       struct device_node              *of_node;
+       struct rockchip_pinctrl         *drvdata;
+       struct irq_domain               *domain;
+       struct gpio_chip                gpio_chip;
+       struct pinctrl_gpio_range       grange;
+       raw_spinlock_t                  slock;
+       const struct rockchip_gpio_regs *gpio_regs;
+       u32                             gpio_type;
+       u32                             toggle_edge_mode;
+       u32                             recalced_mask;
+       u32                             route_mask;
+};
+
+/**
+ * struct rockchip_mux_recalced_data: represent a pin iomux data.
+ * @num: bank number.
+ * @pin: pin number.
+ * @bit: index at register.
+ * @reg: register offset.
+ * @mask: mask bit
+ */
+struct rockchip_mux_recalced_data {
+       u8 num;
+       u8 pin;
+       u32 reg;
+       u8 bit;
+       u8 mask;
+};
+
+enum rockchip_mux_route_location {
+       ROCKCHIP_ROUTE_SAME = 0,
+       ROCKCHIP_ROUTE_PMU,
+       ROCKCHIP_ROUTE_GRF,
+};
+
+/**
+ * struct rockchip_mux_route_data: represent a pin iomux route data.
+ * @bank_num: bank number.
+ * @pin: index at register or used to calc index.
+ * @func: the mux function for which the route applies.
+ * @route_location: the mux route location (same, pmu, grf).
+ * @route_offset: the route register offset.
+ * @route_val: the value to write to the route register.
+ */
+struct rockchip_mux_route_data {
+       u8 bank_num;
+       u8 pin;
+       u8 func;
+       enum rockchip_mux_route_location route_location;
+       u32 route_offset;
+       u32 route_val;
+};
+
+struct rockchip_pin_ctrl {
+       struct rockchip_pin_bank        *pin_banks;
+       u32                             nr_banks;
+       u32                             nr_pins;
+       char                            *label;
+       enum rockchip_pinctrl_type      type;
+       int                             grf_mux_offset;
+       int                             pmu_mux_offset;
+       int                             grf_drv_offset;
+       int                             pmu_drv_offset;
+       struct rockchip_mux_recalced_data *iomux_recalced;
+       u32                             niomux_recalced;
+       struct rockchip_mux_route_data *iomux_routes;
+       u32                             niomux_routes;
+
+       void    (*pull_calc_reg)(struct rockchip_pin_bank *bank,
+                                   int pin_num, struct regmap **regmap,
+                                   int *reg, u8 *bit);
+       void    (*drv_calc_reg)(struct rockchip_pin_bank *bank,
+                                   int pin_num, struct regmap **regmap,
+                                   int *reg, u8 *bit);
+       int     (*schmitt_calc_reg)(struct rockchip_pin_bank *bank,
+                                   int pin_num, struct regmap **regmap,
+                                   int *reg, u8 *bit);
+};
+
+struct rockchip_pin_config {
+       unsigned int            func;
+       unsigned long           *configs;
+       unsigned int            nconfigs;
+};
+
+/**
+ * struct rockchip_pin_group: represent group of pins of a pinmux function.
+ * @name: name of the pin group, used to lookup the group.
+ * @pins: the pins included in this group.
+ * @npins: number of pins included in this group.
+ * @data: local pin configuration
+ */
+struct rockchip_pin_group {
+       const char                      *name;
+       unsigned int                    npins;
+       unsigned int                    *pins;
+       struct rockchip_pin_config      *data;
+};
+
+/**
+ * struct rockchip_pmx_func: represent a pin function.
+ * @name: name of the pin function, used to lookup the function.
+ * @groups: one or more names of pin groups that provide this function.
+ * @ngroups: number of groups included in @groups.
+ */
+struct rockchip_pmx_func {
+       const char              *name;
+       const char              **groups;
+       u8                      ngroups;
+};
+
+struct rockchip_pinctrl {
+       struct regmap                   *regmap_base;
+       int                             reg_size;
+       struct regmap                   *regmap_pull;
+       struct regmap                   *regmap_pmu;
+       struct device                   *dev;
+       struct rockchip_pin_ctrl        *ctrl;
+       struct pinctrl_desc             pctl;
+       struct pinctrl_dev              *pctl_dev;
+       struct rockchip_pin_group       *groups;
+       unsigned int                    ngroups;
+       struct rockchip_pmx_func        *functions;
+       unsigned int                    nfunctions;
+};
+
+#endif
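The new header replaces the hard-coded GPIO_* register defines with a per-version offset table, struct rockchip_gpio_regs, so code that touches bank registers indexes through bank->gpio_regs instead of fixed constants. A minimal, hypothetical accessor to show the intended usage (the helper name is made up for illustration):

#include <linux/io.h>

#include "pinctrl-rockchip.h"

/*
 * Hypothetical helper: reads the external port (pin level) register through
 * the offset table selected for this bank's controller version.
 */
static u32 example_read_ext_port(struct rockchip_pin_bank *bank)
{
        return readl(bank->reg_base + bank->gpio_regs->ext_port);
}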
index e3aa647..aa6e722 100644 (file)
@@ -1491,8 +1491,8 @@ static int pcs_irq_handle(struct pcs_soc_data *pcs_soc)
                mask = pcs->read(pcswi->reg);
                raw_spin_unlock(&pcs->lock);
                if (mask & pcs_soc->irq_status_mask) {
-                       generic_handle_irq(irq_find_mapping(pcs->domain,
-                                                           pcswi->hwirq));
+                       generic_handle_domain_irq(pcs->domain,
+                                                 pcswi->hwirq);
                        count++;
                }
        }
index 43d9e6c..fa3edb4 100644 (file)
@@ -1420,7 +1420,7 @@ static void __gpio_irq_handler(struct st_gpio_bank *bank)
                                        continue;
                        }
 
-                       generic_handle_irq(irq_find_mapping(bank->gpio_chip.irq.domain, n));
+                       generic_handle_domain_irq(bank->gpio_chip.irq.domain, n);
                }
        }
 }
index 2f51b4f..cad4e60 100644 (file)
@@ -13,7 +13,7 @@ config PINCTRL_MSM
 
 config PINCTRL_APQ8064
        tristate "Qualcomm APQ8064 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -21,7 +21,7 @@ config PINCTRL_APQ8064
 
 config PINCTRL_APQ8084
        tristate "Qualcomm APQ8084 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -29,7 +29,7 @@ config PINCTRL_APQ8084
 
 config PINCTRL_IPQ4019
        tristate "Qualcomm IPQ4019 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -37,7 +37,7 @@ config PINCTRL_IPQ4019
 
 config PINCTRL_IPQ8064
        tristate "Qualcomm IPQ8064 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -45,7 +45,7 @@ config PINCTRL_IPQ8064
 
 config PINCTRL_IPQ8074
        tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -55,7 +55,7 @@ config PINCTRL_IPQ8074
 
 config PINCTRL_IPQ6018
        tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -65,7 +65,7 @@ config PINCTRL_IPQ6018
 
 config PINCTRL_MSM8226
        tristate "Qualcomm 8226 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -74,7 +74,7 @@ config PINCTRL_MSM8226
 
 config PINCTRL_MSM8660
        tristate "Qualcomm 8660 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -82,7 +82,7 @@ config PINCTRL_MSM8660
 
 config PINCTRL_MSM8960
        tristate "Qualcomm 8960 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -90,7 +90,7 @@ config PINCTRL_MSM8960
 
 config PINCTRL_MDM9615
        tristate "Qualcomm 9615 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -98,7 +98,7 @@ config PINCTRL_MDM9615
 
 config PINCTRL_MSM8X74
        tristate "Qualcomm 8x74 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -106,7 +106,7 @@ config PINCTRL_MSM8X74
 
 config PINCTRL_MSM8916
        tristate "Qualcomm 8916 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -114,7 +114,7 @@ config PINCTRL_MSM8916
 
 config PINCTRL_MSM8953
        tristate "Qualcomm 8953 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -124,7 +124,7 @@ config PINCTRL_MSM8953
 
 config PINCTRL_MSM8976
        tristate "Qualcomm 8976 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -134,7 +134,7 @@ config PINCTRL_MSM8976
 
 config PINCTRL_MSM8994
        tristate "Qualcomm 8994 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -143,7 +143,7 @@ config PINCTRL_MSM8994
 
 config PINCTRL_MSM8996
        tristate "Qualcomm MSM8996 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -151,7 +151,7 @@ config PINCTRL_MSM8996
 
 config PINCTRL_MSM8998
        tristate "Qualcomm MSM8998 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -159,7 +159,7 @@ config PINCTRL_MSM8998
 
 config PINCTRL_QCS404
        tristate "Qualcomm QCS404 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -167,7 +167,7 @@ config PINCTRL_QCS404
 
 config PINCTRL_QDF2XXX
        tristate "Qualcomm Technologies QDF2xxx pin controller driver"
-       depends on GPIOLIB && ACPI
+       depends on ACPI
        depends on PINCTRL_MSM
        help
          This is the GPIO driver for the TLMM block found on the
@@ -175,7 +175,7 @@ config PINCTRL_QDF2XXX
 
 config PINCTRL_QCOM_SPMI_PMIC
        tristate "Qualcomm SPMI PMIC pin controller driver"
-       depends on GPIOLIB && OF && SPMI
+       depends on OF && SPMI
        select REGMAP_SPMI
        select PINMUX
        select PINCONF
@@ -190,7 +190,7 @@ config PINCTRL_QCOM_SPMI_PMIC
 
 config PINCTRL_QCOM_SSBI_PMIC
        tristate "Qualcomm SSBI PMIC pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
@@ -204,7 +204,7 @@ config PINCTRL_QCOM_SSBI_PMIC
 
 config PINCTRL_SC7180
        tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -213,7 +213,7 @@ config PINCTRL_SC7180
 
 config PINCTRL_SC7280
        tristate "Qualcomm Technologies Inc SC7280 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -222,7 +222,7 @@ config PINCTRL_SC7280
 
 config PINCTRL_SC8180X
        tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
-       depends on GPIOLIB && (OF || ACPI)
+       depends on (OF || ACPI)
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -231,7 +231,7 @@ config PINCTRL_SC8180X
 
 config PINCTRL_SDM660
        tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -240,7 +240,7 @@ config PINCTRL_SDM660
 
 config PINCTRL_SDM845
        tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
-       depends on GPIOLIB && (OF || ACPI)
+       depends on (OF || ACPI)
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -249,7 +249,7 @@ config PINCTRL_SDM845
 
 config PINCTRL_SDX55
        tristate "Qualcomm Technologies Inc SDX55 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -258,7 +258,7 @@ config PINCTRL_SDX55
 
 config PINCTRL_SM6125
        tristate "Qualcomm Technologies Inc SM6125 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -267,7 +267,7 @@ config PINCTRL_SM6125
 
 config PINCTRL_SM8150
        tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -276,7 +276,7 @@ config PINCTRL_SM8150
 
 config PINCTRL_SM8250
        tristate "Qualcomm Technologies Inc SM8250 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -285,8 +285,7 @@ config PINCTRL_SM8250
 
 config PINCTRL_SM8350
        tristate "Qualcomm Technologies Inc SM8350 pin controller driver"
-       depends on GPIOLIB && OF
-       select PINCTRL_MSM
+       depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
          Qualcomm Technologies Inc TLMM block found on the Qualcomm
index d70caec..8476a8a 100644 (file)
@@ -1177,7 +1177,6 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
        const struct msm_pingroup *g;
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
-       int irq_pin;
        int handled = 0;
        u32 val;
        int i;
@@ -1192,8 +1191,7 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
                g = &pctrl->soc->groups[i];
                val = msm_readl_intr_status(pctrl, g);
                if (val & BIT(g->intr_status_bit)) {
-                       irq_pin = irq_find_mapping(gc->irq.domain, i);
-                       generic_handle_irq(irq_pin);
+                       generic_handle_domain_irq(gc->irq.domain, i);
                        handled++;
                }
        }
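The hunk above folds irq_find_mapping() plus generic_handle_irq() into the new generic_handle_domain_irq() helper, which looks up the hwirq in the domain and dispatches it in a single call, returning -EINVAL when no mapping exists; the same conversion repeats through the Samsung, S3C24xx/S3C64xx, SPEAr and sunxi hunks that follow. A minimal sketch of the resulting chained-handler shape, using made-up demo_ names rather than anything from these drivers:

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

/* Sketch only: dispatch every pending hwirq of one bank through its
 * irqdomain; the old two-step lookup is kept as a comment for contrast. */
static void demo_demux_bank(struct irq_domain *domain, unsigned long pending)
{
        unsigned int hwirq;

        for_each_set_bit(hwirq, &pending, BITS_PER_LONG) {
                /* old: generic_handle_irq(irq_find_mapping(domain, hwirq)); */
                generic_handle_domain_irq(domain, hwirq);
        }
}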
index 2b99f41..0489c89 100644 (file)
@@ -246,7 +246,8 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
 {
        struct samsung_pinctrl_drv_data *d = data;
        struct samsung_pin_bank *bank = d->pin_banks;
-       unsigned int svc, group, pin, virq;
+       unsigned int svc, group, pin;
+       int ret;
 
        svc = readl(bank->eint_base + EXYNOS_SVC_OFFSET);
        group = EXYNOS_SVC_GROUP(svc);
@@ -256,10 +257,10 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
                return IRQ_HANDLED;
        bank += (group - 1);
 
-       virq = irq_linear_revmap(bank->irq_domain, pin);
-       if (!virq)
+       ret = generic_handle_domain_irq(bank->irq_domain, pin);
+       if (ret)
                return IRQ_NONE;
-       generic_handle_irq(virq);
+
        return IRQ_HANDLED;
 }
 
@@ -473,12 +474,10 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
        struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc);
        struct samsung_pin_bank *bank = eintd->bank;
        struct irq_chip *chip = irq_desc_get_chip(desc);
-       int eint_irq;
 
        chained_irq_enter(chip, desc);
 
-       eint_irq = irq_linear_revmap(bank->irq_domain, eintd->irq);
-       generic_handle_irq(eint_irq);
+       generic_handle_domain_irq(bank->irq_domain, eintd->irq);
 
        chained_irq_exit(chip, desc);
 }
@@ -490,7 +489,7 @@ static inline void exynos_irq_demux_eint(unsigned int pend,
 
        while (pend) {
                irq = fls(pend) - 1;
-               generic_handle_irq(irq_find_mapping(domain, irq));
+               generic_handle_domain_irq(domain, irq);
                pend &= ~(1 << irq);
        }
 }
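Because the helper reports a failed lookup through its return value, the Exynos handler above can fold that straight into the irqreturn_t convention (IRQ_NONE when nothing was mapped), and the S3C hunks below keep their sanity check simply as BUG_ON(ret). Purely as an illustration of that mapping, with a hypothetical demo_ helper rather than the real handler:

#include <linux/interrupt.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

/* Illustration only: translate the helper's 0 / -EINVAL result into the
 * irqreturn_t expected from an interrupt handler. */
static irqreturn_t demo_handle_pin(struct irq_domain *domain, unsigned int pin)
{
        if (generic_handle_domain_irq(domain, pin))
                return IRQ_NONE;        /* no virq mapped for this hwirq */

        return IRQ_HANDLED;
}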
index 00d77d6..ac1eba3 100644 (file)
@@ -234,14 +234,12 @@ static void s3c2410_demux_eint0_3(struct irq_desc *desc)
 {
        struct irq_data *data = irq_desc_get_irq_data(desc);
        struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
-       unsigned int virq;
+       int ret;
 
        /* the first 4 eints have a simple 1 to 1 mapping */
-       virq = irq_linear_revmap(eint_data->domains[data->hwirq], data->hwirq);
+       ret = generic_handle_domain_irq(eint_data->domains[data->hwirq], data->hwirq);
        /* Something must be really wrong if an unmapped EINT is unmasked */
-       BUG_ON(!virq);
-
-       generic_handle_irq(virq);
+       BUG_ON(ret);
 }
 
 /* Handling of EINTs 0-3 on S3C2412 and S3C2413 */
@@ -290,16 +288,14 @@ static void s3c2412_demux_eint0_3(struct irq_desc *desc)
        struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
        struct irq_data *data = irq_desc_get_irq_data(desc);
        struct irq_chip *chip = irq_data_get_irq_chip(data);
-       unsigned int virq;
+       int ret;
 
        chained_irq_enter(chip, desc);
 
        /* the first 4 eints have a simple 1 to 1 mapping */
-       virq = irq_linear_revmap(eint_data->domains[data->hwirq], data->hwirq);
+       ret = generic_handle_domain_irq(eint_data->domains[data->hwirq], data->hwirq);
        /* Something must be really wrong if an unmapped EINT is unmasked */
-       BUG_ON(!virq);
-
-       generic_handle_irq(virq);
+       BUG_ON(ret);
 
        chained_irq_exit(chip, desc);
 }
@@ -364,15 +360,14 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc,
        pend &= range;
 
        while (pend) {
-               unsigned int virq, irq;
+               unsigned int irq;
+               int ret;
 
                irq = __ffs(pend);
                pend &= ~(1 << irq);
-               virq = irq_linear_revmap(data->domains[irq], irq - offset);
+               ret = generic_handle_domain_irq(data->domains[irq], irq - offset);
                /* Something is really wrong if an unmapped EINT is unmasked */
-               BUG_ON(!virq);
-
-               generic_handle_irq(virq);
+               BUG_ON(ret);
        }
 
        chained_irq_exit(chip, desc);
index 53e2a64..c5f95a1 100644 (file)
@@ -414,7 +414,7 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc)
                unsigned int svc;
                unsigned int group;
                unsigned int pin;
-               unsigned int virq;
+               int ret;
 
                svc = readl(drvdata->virt_base + SERVICE_REG);
                group = SVC_GROUP(svc);
@@ -431,14 +431,12 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc)
                                pin -= 8;
                }
 
-               virq = irq_linear_revmap(data->domains[group], pin);
+               ret = generic_handle_domain_irq(data->domains[group], pin);
                /*
                 * Something must be really wrong if an unmapped EINT
                 * was unmasked...
                 */
-               BUG_ON(!virq);
-
-               generic_handle_irq(virq);
+               BUG_ON(ret);
        } while (1);
 
        chained_irq_exit(chip, desc);
@@ -607,18 +605,17 @@ static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range)
        pend &= range;
 
        while (pend) {
-               unsigned int virq, irq;
+               unsigned int irq;
+               int ret;
 
                irq = fls(pend) - 1;
                pend &= ~(1 << irq);
-               virq = irq_linear_revmap(data->domains[irq], data->pins[irq]);
+               ret = generic_handle_domain_irq(data->domains[irq], data->pins[irq]);
                /*
                 * Something must be really wrong if an unmapped EINT
                 * was unmasked...
                 */
-               BUG_ON(!virq);
-
-               generic_handle_irq(virq);
+               BUG_ON(ret);
        }
 
        chained_irq_exit(chip, desc);
index 1ebbc49..43bb334 100644 (file)
@@ -400,8 +400,7 @@ static void plgpio_irq_handler(struct irq_desc *desc)
 
                        /* get correct irq line number */
                        pin = i * MAX_GPIO_PER_REG + pin;
-                       generic_handle_irq(
-                               irq_find_mapping(gc->irq.domain, pin));
+                       generic_handle_domain_irq(gc->irq.domain, pin);
                }
        }
        chained_irq_exit(irqchip, desc);
index dc8d39a..862c84e 100644 (file)
@@ -1149,11 +1149,9 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
        if (val) {
                int irqoffset;
 
-               for_each_set_bit(irqoffset, &val, IRQ_PER_BANK) {
-                       int pin_irq = irq_find_mapping(pctl->domain,
-                                                      bank * IRQ_PER_BANK + irqoffset);
-                       generic_handle_irq(pin_irq);
-               }
+               for_each_set_bit(irqoffset, &val, IRQ_PER_BANK)
+                       generic_handle_domain_irq(pctl->domain,
+                                                 bank * IRQ_PER_BANK + irqoffset);
        }
 
        chained_irq_exit(chip, desc);
@@ -1219,10 +1217,12 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
        }
 
        /*
-        * We suppose that we won't have any more functions than pins,
-        * we'll reallocate that later anyway
+        * Find an upper bound for the maximum number of functions: in
+        * the worst case we have gpio_in, gpio_out, irq and up to four
+        * special functions per pin, plus one entry for the sentinel.
+        * We'll reallocate that later anyway.
         */
-       pctl->functions = kcalloc(pctl->ngroups,
+       pctl->functions = kcalloc(4 * pctl->ngroups + 4,
                                  sizeof(*pctl->functions),
                                  GFP_KERNEL);
        if (!pctl->functions)
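The enlarged allocation follows the worst case spelled out in the new comment; presumably the constant comes from the four possible special functions per pin (4 * ngroups) plus the three shared gpio_in/gpio_out/irq entries and the terminating sentinel. Restated as a hypothetical helper, not code from the driver:

/* Hypothetical restatement of the upper bound used above. */
static unsigned int demo_max_functions(unsigned int ngroups)
{
        return 4 * ngroups      /* up to four special functions per pin */
             + 3                /* gpio_in, gpio_out and irq, shared by all pins */
             + 1;               /* terminating sentinel entry */
}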
index 7d385c3..d12db6c 100644 (file)
@@ -508,6 +508,7 @@ config THINKPAD_ACPI
        depends on RFKILL || RFKILL = n
        depends on ACPI_VIDEO || ACPI_VIDEO = n
        depends on BACKLIGHT_CLASS_DEVICE
+       depends on I2C
        select ACPI_PLATFORM_PROFILE
        select HWMON
        select NVRAM
@@ -691,6 +692,7 @@ config INTEL_HID_EVENT
        tristate "INTEL HID Event"
        depends on ACPI
        depends on INPUT
+       depends on I2C
        select INPUT_SPARSEKMAP
        help
          This driver provides support for the Intel HID Event hotkey interface.
@@ -742,6 +744,7 @@ config INTEL_VBTN
        tristate "INTEL VIRTUAL BUTTON"
        depends on ACPI
        depends on INPUT
+       depends on I2C
        select INPUT_SPARSEKMAP
        help
          This driver provides support for the Intel Virtual Button interface.
index 0cb927f..a81dc4b 100644 (file)
@@ -41,6 +41,10 @@ static int wapf = -1;
 module_param(wapf, uint, 0444);
 MODULE_PARM_DESC(wapf, "WAPF value");
 
+static int tablet_mode_sw = -1;
+module_param(tablet_mode_sw, uint, 0444);
+MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip");
+
 static struct quirk_entry *quirks;
 
 static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
@@ -458,6 +462,15 @@ static const struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_use_lid_flip_devid,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUS TP200s / E205SA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "E205SA"),
+               },
+               .driver_data = &quirk_asus_use_lid_flip_devid,
+       },
        {},
 };
 
@@ -477,6 +490,21 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
        else
                wapf = quirks->wapf;
 
+       switch (tablet_mode_sw) {
+       case 0:
+               quirks->use_kbd_dock_devid = false;
+               quirks->use_lid_flip_devid = false;
+               break;
+       case 1:
+               quirks->use_kbd_dock_devid = true;
+               quirks->use_lid_flip_devid = false;
+               break;
+       case 2:
+               quirks->use_kbd_dock_devid = false;
+               quirks->use_lid_flip_devid = true;
+               break;
+       }
+
        if (quirks->i8042_filter) {
                ret = i8042_install_filter(quirks->i8042_filter);
                if (ret) {
diff --git a/drivers/platform/x86/dual_accel_detect.h b/drivers/platform/x86/dual_accel_detect.h
new file mode 100644 (file)
index 0000000..a9eae17
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Helper code to detect 360 degree hinges (yoga) style 2-in-1 devices using 2 accelerometers
+ * to allow the OS to determine the angle between the display and the base of the device.
+ *
+ * On Windows these are read by a special HingeAngleService process which calls undocumented
+ * ACPI methods, to let the firmware know if the 2-in-1 is in tablet- or laptop-mode.
+ * The firmware may use this to disable the kbd and touchpad to avoid spurious input in
+ * tablet-mode as well as to report SW_TABLET_MODE info to the OS.
+ *
+ * Since Linux does not call these undocumented methods, the SW_TABLET_MODE info reported
+ * by various drivers/platform/x86 drivers is incorrect. These drivers use the detection
+ * code in this file to disable SW_TABLET_MODE reporting to avoid reporting broken info
+ * (instead userspace can derive the status itself by directly reading the 2 accels).
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+
+static int dual_accel_i2c_resource_count(struct acpi_resource *ares, void *data)
+{
+       struct acpi_resource_i2c_serialbus *sb;
+       int *count = data;
+
+       if (i2c_acpi_get_i2c_resource(ares, &sb))
+               *count = *count + 1;
+
+       return 1;
+}
+
+static int dual_accel_i2c_client_count(struct acpi_device *adev)
+{
+       int ret, count = 0;
+       LIST_HEAD(r);
+
+       ret = acpi_dev_get_resources(adev, &r, dual_accel_i2c_resource_count, &count);
+       if (ret < 0)
+               return ret;
+
+       acpi_dev_free_resource_list(&r);
+       return count;
+}
+
+static bool dual_accel_detect_bosc0200(void)
+{
+       struct acpi_device *adev;
+       int count;
+
+       adev = acpi_dev_get_first_match_dev("BOSC0200", NULL, -1);
+       if (!adev)
+               return false;
+
+       count = dual_accel_i2c_client_count(adev);
+
+       acpi_dev_put(adev);
+
+       return count == 2;
+}
+
+static bool dual_accel_detect(void)
+{
+       /* Systems which use a pair of accels with KIOX010A / KIOX020A ACPI ids */
+       if (acpi_dev_present("KIOX010A", NULL, -1) &&
+           acpi_dev_present("KIOX020A", NULL, -1))
+               return true;
+
+       /* Systems which use a single DUAL250E ACPI device to model 2 accels */
+       if (acpi_dev_present("DUAL250E", NULL, -1))
+               return true;
+
+       /* Systems which use a single BOSC0200 ACPI device to model 2 accels */
+       if (dual_accel_detect_bosc0200())
+               return true;
+
+       return false;
+}
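The helper is kept header-only so that each platform driver that would otherwise expose SW_TABLET_MODE can include it and make one dual_accel_detect() call at probe time, which is what the intel-hid, intel-vbtn and thinkpad_acpi hunks below do (hence the new I2C dependencies in the Kconfig hunk above). A hypothetical caller, sketched with demo_ names:

#include <linux/platform_device.h>
#include "dual_accel_detect.h"

/* Sketch only: skip tablet-mode reporting when the hinge angle is derived
 * from a pair of accelerometers. */
static int demo_probe(struct platform_device *pdev)
{
        if (dual_accel_detect()) {
                /* userspace reads the two accelerometers directly */
                return 0;
        }

        /* ... register the SW_TABLET_MODE input device here ... */
        return 0;
}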
index fbb224a..7f3a03f 100644 (file)
@@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
        }}
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
@@ -147,6 +148,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
        { }
index e5fbe01..2e4e97a 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
+#include "dual_accel_detect.h"
 
 /* When NOT in tablet mode, VGBS returns with the flag 0x40 */
 #define TABLET_MODE_FLAG BIT(6)
@@ -122,6 +123,7 @@ struct intel_hid_priv {
        struct input_dev *array;
        struct input_dev *switches;
        bool wakeup_mode;
+       bool dual_accel;
 };
 
 #define HID_EVENT_FILTER_UUID  "eeec56b3-4442-408f-a792-4edd4d758054"
@@ -451,22 +453,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
         * SW_TABLET_MODE report, in these cases we enable support when receiving
         * the first event instead of during driver setup.
         *
-        * Some 360 degree hinges (yoga) style 2-in-1 devices use 2 accelerometers
-        * to allow the OS to determine the angle between the display and the base
-        * of the device. On Windows these are read by a special HingeAngleService
-        * process which calls an ACPI DSM (Device Specific Method) on the
-        * ACPI KIOX010A device node for the sensor in the display, to let the
-        * firmware know if the 2-in-1 is in tablet- or laptop-mode so that it can
-        * disable the kbd and touchpad to avoid spurious input in tablet-mode.
-        *
-        * The linux kxcjk1013 driver calls the DSM for this once at probe time
-        * to ensure that the builtin kbd and touchpad work. On some devices this
-        * causes a "spurious" 0xcd event on the intel-hid ACPI dev. In this case
-        * there is not a functional tablet-mode switch, so we should not register
-        * the tablet-mode switch device.
+        * See dual_accel_detect.h for more info on the dual_accel check.
         */
-       if (!priv->switches && (event == 0xcc || event == 0xcd) &&
-           !acpi_dev_present("KIOX010A", NULL, -1)) {
+       if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
                dev_info(&device->dev, "switch event received, enable switches supports\n");
                err = intel_hid_switches_setup(device);
                if (err)
@@ -607,6 +596,8 @@ static int intel_hid_probe(struct platform_device *device)
                return -ENOMEM;
        dev_set_drvdata(&device->dev, priv);
 
+       priv->dual_accel = dual_accel_detect();
+
        err = intel_hid_input_setup(device);
        if (err) {
                pr_err("Failed to setup Intel HID hotkeys\n");
index 888a764..3091664 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
+#include "dual_accel_detect.h"
 
 /* Returned when NOT in tablet mode on some HP Stream x360 11 models */
 #define VGBS_TABLET_MODE_FLAG_ALT      0x10
@@ -66,6 +67,7 @@ static const struct key_entry intel_vbtn_switchmap[] = {
 struct intel_vbtn_priv {
        struct input_dev *buttons_dev;
        struct input_dev *switches_dev;
+       bool dual_accel;
        bool has_buttons;
        bool has_switches;
        bool wakeup_mode;
@@ -160,6 +162,10 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
                input_dev = priv->buttons_dev;
        } else if ((ke = sparse_keymap_entry_from_scancode(priv->switches_dev, event))) {
                if (!priv->has_switches) {
+                       /* See dual_accel_detect.h for more info */
+                       if (priv->dual_accel)
+                               return;
+
                        dev_info(&device->dev, "Registering Intel Virtual Switches input-dev after receiving a switch event\n");
                        ret = input_register_device(priv->switches_dev);
                        if (ret)
@@ -248,11 +254,15 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
        {} /* Array terminator */
 };
 
-static bool intel_vbtn_has_switches(acpi_handle handle)
+static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
 {
        unsigned long long vgbs;
        acpi_status status;
 
+       /* See dual_accel_detect.h for more info */
+       if (dual_accel)
+               return false;
+
        if (!dmi_check_system(dmi_switches_allow_list))
                return false;
 
@@ -263,13 +273,14 @@ static bool intel_vbtn_has_switches(acpi_handle handle)
 static int intel_vbtn_probe(struct platform_device *device)
 {
        acpi_handle handle = ACPI_HANDLE(&device->dev);
-       bool has_buttons, has_switches;
+       bool dual_accel, has_buttons, has_switches;
        struct intel_vbtn_priv *priv;
        acpi_status status;
        int err;
 
+       dual_accel = dual_accel_detect();
        has_buttons = acpi_has_method(handle, "VBDL");
-       has_switches = intel_vbtn_has_switches(handle);
+       has_switches = intel_vbtn_has_switches(handle, dual_accel);
 
        if (!has_buttons && !has_switches) {
                dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
@@ -281,6 +292,7 @@ static int intel_vbtn_probe(struct platform_device *device)
                return -ENOMEM;
        dev_set_drvdata(&device->dev, priv);
 
+       priv->dual_accel = dual_accel;
        priv->has_buttons = has_buttons;
        priv->has_switches = has_switches;
 
index c37349f..d063d91 100644 (file)
@@ -94,6 +94,7 @@ static struct gpiod_lookup_table gpios_led_table = {
                                NULL, 1, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
                                NULL, 2, GPIO_ACTIVE_LOW),
+               {} /* Terminating entry */
        }
 };
 
@@ -123,6 +124,7 @@ static struct gpiod_lookup_table gpios_key_table = {
        .table = {
                GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_MODESW,
                                NULL, 0, GPIO_ACTIVE_LOW),
+               {} /* Terminating entry */
        }
 };
 
index 603156a..50ff04c 100644 (file)
@@ -73,6 +73,7 @@
 #include <linux/uaccess.h>
 #include <acpi/battery.h>
 #include <acpi/video.h>
+#include "dual_accel_detect.h"
 
 /* ThinkPad CMOS commands */
 #define TP_CMOS_VOLUME_DOWN    0
@@ -3232,7 +3233,7 @@ static int hotkey_init_tablet_mode(void)
                 * the laptop/tent/tablet mode to the EC. The bmc150 iio driver
                 * does not support this, so skip the hotkey on these models.
                 */
-               if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1))
+               if (has_tablet_mode && !dual_accel_detect())
                        tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
                type = "GMMS";
        } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
index 4d11920..4b563db 100644 (file)
@@ -204,6 +204,12 @@ config POWER_RESET_ST
        help
          Reset support for STMicroelectronics boards.
 
+config POWER_RESET_TPS65086
+       bool "TPS65086 restart driver"
+       depends on MFD_TPS65086
+       help
+         This driver adds support for resetting the TPS65086 PMIC on restart.
+
 config POWER_RESET_VERSATILE
        bool "ARM Versatile family reboot driver"
        depends on ARM
index cf3f4d0..f606a2f 100644 (file)
@@ -23,6 +23,7 @@ obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
 obj-$(CONFIG_POWER_RESET_REGULATOR) += regulator-poweroff.o
 obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
 obj-$(CONFIG_POWER_RESET_ST) += st-poweroff.o
+obj-$(CONFIG_POWER_RESET_TPS65086) += tps65086-restart.o
 obj-$(CONFIG_POWER_RESET_VERSATILE) += arm-versatile-reboot.o
 obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
 obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
index f1e843d..02f5fdb 100644 (file)
@@ -19,6 +19,7 @@
 #define MII_MARVELL_PHY_PAGE           22
 
 #define MII_PHY_LED_CTRL               16
+#define MII_PHY_LED_POL_CTRL           17
 #define MII_88E1318S_PHY_LED_TCR       18
 #define MII_88E1318S_PHY_WOL_CTRL      16
 #define MII_M1011_IEVENT               19
 #define LED2_FORCE_ON                                  (0x8 << 8)
 #define LEDMASK                                                GENMASK(11,8)
 
+#define MII_88E1318S_PHY_LED_POL_LED2          BIT(4)
+
+struct power_off_cfg {
+       char *mdio_node_name;
+       void (*phy_set_reg)(bool restart);
+};
+
 static struct phy_device *phydev;
+static const struct power_off_cfg *cfg;
 
-static void mvphy_reg_intn(u16 data)
+static void linkstation_mvphy_reg_intn(bool restart)
 {
        int rc = 0, saved_page;
+       u16 data = 0;
+
+       if (restart)
+               data = MII_88E1318S_PHY_LED_TCR_FORCE_INT;
 
        saved_page = phy_select_page(phydev, MII_MARVELL_LED_PAGE);
        if (saved_page < 0)
@@ -66,11 +79,52 @@ err:
                dev_err(&phydev->mdio.dev, "Write register failed, %d\n", rc);
 }
 
+static void readynas_mvphy_set_reg(bool restart)
+{
+       int rc = 0, saved_page;
+       u16 data = 0;
+
+       if (restart)
+               data = MII_88E1318S_PHY_LED_POL_LED2;
+
+       saved_page = phy_select_page(phydev, MII_MARVELL_LED_PAGE);
+       if (saved_page < 0)
+               goto err;
+
+       /* Set the LED[2].0 Polarity bit to the required state */
+       __phy_modify(phydev, MII_PHY_LED_POL_CTRL,
+                    MII_88E1318S_PHY_LED_POL_LED2, data);
+
+       if (!data) {
+               /* If WOL was enabled and a magic packet was received before powering
+                * off, we won't be able to wake up by sending another magic packet.
+                * Clear WOL status.
+                */
+               __phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_MARVELL_WOL_PAGE);
+               __phy_set_bits(phydev, MII_88E1318S_PHY_WOL_CTRL,
+                              MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS);
+       }
+err:
+       rc = phy_restore_page(phydev, saved_page, rc);
+       if (rc < 0)
+               dev_err(&phydev->mdio.dev, "Write register failed, %d\n", rc);
+}
+
+static const struct power_off_cfg linkstation_power_off_cfg = {
+       .mdio_node_name = "mdio",
+       .phy_set_reg = linkstation_mvphy_reg_intn,
+};
+
+static const struct power_off_cfg readynas_power_off_cfg = {
+       .mdio_node_name = "mdio-bus",
+       .phy_set_reg = readynas_mvphy_set_reg,
+};
+
 static int linkstation_reboot_notifier(struct notifier_block *nb,
                                       unsigned long action, void *unused)
 {
        if (action == SYS_RESTART)
-               mvphy_reg_intn(MII_88E1318S_PHY_LED_TCR_FORCE_INT);
+               cfg->phy_set_reg(true);
 
        return NOTIFY_DONE;
 }
@@ -82,14 +136,21 @@ static struct notifier_block linkstation_reboot_nb = {
 static void linkstation_poweroff(void)
 {
        unregister_reboot_notifier(&linkstation_reboot_nb);
-       mvphy_reg_intn(0);
+       cfg->phy_set_reg(false);
 
        kernel_restart("Power off");
 }
 
 static const struct of_device_id ls_poweroff_of_match[] = {
-       { .compatible = "buffalo,ls421d" },
-       { .compatible = "buffalo,ls421de" },
+       { .compatible = "buffalo,ls421d",
+         .data = &linkstation_power_off_cfg,
+       },
+       { .compatible = "buffalo,ls421de",
+         .data = &linkstation_power_off_cfg,
+       },
+       { .compatible = "netgear,readynas-duo-v2",
+         .data = &readynas_power_off_cfg,
+       },
        { },
 };
 
@@ -97,13 +158,17 @@ static int __init linkstation_poweroff_init(void)
 {
        struct mii_bus *bus;
        struct device_node *dn;
+       const struct of_device_id *match;
 
        dn = of_find_matching_node(NULL, ls_poweroff_of_match);
        if (!dn)
                return -ENODEV;
        of_node_put(dn);
 
-       dn = of_find_node_by_name(NULL, "mdio");
+       match = of_match_node(ls_poweroff_of_match, dn);
+       cfg = match->data;
+
+       dn = of_find_node_by_name(NULL, cfg->mdio_node_name);
        if (!dn)
                return -ENODEV;
 
diff --git a/drivers/power/reset/tps65086-restart.c b/drivers/power/reset/tps65086-restart.c
new file mode 100644 (file)
index 0000000..78b89f7
--- /dev/null
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Emil Renner Berthing
+ */
+
+#include <linux/mfd/tps65086.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+struct tps65086_restart {
+       struct notifier_block handler;
+       struct device *dev;
+};
+
+static int tps65086_restart_notify(struct notifier_block *this,
+                                  unsigned long mode, void *cmd)
+{
+       struct tps65086_restart *tps65086_restart =
+               container_of(this, struct tps65086_restart, handler);
+       struct tps65086 *tps65086 = dev_get_drvdata(tps65086_restart->dev->parent);
+       int ret;
+
+       ret = regmap_write(tps65086->regmap, TPS65086_FORCESHUTDN, 1);
+       if (ret) {
+               dev_err(tps65086_restart->dev, "%s: error writing to tps65086 pmic: %d\n",
+                       __func__, ret);
+               return NOTIFY_DONE;
+       }
+
+       /* give it a little time */
+       mdelay(200);
+
+       WARN_ON(1);
+
+       return NOTIFY_DONE;
+}
+
+static int tps65086_restart_probe(struct platform_device *pdev)
+{
+       struct tps65086_restart *tps65086_restart;
+       int ret;
+
+       tps65086_restart = devm_kzalloc(&pdev->dev, sizeof(*tps65086_restart), GFP_KERNEL);
+       if (!tps65086_restart)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, tps65086_restart);
+
+       tps65086_restart->handler.notifier_call = tps65086_restart_notify;
+       tps65086_restart->handler.priority = 192;
+       tps65086_restart->dev = &pdev->dev;
+
+       ret = register_restart_handler(&tps65086_restart->handler);
+       if (ret) {
+               dev_err(&pdev->dev, "%s: cannot register restart handler: %d\n",
+                       __func__, ret);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int tps65086_restart_remove(struct platform_device *pdev)
+{
+       struct tps65086_restart *tps65086_restart = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = unregister_restart_handler(&tps65086_restart->handler);
+       if (ret) {
+               dev_err(&pdev->dev, "%s: cannot unregister restart handler: %d\n",
+                       __func__, ret);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static const struct platform_device_id tps65086_restart_id_table[] = {
+       { "tps65086-reset", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65086_restart_id_table);
+
+static struct platform_driver tps65086_restart_driver = {
+       .driver = {
+               .name = "tps65086-restart",
+       },
+       .probe = tps65086_restart_probe,
+       .remove = tps65086_restart_remove,
+       .id_table = tps65086_restart_id_table,
+};
+module_platform_driver(tps65086_restart_driver);
+
+MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>");
+MODULE_DESCRIPTION("TPS65086 restart driver");
+MODULE_LICENSE("GPL v2");
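The new driver hooks into the generic restart-handler chain: a notifier registered at priority 192 (above the 128 default) writes TPS65086_FORCESHUTDN through regmap, waits 200 ms for the PMIC to cut power, and warns if execution somehow continues. The registration pattern on its own, detached from the PMIC specifics and with demo_ placeholder names:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Sketch of the restart-handler pattern followed by the driver above. */
static int demo_restart_notify(struct notifier_block *nb,
                               unsigned long mode, void *cmd)
{
        /* poke the reset hardware here, then give it time to act */
        return NOTIFY_DONE;
}

static struct notifier_block demo_restart_nb = {
        .notifier_call  = demo_restart_notify,
        .priority       = 192,  /* run before default-priority (128) handlers */
};

static int __init demo_restart_init(void)
{
        return register_restart_handler(&demo_restart_nb);
}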
index 11f5368..fcc7534 100644 (file)
@@ -358,7 +358,7 @@ config AXP288_CHARGER
 
 config AXP288_FUEL_GAUGE
        tristate "X-Powers AXP288 Fuel Gauge"
-       depends on MFD_AXP20X && IIO
+       depends on MFD_AXP20X && IIO && IOSF_MBI
        help
          Say yes here to have support for X-Power power management IC (PMIC)
          Fuel Gauge. The device provides battery statistics and status
@@ -577,6 +577,17 @@ config CHARGER_MP2629
          Battery charger. This driver provides Battery charger power management
          functions on the systems.
 
+config CHARGER_MT6360
+       tristate "Mediatek MT6360 Charger Driver"
+       depends on MFD_MT6360
+       depends on REGULATOR
+       select LINEAR_RANGES
+       help
+         Say Y here to enable MT6360 Charger Part.
+         The device supports High-Accuracy Voltage/Current Regulation,
+         Average Input Current Regulation, Battery Temperature Sensing,
+         Over-Temperature Protection, DPDM Detection for BC1.2.
+
 config CHARGER_QCOM_SMBB
        tristate "Qualcomm Switch-Mode Battery Charger and Boost"
        depends on MFD_SPMI_PMIC || COMPILE_TEST
@@ -669,6 +680,7 @@ config CHARGER_BQ256XX
 config CHARGER_SMB347
        tristate "Summit Microelectronics SMB3XX Battery Charger"
        depends on I2C
+       depends on REGULATOR
        select REGMAP_I2C
        help
          Say Y to include support for Summit Microelectronics SMB345,
@@ -736,6 +748,16 @@ config CHARGER_CROS_USBPD
          what is connected to USB PD ports from the EC and converts
          that into power_supply properties.
 
+config CHARGER_CROS_PCHG
+       tristate "ChromeOS EC based peripheral charger"
+       depends on MFD_CROS_EC_DEV
+       default MFD_CROS_EC_DEV
+       help
+         Say Y here to enable ChromeOS EC based peripheral charge driver.
+         This driver gets various information about the devices connected to
+         the peripheral charge ports from the EC and converts that into
+         power_supply properties.
+
 config CHARGER_SC2731
        tristate "Spreadtrum SC2731 charger driver"
        depends on MFD_SC27XX_PMIC || COMPILE_TEST
@@ -782,6 +804,8 @@ config CHARGER_WILCO
 config RN5T618_POWER
        tristate "RN5T618 charger/fuel gauge support"
        depends on MFD_RN5T618
+       depends on RN5T618_ADC
+       depends on IIO
        help
          Say Y here to have support for RN5T618 PMIC family fuel gauge and charger.
          This driver can also be built as a module. If so, the module will be
index 33059a9..4e55a11 100644 (file)
@@ -60,7 +60,7 @@ obj-$(CONFIG_BATTERY_TWL4030_MADC)    += twl4030_madc_battery.o
 obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
 obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
 obj-$(CONFIG_BATTERY_RX51)     += rx51_battery.o
-obj-$(CONFIG_AB8500_BM)                += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o
+obj-$(CONFIG_AB8500_BM)                += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o ab8500_chargalg.o
 obj-$(CONFIG_CHARGER_CPCAP)    += cpcap-charger.o
 obj-$(CONFIG_CHARGER_ISP1704)  += isp1704_charger.o
 obj-$(CONFIG_CHARGER_MAX8903)  += max8903_charger.o
@@ -78,6 +78,7 @@ obj-$(CONFIG_CHARGER_MAX77693)        += max77693_charger.o
 obj-$(CONFIG_CHARGER_MAX8997)  += max8997_charger.o
 obj-$(CONFIG_CHARGER_MAX8998)  += max8998_charger.o
 obj-$(CONFIG_CHARGER_MP2629)   += mp2629_charger.o
+obj-$(CONFIG_CHARGER_MT6360)   += mt6360_charger.o
 obj-$(CONFIG_CHARGER_QCOM_SMBB)        += qcom_smbb.o
 obj-$(CONFIG_CHARGER_BQ2415X)  += bq2415x_charger.o
 obj-$(CONFIG_CHARGER_BQ24190)  += bq24190_charger.o
@@ -93,6 +94,7 @@ obj-$(CONFIG_CHARGER_TPS65217)        += tps65217_charger.o
 obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
 obj-$(CONFIG_AXP288_CHARGER)   += axp288_charger.o
 obj-$(CONFIG_CHARGER_CROS_USBPD)       += cros_usbpd-charger.o
+obj-$(CONFIG_CHARGER_CROS_PCHG)        += cros_peripheral_charger.o
 obj-$(CONFIG_CHARGER_SC2731)   += sc2731_charger.o
 obj-$(CONFIG_FUEL_GAUGE_SC27XX)        += sc27xx_fuel_gauge.o
 obj-$(CONFIG_CHARGER_UCS1002)  += ucs1002_power.o
index 0c94057..d11405b 100644 (file)
@@ -269,43 +269,43 @@ enum bup_vch_sel {
 
 /*
  * ADC for the battery thermistor.
- * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined
+ * When using the AB8500_ADC_THERM_BATCTRL the battery ID resistor is combined
  * with a NTC resistor to both identify the battery and to measure its
  * temperature. Different phone manufactures uses different techniques to both
  * identify the battery and to read its temperature.
  */
-enum abx500_adc_therm {
-       ABx500_ADC_THERM_BATCTRL,
-       ABx500_ADC_THERM_BATTEMP,
+enum ab8500_adc_therm {
+       AB8500_ADC_THERM_BATCTRL,
+       AB8500_ADC_THERM_BATTEMP,
 };
 
 /**
- * struct abx500_res_to_temp - defines one point in a temp to res curve. To
+ * struct ab8500_res_to_temp - defines one point in a temp to res curve. To
  * be used in battery packs that combines the identification resistor with a
  * NTC resistor.
  * @temp:                      battery pack temperature in Celsius
  * @resist:                    NTC resistor net total resistance
  */
-struct abx500_res_to_temp {
+struct ab8500_res_to_temp {
        int temp;
        int resist;
 };
 
 /**
- * struct abx500_v_to_cap - Table for translating voltage to capacity
+ * struct ab8500_v_to_cap - Table for translating voltage to capacity
  * @voltage:           Voltage in mV
  * @capacity:          Capacity in percent
  */
-struct abx500_v_to_cap {
+struct ab8500_v_to_cap {
        int voltage;
        int capacity;
 };
 
 /* Forward declaration */
-struct abx500_fg;
+struct ab8500_fg;
 
 /**
- * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds
+ * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
  * if not specified
  * @recovery_sleep_timer:      Time between measurements while recovering
  * @recovery_total_time:       Total recovery time
@@ -333,7 +333,7 @@ struct abx500_fg;
  * @pcut_max_restart:          Max number of restarts
  * @pcut_debounce_time:                Sets battery debounce time
  */
-struct abx500_fg_parameters {
+struct ab8500_fg_parameters {
        int recovery_sleep_timer;
        int recovery_total_time;
        int init_timer;
@@ -357,13 +357,13 @@ struct abx500_fg_parameters {
 };
 
 /**
- * struct abx500_charger_maximization - struct used by the board config.
+ * struct ab8500_charger_maximization - struct used by the board config.
  * @use_maxi:          Enable maximization for this battery type
  * @maxi_chg_curr:     Maximum charger current allowed
  * @maxi_wait_cycles:  cycles to wait before setting charger current
  * @charger_curr_step  delta between two charger current settings (mA)
  */
-struct abx500_maxim_parameters {
+struct ab8500_maxim_parameters {
        bool ena_maxi;
        int chg_curr;
        int wait_cycles;
@@ -371,7 +371,7 @@ struct abx500_maxim_parameters {
 };
 
 /**
- * struct abx500_battery_type - different batteries supported
+ * struct ab8500_battery_type - different batteries supported
  * @name:                      battery technology
  * @resis_high:                        battery upper resistance limit
  * @resis_low:                 battery lower resistance limit
@@ -400,7 +400,7 @@ struct abx500_maxim_parameters {
  * @n_batres_tbl_elements      number of elements in the batres_tbl
  * @batres_tbl                 battery internal resistance vs temperature table
  */
-struct abx500_battery_type {
+struct ab8500_battery_type {
        int name;
        int resis_high;
        int resis_low;
@@ -421,22 +421,22 @@ struct abx500_battery_type {
        int low_high_vol_lvl;
        int battery_resistance;
        int n_temp_tbl_elements;
-       const struct abx500_res_to_temp *r_to_t_tbl;
+       const struct ab8500_res_to_temp *r_to_t_tbl;
        int n_v_cap_tbl_elements;
-       const struct abx500_v_to_cap *v_to_cap_tbl;
+       const struct ab8500_v_to_cap *v_to_cap_tbl;
        int n_batres_tbl_elements;
        const struct batres_vs_temp *batres_tbl;
 };
 
 /**
- * struct abx500_bm_capacity_levels - abx500 capacity level data
+ * struct ab8500_bm_capacity_levels - ab8500 capacity level data
  * @critical:          critical capacity level in percent
  * @low:               low capacity level in percent
  * @normal:            normal capacity level in percent
  * @high:              high capacity level in percent
  * @full:              full capacity level in percent
  */
-struct abx500_bm_capacity_levels {
+struct ab8500_bm_capacity_levels {
        int critical;
        int low;
        int normal;
@@ -445,13 +445,13 @@ struct abx500_bm_capacity_levels {
 };
 
 /**
- * struct abx500_bm_charger_parameters - Charger specific parameters
+ * struct ab8500_bm_charger_parameters - Charger specific parameters
  * @usb_volt_max:      maximum allowed USB charger voltage in mV
  * @usb_curr_max:      maximum allowed USB charger current in mA
  * @ac_volt_max:       maximum allowed AC charger voltage in mV
  * @ac_curr_max:       maximum allowed AC charger current in mA
  */
-struct abx500_bm_charger_parameters {
+struct ab8500_bm_charger_parameters {
        int usb_volt_max;
        int usb_curr_max;
        int ac_volt_max;
@@ -459,7 +459,7 @@ struct abx500_bm_charger_parameters {
 };
 
 /**
- * struct abx500_bm_data - abx500 battery management data
+ * struct ab8500_bm_data - ab8500 battery management data
  * @temp_under         under this temp, charging is stopped
  * @temp_low           between this temp and temp_under charging is reduced
  * @temp_high          between this temp and temp_over charging is reduced
@@ -473,7 +473,7 @@ struct abx500_bm_charger_parameters {
  * @bkup_bat_i         current which we charge the backup battery with
  * @no_maintenance     indicates that maintenance charging is disabled
  * @capacity_scaling    indicates whether capacity scaling is to be used
- * @abx500_adc_therm   placement of thermistor, batctrl or battemp adc
+ * @ab8500_adc_therm   placement of thermistor, batctrl or battemp adc
  * @chg_unknown_bat    flag to enable charging of unknown batteries
  * @enable_overshoot   flag to enable VBAT overshoot control
  * @auto_trig          flag to enable auto adc trigger
@@ -494,7 +494,7 @@ struct abx500_bm_charger_parameters {
  * @chg_params         charger parameters
  * @fg_params          fuel gauge parameters
  */
-struct abx500_bm_data {
+struct ab8500_bm_data {
        int temp_under;
        int temp_low;
        int temp_high;
@@ -511,7 +511,7 @@ struct abx500_bm_data {
        bool chg_unknown_bat;
        bool enable_overshoot;
        bool auto_trig;
-       enum abx500_adc_therm adc_therm;
+       enum ab8500_adc_therm adc_therm;
        int fg_res;
        int n_btypes;
        int batt_id;
@@ -523,11 +523,11 @@ struct abx500_bm_data {
        int n_chg_in_curr;
        int *chg_output_curr;
        int *chg_input_curr;
-       const struct abx500_maxim_parameters *maxi;
-       const struct abx500_bm_capacity_levels *cap_levels;
-       struct abx500_battery_type *bat_type;
-       const struct abx500_bm_charger_parameters *chg_params;
-       const struct abx500_fg_parameters *fg_params;
+       const struct ab8500_maxim_parameters *maxi;
+       const struct ab8500_bm_capacity_levels *cap_levels;
+       struct ab8500_battery_type *bat_type;
+       const struct ab8500_bm_charger_parameters *chg_params;
+       const struct ab8500_fg_parameters *fg_params;
 };
 
 enum {
@@ -561,160 +561,7 @@ struct batres_vs_temp {
 /* Forward declaration */
 struct ab8500_fg;
 
-/**
- * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
- * if not specified
- * @recovery_sleep_timer:      Time between measurements while recovering
- * @recovery_total_time:       Total recovery time
- * @init_timer:                        Measurement interval during startup
- * @init_discard_time:         Time we discard voltage measurement at startup
- * @init_total_time:           Total init time during startup
- * @high_curr_time:            Time current has to be high to go to recovery
- * @accu_charging:             FG accumulation time while charging
- * @accu_high_curr:            FG accumulation time in high current mode
- * @high_curr_threshold:       High current threshold, in mA
- * @lowbat_threshold:          Low battery threshold, in mV
- * @battok_falling_th_sel0     Threshold in mV for battOk signal sel0
- *                             Resolution in 50 mV step.
- * @battok_raising_th_sel1     Threshold in mV for battOk signal sel1
- *                             Resolution in 50 mV step.
- * @user_cap_limit             Capacity reported from user must be within this
- *                             limit to be considered as sane, in percentage
- *                             points.
- * @maint_thres                        This is the threshold where we stop reporting
- *                             battery full while in maintenance, in per cent
- * @pcut_enable:                       Enable power cut feature in ab8505
- * @pcut_max_time:             Max time threshold
- * @pcut_flag_time:            Flagtime threshold
- * @pcut_max_restart:          Max number of restarts
- * @pcut_debunce_time: Sets battery debounce time
- */
-struct ab8500_fg_parameters {
-       int recovery_sleep_timer;
-       int recovery_total_time;
-       int init_timer;
-       int init_discard_time;
-       int init_total_time;
-       int high_curr_time;
-       int accu_charging;
-       int accu_high_curr;
-       int high_curr_threshold;
-       int lowbat_threshold;
-       int battok_falling_th_sel0;
-       int battok_raising_th_sel1;
-       int user_cap_limit;
-       int maint_thres;
-       bool pcut_enable;
-       u8 pcut_max_time;
-       u8 pcut_flag_time;
-       u8 pcut_max_restart;
-       u8 pcut_debunce_time;
-};
-
-/**
- * struct ab8500_charger_maximization - struct used by the board config.
- * @use_maxi:          Enable maximization for this battery type
- * @maxi_chg_curr:     Maximum charger current allowed
- * @maxi_wait_cycles:  cycles to wait before setting charger current
- * @charger_curr_step  delta between two charger current settings (mA)
- */
-struct ab8500_maxim_parameters {
-       bool ena_maxi;
-       int chg_curr;
-       int wait_cycles;
-       int charger_curr_step;
-};
-
-/**
- * struct ab8500_bm_capacity_levels - ab8500 capacity level data
- * @critical:          critical capacity level in percent
- * @low:               low capacity level in percent
- * @normal:            normal capacity level in percent
- * @high:              high capacity level in percent
- * @full:              full capacity level in percent
- */
-struct ab8500_bm_capacity_levels {
-       int critical;
-       int low;
-       int normal;
-       int high;
-       int full;
-};
-
-/**
- * struct ab8500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max:      maximum allowed USB charger voltage in mV
- * @usb_curr_max:      maximum allowed USB charger current in mA
- * @ac_volt_max:       maximum allowed AC charger voltage in mV
- * @ac_curr_max:       maximum allowed AC charger current in mA
- */
-struct ab8500_bm_charger_parameters {
-       int usb_volt_max;
-       int usb_curr_max;
-       int ac_volt_max;
-       int ac_curr_max;
-};
-
-/**
- * struct ab8500_bm_data - ab8500 battery management data
- * @temp_under         under this temp, charging is stopped
- * @temp_low           between this temp and temp_under charging is reduced
- * @temp_high          between this temp and temp_over charging is reduced
- * @temp_over          over this temp, charging is stopped
- * @temp_interval_chg  temperature measurement interval in s when charging
- * @temp_interval_nochg        temperature measurement interval in s when not charging
- * @main_safety_tmr_h  safety timer for main charger
- * @usb_safety_tmr_h   safety timer for usb charger
- * @bkup_bat_v         voltage which we charge the backup battery with
- * @bkup_bat_i         current which we charge the backup battery with
- * @no_maintenance     indicates that maintenance charging is disabled
- * @capacity_scaling    indicates whether capacity scaling is to be used
- * @adc_therm          placement of thermistor, batctrl or battemp adc
- * @chg_unknown_bat    flag to enable charging of unknown batteries
- * @enable_overshoot   flag to enable VBAT overshoot control
- * @fg_res             resistance of FG resistor in 0.1mOhm
- * @n_btypes           number of elements in array bat_type
- * @batt_id            index of the identified battery in array bat_type
- * @interval_charging  charge alg cycle period time when charging (sec)
- * @interval_not_charging charge alg cycle period time when not charging (sec)
- * @temp_hysteresis    temperature hysteresis
- * @gnd_lift_resistance        Battery ground to phone ground resistance (mOhm)
- * @maxi:              maximization parameters
- * @cap_levels         capacity in percent for the different capacity levels
- * @bat_type           table of supported battery types
- * @chg_params         charger parameters
- * @fg_params          fuel gauge parameters
- */
-struct ab8500_bm_data {
-       int temp_under;
-       int temp_low;
-       int temp_high;
-       int temp_over;
-       int temp_interval_chg;
-       int temp_interval_nochg;
-       int main_safety_tmr_h;
-       int usb_safety_tmr_h;
-       int bkup_bat_v;
-       int bkup_bat_i;
-       bool no_maintenance;
-       bool capacity_scaling;
-       bool chg_unknown_bat;
-       bool enable_overshoot;
-       enum abx500_adc_therm adc_therm;
-       int fg_res;
-       int n_btypes;
-       int batt_id;
-       int interval_charging;
-       int interval_not_charging;
-       int temp_hysteresis;
-       int gnd_lift_resistance;
-       const struct ab8500_maxim_parameters *maxi;
-       const struct ab8500_bm_capacity_levels *cap_levels;
-       const struct ab8500_bm_charger_parameters *chg_params;
-       const struct ab8500_fg_parameters *fg_params;
-};
-
-extern struct abx500_bm_data ab8500_bm_data;
+extern struct ab8500_bm_data ab8500_bm_data;
 
 void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
 struct ab8500_fg *ab8500_fg_get(void);
@@ -725,10 +572,10 @@ int ab8500_fg_inst_curr_started(struct ab8500_fg *di);
 int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
 int ab8500_bm_of_probe(struct device *dev,
                       struct device_node *np,
-                      struct abx500_bm_data *bm);
+                      struct ab8500_bm_data *bm);
 
 extern struct platform_driver ab8500_fg_driver;
 extern struct platform_driver ab8500_btemp_driver;
-extern struct platform_driver abx500_chargalg_driver;
+extern struct platform_driver ab8500_chargalg_driver;
 
 #endif /* _AB8500_CHARGER_H_ */
index c2b8c0b..6f5fb79 100644 (file)
@@ -2,8 +2,6 @@
 #include <linux/export.h>
 #include <linux/power_supply.h>
 #include <linux/of.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500.h>
 
 #include "ab8500-bm.h"
 
@@ -13,7 +11,7 @@
  * Note that the res_to_temp table must be strictly sorted by falling resistance
  * values to work.
  */
-const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[] = {
+const struct ab8500_res_to_temp ab8500_temp_tbl_a_thermistor[] = {
        {-5, 53407},
        { 0, 48594},
        { 5, 43804},
@@ -35,7 +33,7 @@ EXPORT_SYMBOL(ab8500_temp_tbl_a_thermistor);
 const int ab8500_temp_tbl_a_size = ARRAY_SIZE(ab8500_temp_tbl_a_thermistor);
 EXPORT_SYMBOL(ab8500_temp_tbl_a_size);
 
-const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[] = {
+const struct ab8500_res_to_temp ab8500_temp_tbl_b_thermistor[] = {
        {-5, 200000},
        { 0, 159024},
        { 5, 151921},
@@ -57,7 +55,7 @@ EXPORT_SYMBOL(ab8500_temp_tbl_b_thermistor);
 const int ab8500_temp_tbl_b_size = ARRAY_SIZE(ab8500_temp_tbl_b_thermistor);
 EXPORT_SYMBOL(ab8500_temp_tbl_b_size);
 
-static const struct abx500_v_to_cap cap_tbl_a_thermistor[] = {
+static const struct ab8500_v_to_cap cap_tbl_a_thermistor[] = {
        {4171,  100},
        {4114,   95},
        {4009,   83},
@@ -80,7 +78,7 @@ static const struct abx500_v_to_cap cap_tbl_a_thermistor[] = {
        {3247,    0},
 };
 
-static const struct abx500_v_to_cap cap_tbl_b_thermistor[] = {
+static const struct ab8500_v_to_cap cap_tbl_b_thermistor[] = {
        {4161,  100},
        {4124,   98},
        {4044,   90},
@@ -103,7 +101,7 @@ static const struct abx500_v_to_cap cap_tbl_b_thermistor[] = {
        {3250,    0},
 };
 
-static const struct abx500_v_to_cap cap_tbl[] = {
+static const struct ab8500_v_to_cap cap_tbl[] = {
        {4186,  100},
        {4163,   99},
        {4114,   95},
@@ -134,7 +132,7 @@ static const struct abx500_v_to_cap cap_tbl[] = {
  * Note that the res_to_temp table must be strictly sorted by falling
  * resistance values to work.
  */
-static const struct abx500_res_to_temp temp_tbl[] = {
+static const struct ab8500_res_to_temp temp_tbl[] = {
        {-5, 214834},
        { 0, 162943},
        { 5, 124820},
@@ -191,7 +189,7 @@ static const struct batres_vs_temp temp_to_batres_tbl_9100[] = {
        {-20, 180},
 };
 
-static struct abx500_battery_type bat_type_thermistor[] = {
+static struct ab8500_battery_type bat_type_thermistor[] = {
        [BATTERY_UNKNOWN] = {
                /* First element always represent the UNKNOWN battery */
                .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
@@ -277,7 +275,7 @@ static struct abx500_battery_type bat_type_thermistor[] = {
        },
 };
 
-static struct abx500_battery_type bat_type_ext_thermistor[] = {
+static struct ab8500_battery_type bat_type_ext_thermistor[] = {
        [BATTERY_UNKNOWN] = {
                /* First element always represent the UNKNOWN battery */
                .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
@@ -394,7 +392,7 @@ static struct abx500_battery_type bat_type_ext_thermistor[] = {
        },
 };
 
-static const struct abx500_bm_capacity_levels cap_levels = {
+static const struct ab8500_bm_capacity_levels cap_levels = {
        .critical       = 2,
        .low            = 10,
        .normal         = 70,
@@ -402,7 +400,7 @@ static const struct abx500_bm_capacity_levels cap_levels = {
        .full           = 100,
 };
 
-static const struct abx500_fg_parameters fg = {
+static const struct ab8500_fg_parameters fg = {
        .recovery_sleep_timer = 10,
        .recovery_total_time = 100,
        .init_timer = 1,
@@ -424,14 +422,14 @@ static const struct abx500_fg_parameters fg = {
        .pcut_debounce_time = 2,
 };
 
-static const struct abx500_maxim_parameters ab8500_maxi_params = {
+static const struct ab8500_maxim_parameters ab8500_maxi_params = {
        .ena_maxi = true,
        .chg_curr = 910,
        .wait_cycles = 10,
        .charger_curr_step = 100,
 };
 
-static const struct abx500_bm_charger_parameters chg = {
+static const struct ab8500_bm_charger_parameters chg = {
        .usb_volt_max           = 5500,
        .usb_curr_max           = 1500,
        .ac_volt_max            = 7500,
@@ -456,7 +454,7 @@ static int ab8500_charge_input_curr_map[] = {
         700,    800,    900,    1000,   1100,   1300,   1400,   1500,
 };
 
-struct abx500_bm_data ab8500_bm_data = {
+struct ab8500_bm_data ab8500_bm_data = {
        .temp_under             = 3,
        .temp_low               = 8,
        .temp_high              = 43,
@@ -469,7 +467,7 @@ struct abx500_bm_data ab8500_bm_data = {
        .bkup_bat_i             = BUP_ICH_SEL_150UA,
        .no_maintenance         = false,
        .capacity_scaling       = false,
-       .adc_therm              = ABx500_ADC_THERM_BATCTRL,
+       .adc_therm              = AB8500_ADC_THERM_BATCTRL,
        .chg_unknown_bat        = false,
        .enable_overshoot       = false,
        .fg_res                 = 100,
@@ -492,7 +490,7 @@ struct abx500_bm_data ab8500_bm_data = {
 
 int ab8500_bm_of_probe(struct device *dev,
                       struct device_node *np,
-                      struct abx500_bm_data *bm)
+                      struct ab8500_bm_data *bm)
 {
        const struct batres_vs_temp *tmp_batres_tbl;
        struct device_node *battery_node;
@@ -531,7 +529,7 @@ int ab8500_bm_of_probe(struct device *dev,
        } else {
                bm->n_btypes   = 4;
                bm->bat_type   = bat_type_ext_thermistor;
-               bm->adc_therm  = ABx500_ADC_THERM_BATTEMP;
+               bm->adc_therm  = AB8500_ADC_THERM_BATTEMP;
                tmp_batres_tbl = temp_to_batres_tbl_ext_thermistor;
        }
 
index dbdcff3..b6c9111 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/iio/consumer.h>
+#include <linux/fixp-arith.h>
 
 #include "ab8500-bm.h"
 
@@ -102,7 +103,7 @@ struct ab8500_btemp {
        struct iio_channel *btemp_ball;
        struct iio_channel *bat_ctrl;
        struct ab8500_fg *fg;
-       struct abx500_bm_data *bm;
+       struct ab8500_bm_data *bm;
        struct power_supply *btemp_psy;
        struct ab8500_btemp_events events;
        struct ab8500_btemp_ranges btemp_ranges;
@@ -144,7 +145,7 @@ static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
                return (450000 * (v_batctrl)) / (1800 - v_batctrl);
        }
 
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL) {
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL) {
                /*
                 * If the battery has internal NTC, we use the current
                 * source to calculate the resistance.
@@ -206,7 +207,7 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
                return 0;
 
        /* Only do this for batteries with internal NTC */
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL && enable) {
 
                if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
                        curr = BAT_CTRL_7U_ENA;
@@ -239,7 +240,7 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
                                __func__);
                        goto disable_curr_source;
                }
-       } else if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
+       } else if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL && !enable) {
                dev_dbg(di->dev, "Disable BATCTRL curr source\n");
 
                /* Write 0 to the curr bits */
@@ -417,7 +418,7 @@ static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
  * based on the NTC resistance.
  */
 static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
-       const struct abx500_res_to_temp *tbl, int tbl_size, int res)
+       const struct ab8500_res_to_temp *tbl, int tbl_size, int res)
 {
        int i;
        /*
@@ -437,8 +438,9 @@ static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
                        i++;
        }
 
-       return tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
-               (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
+       return fixp_linear_interpolate(tbl[i].resist, tbl[i].temp,
+                                      tbl[i + 1].resist, tbl[i + 1].temp,
+                                      res);
 }
 
 /**
@@ -456,7 +458,7 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
 
        id = di->bm->batt_id;
 
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL &&
                        id != BATTERY_UNKNOWN) {
 
                rbat = ab8500_btemp_get_batctrl_res(di);
@@ -525,7 +527,7 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
                        dev_dbg(di->dev, "Battery detected on %s"
                                " low %d < res %d < high: %d"
                                " index: %d\n",
-                               di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL ?
+                               di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL ?
                                "BATCTRL" : "BATTEMP",
                                di->bm->bat_type[i].resis_low, res,
                                di->bm->bat_type[i].resis_high, i);
@@ -545,7 +547,7 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
         * We only have to change current source if the
         * detected type is Type 1.
         */
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL &&
            di->bm->batt_id == 1) {
                dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
                di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
new file mode 100644 (file)
index 0000000..ff4b26b
--- /dev/null
@@ -0,0 +1,2096 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ * Copyright (c) 2012 Sony Mobile Communications AB
+ *
+ * Charging algorithm driver for AB8500
+ *
+ * Authors:
+ *     Johan Palsson <johan.palsson@stericsson.com>
+ *     Karl Komierowski <karl.komierowski@stericsson.com>
+ *     Arun R Murthy <arun.murthy@stericsson.com>
+ *     Author: Imre Sunyi <imre.sunyi@sonymobile.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/component.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/notifier.h>
+
+#include "ab8500-bm.h"
+#include "ab8500-chargalg.h"
+
+/* Watchdog kick interval */
+#define CHG_WD_INTERVAL                        (6 * HZ)
+
+/* End-of-charge criteria counter */
+#define EOC_COND_CNT                   10
+
+/* One hour expressed in seconds */
+#define ONE_HOUR_IN_SECONDS            3600
+
+/* Five minutes expressed in seconds */
+#define FIVE_MINUTES_IN_SECONDS        300
+
+#define CHARGALG_CURR_STEP_LOW         0
+#define CHARGALG_CURR_STEP_HIGH        100
+
+enum ab8500_chargers {
+       NO_CHG,
+       AC_CHG,
+       USB_CHG,
+};
+
+struct ab8500_chargalg_charger_info {
+       enum ab8500_chargers conn_chg;
+       enum ab8500_chargers prev_conn_chg;
+       enum ab8500_chargers online_chg;
+       enum ab8500_chargers prev_online_chg;
+       enum ab8500_chargers charger_type;
+       bool usb_chg_ok;
+       bool ac_chg_ok;
+       int usb_volt;
+       int usb_curr;
+       int ac_volt;
+       int ac_curr;
+       int usb_vset;
+       int usb_iset;
+       int ac_vset;
+       int ac_iset;
+};
+
+struct ab8500_chargalg_suspension_status {
+       bool suspended_change;
+       bool ac_suspended;
+       bool usb_suspended;
+};
+
+struct ab8500_chargalg_current_step_status {
+       bool curr_step_change;
+       int curr_step;
+};
+
+struct ab8500_chargalg_battery_data {
+       int temp;
+       int volt;
+       int avg_curr;
+       int inst_curr;
+       int percent;
+};
+
+enum ab8500_chargalg_states {
+       STATE_HANDHELD_INIT,
+       STATE_HANDHELD,
+       STATE_CHG_NOT_OK_INIT,
+       STATE_CHG_NOT_OK,
+       STATE_HW_TEMP_PROTECT_INIT,
+       STATE_HW_TEMP_PROTECT,
+       STATE_NORMAL_INIT,
+       STATE_NORMAL,
+       STATE_WAIT_FOR_RECHARGE_INIT,
+       STATE_WAIT_FOR_RECHARGE,
+       STATE_MAINTENANCE_A_INIT,
+       STATE_MAINTENANCE_A,
+       STATE_MAINTENANCE_B_INIT,
+       STATE_MAINTENANCE_B,
+       STATE_TEMP_UNDEROVER_INIT,
+       STATE_TEMP_UNDEROVER,
+       STATE_TEMP_LOWHIGH_INIT,
+       STATE_TEMP_LOWHIGH,
+       STATE_SUSPENDED_INIT,
+       STATE_SUSPENDED,
+       STATE_OVV_PROTECT_INIT,
+       STATE_OVV_PROTECT,
+       STATE_SAFETY_TIMER_EXPIRED_INIT,
+       STATE_SAFETY_TIMER_EXPIRED,
+       STATE_BATT_REMOVED_INIT,
+       STATE_BATT_REMOVED,
+       STATE_WD_EXPIRED_INIT,
+       STATE_WD_EXPIRED,
+};
+
+static const char * const states[] = {
+       "HANDHELD_INIT",
+       "HANDHELD",
+       "CHG_NOT_OK_INIT",
+       "CHG_NOT_OK",
+       "HW_TEMP_PROTECT_INIT",
+       "HW_TEMP_PROTECT",
+       "NORMAL_INIT",
+       "NORMAL",
+       "WAIT_FOR_RECHARGE_INIT",
+       "WAIT_FOR_RECHARGE",
+       "MAINTENANCE_A_INIT",
+       "MAINTENANCE_A",
+       "MAINTENANCE_B_INIT",
+       "MAINTENANCE_B",
+       "TEMP_UNDEROVER_INIT",
+       "TEMP_UNDEROVER",
+       "TEMP_LOWHIGH_INIT",
+       "TEMP_LOWHIGH",
+       "SUSPENDED_INIT",
+       "SUSPENDED",
+       "OVV_PROTECT_INIT",
+       "OVV_PROTECT",
+       "SAFETY_TIMER_EXPIRED_INIT",
+       "SAFETY_TIMER_EXPIRED",
+       "BATT_REMOVED_INIT",
+       "BATT_REMOVED",
+       "WD_EXPIRED_INIT",
+       "WD_EXPIRED",
+};
+
+struct ab8500_chargalg_events {
+       bool batt_unknown;
+       bool mainextchnotok;
+       bool batt_ovv;
+       bool batt_rem;
+       bool btemp_underover;
+       bool btemp_lowhigh;
+       bool main_thermal_prot;
+       bool usb_thermal_prot;
+       bool main_ovv;
+       bool vbus_ovv;
+       bool usbchargernotok;
+       bool safety_timer_expired;
+       bool maintenance_timer_expired;
+       bool ac_wd_expired;
+       bool usb_wd_expired;
+       bool ac_cv_active;
+       bool usb_cv_active;
+       bool vbus_collapsed;
+};
+
+/**
+ * struct ab8500_charge_curr_maximization - Charger maximization parameters
+ * @original_iset:     the non-optimized/maximized charger current
+ * @current_iset:      the charging current used at this moment
+ * @test_delta_i:      the delta between the current we want to charge and the
+ *                     current that is really going into the battery
+ * @condition_cnt:     number of iterations needed before a new charger current
+ *                     is set
+ * @max_current:       maximum charger current
+ * @wait_cnt:          to avoid too fast current step down in case of charger
+ *                     voltage collapse, we insert this delay between step
+ *                     downs
+ * @level:             tells in how many steps the charging current has been
+ *                     increased
+ */
+struct ab8500_charge_curr_maximization {
+       int original_iset;
+       int current_iset;
+       int test_delta_i;
+       int condition_cnt;
+       int max_current;
+       int wait_cnt;
+       u8 level;
+};
+
+enum maxim_ret {
+       MAXIM_RET_NOACTION,
+       MAXIM_RET_CHANGE,
+       MAXIM_RET_IBAT_TOO_HIGH,
+};
+
+/**
+ * struct ab8500_chargalg - ab8500 Charging algorithm device information
+ * @dev:               pointer to the structure device
+ * @charge_status:     battery operating status
+ * @eoc_cnt:           counter used to determine end-of-charge
+ * @maintenance_chg:   indicates if maintenance charge is active
+ * @t_hyst_norm:       temperature hysteresis when the temperature has been
+ *                     over or under normal limits
+ * @t_hyst_lowhigh:    temperature hysteresis when the temperature has been
+ *                     over or under the high or low limits
+ * @charge_state:      current state of the charging algorithm
+ * @ccm:               charging current maximization parameters
+ * @chg_info:          information about connected charger types
+ * @batt_data:         data of the battery
+ * @susp_status:       current charger suspension status
+ * @bm:                platform-specific battery management information
+ * @curr_status:       Current step status for over-current protection
+ * @parent:            pointer to the struct ab8500
+ * @chargalg_psy:      structure that holds the battery properties exposed by
+ *                     the charging algorithm
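+ * @ac_chg:            the AC charger (struct ux500_charger) found among the
+ *                     external power supplies
+ * @usb_chg:           the USB charger (struct ux500_charger) found among the
+ *                     external power supplies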
+ * @events:            structure for information about events triggered
+ * @chargalg_wq:               work queue for running the charging algorithm
+ * @chargalg_periodic_work:    work to run the charging algorithm periodically
+ * @chargalg_wd_work:          work to kick the charger watchdog periodically
+ * @chargalg_work:             work to run the charging algorithm instantly
+ * @safety_timer:              charging safety timer
+ * @maintenance_timer:         maintenance charging timer
+ * @chargalg_kobject:          structure of type kobject
+ */
+struct ab8500_chargalg {
+       struct device *dev;
+       int charge_status;
+       int eoc_cnt;
+       bool maintenance_chg;
+       int t_hyst_norm;
+       int t_hyst_lowhigh;
+       enum ab8500_chargalg_states charge_state;
+       struct ab8500_charge_curr_maximization ccm;
+       struct ab8500_chargalg_charger_info chg_info;
+       struct ab8500_chargalg_battery_data batt_data;
+       struct ab8500_chargalg_suspension_status susp_status;
+       struct ab8500 *parent;
+       struct ab8500_chargalg_current_step_status curr_status;
+       struct ab8500_bm_data *bm;
+       struct power_supply *chargalg_psy;
+       struct ux500_charger *ac_chg;
+       struct ux500_charger *usb_chg;
+       struct ab8500_chargalg_events events;
+       struct workqueue_struct *chargalg_wq;
+       struct delayed_work chargalg_periodic_work;
+       struct delayed_work chargalg_wd_work;
+       struct work_struct chargalg_work;
+       struct hrtimer safety_timer;
+       struct hrtimer maintenance_timer;
+       struct kobject chargalg_kobject;
+};
+
+/* External charger prepare notifier */
+BLOCKING_NOTIFIER_HEAD(charger_notifier_list);
+
+/* Main battery properties */
+static enum power_supply_property ab8500_chargalg_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_HEALTH,
+};
+
+struct ab8500_chargalg_sysfs_entry {
+       struct attribute attr;
+       ssize_t (*show)(struct ab8500_chargalg *di, char *buf);
+       ssize_t (*store)(struct ab8500_chargalg *di, const char *buf, size_t length);
+};
+
+/**
+ * ab8500_chargalg_safety_timer_expired() - Expiration of the safety timer
+ * @timer:     pointer to the hrtimer structure
+ *
+ * This function gets called when the safety timer for the charger
+ * expires
+ */
+static enum hrtimer_restart
+ab8500_chargalg_safety_timer_expired(struct hrtimer *timer)
+{
+       struct ab8500_chargalg *di = container_of(timer, struct ab8500_chargalg,
+                                                 safety_timer);
+       dev_err(di->dev, "Safety timer expired\n");
+       di->events.safety_timer_expired = true;
+
+       /* Trigger execution of the algorithm instantly */
+       queue_work(di->chargalg_wq, &di->chargalg_work);
+
+       return HRTIMER_NORESTART;
+}
+
+/**
+ * ab8500_chargalg_maintenance_timer_expired() - Expiration of
+ * the maintenance timer
+ * @timer:     pointer to the timer structure
+ *
+ * This function gets called when the maintenance timer
+ * expires
+ */
+static enum hrtimer_restart
+ab8500_chargalg_maintenance_timer_expired(struct hrtimer *timer)
+{
+
+       struct ab8500_chargalg *di = container_of(timer, struct ab8500_chargalg,
+                                                 maintenance_timer);
+
+       dev_dbg(di->dev, "Maintenance timer expired\n");
+       di->events.maintenance_timer_expired = true;
+
+       /* Trigger execution of the algorithm instantly */
+       queue_work(di->chargalg_wq, &di->chargalg_work);
+
+       return HRTIMER_NORESTART;
+}
+
+/**
+ * ab8500_chargalg_state_to() - Change charge state
+ * @di:                pointer to the ab8500_chargalg structure
+ * @state:     new charge state to transition to
+ *
+ * This function gets called when a charge state change should occur
+ */
+static void ab8500_chargalg_state_to(struct ab8500_chargalg *di,
+       enum ab8500_chargalg_states state)
+{
+       dev_dbg(di->dev,
+               "State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
+               di->charge_state == state ? "NO" : "YES",
+               di->charge_state,
+               states[di->charge_state],
+               state,
+               states[state]);
+
+       di->charge_state = state;
+}
+
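+/**
+ * ab8500_chargalg_check_charger_enable() - Check that charging is still enabled
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * While in a charging state, verify that the charger currently in use is
+ * still enabled with the normal voltage and current levels of the detected
+ * battery type. External AC chargers are not checked here.
+ */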
+static int ab8500_chargalg_check_charger_enable(struct ab8500_chargalg *di)
+{
+       switch (di->charge_state) {
+       case STATE_NORMAL:
+       case STATE_MAINTENANCE_A:
+       case STATE_MAINTENANCE_B:
+               break;
+       default:
+               return 0;
+       }
+
+       if (di->chg_info.charger_type & USB_CHG) {
+               return di->usb_chg->ops.check_enable(di->usb_chg,
+                       di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+       } else if ((di->chg_info.charger_type & AC_CHG) &&
+                  !(di->ac_chg->external)) {
+               return di->ac_chg->ops.check_enable(di->ac_chg,
+                       di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+       }
+       return 0;
+}
+
+/**
+ * ab8500_chargalg_check_charger_connection() - Check charger connection change
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This function will check if there is a change in the charger connection
+ * and change charge state accordingly. AC has precedence over USB.
+ */
+static int ab8500_chargalg_check_charger_connection(struct ab8500_chargalg *di)
+{
+       if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
+               di->susp_status.suspended_change) {
+               /*
+                * Charger state changed or suspension
+                * has changed since last update
+                */
+               if ((di->chg_info.conn_chg & AC_CHG) &&
+                       !di->susp_status.ac_suspended) {
+                       dev_dbg(di->dev, "Charging source is AC\n");
+                       if (di->chg_info.charger_type != AC_CHG) {
+                               di->chg_info.charger_type = AC_CHG;
+                               ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                       }
+               } else if ((di->chg_info.conn_chg & USB_CHG) &&
+                       !di->susp_status.usb_suspended) {
+                       dev_dbg(di->dev, "Charging source is USB\n");
+                       di->chg_info.charger_type = USB_CHG;
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               } else if (di->chg_info.conn_chg &&
+                       (di->susp_status.ac_suspended ||
+                       di->susp_status.usb_suspended)) {
+                       dev_dbg(di->dev, "Charging is suspended\n");
+                       di->chg_info.charger_type = NO_CHG;
+                       ab8500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
+               } else {
+                       dev_dbg(di->dev, "Charging source is OFF\n");
+                       di->chg_info.charger_type = NO_CHG;
+                       ab8500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+               }
+               di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
+               di->susp_status.suspended_change = false;
+       }
+       return di->chg_info.conn_chg;
+}
+
+/**
+ * ab8500_chargalg_check_current_step_status() - Check charging current
+ * step status.
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This function will check if there is a change in the charging current step
+ * and change charge state accordingly.
+ */
+static void ab8500_chargalg_check_current_step_status
+       (struct ab8500_chargalg *di)
+{
+       if (di->curr_status.curr_step_change)
+               ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+       di->curr_status.curr_step_change = false;
+}
+
+/**
+ * ab8500_chargalg_start_safety_timer() - Start charging safety timer
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * The safety timer is used to avoid overcharging of old or bad batteries.
+ * There are different timers for AC and USB
+ */
+static void ab8500_chargalg_start_safety_timer(struct ab8500_chargalg *di)
+{
+       /* Charger-dependent expiration time in hours */
+       int timer_expiration = 0;
+
+       switch (di->chg_info.charger_type) {
+       case AC_CHG:
+               timer_expiration = di->bm->main_safety_tmr_h;
+               break;
+
+       case USB_CHG:
+               timer_expiration = di->bm->usb_safety_tmr_h;
+               break;
+
+       default:
+               dev_err(di->dev, "Unknown charger to charge from\n");
+               break;
+       }
+
+       di->events.safety_timer_expired = false;
+       hrtimer_set_expires_range(&di->safety_timer,
+               ktime_set(timer_expiration * ONE_HOUR_IN_SECONDS, 0),
+               ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
+       hrtimer_start_expires(&di->safety_timer, HRTIMER_MODE_REL);
+}
+
+/**
+ * ab8500_chargalg_stop_safety_timer() - Stop charging safety timer
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * The safety timer is stopped whenever the NORMAL state is exited
+ */
+static void ab8500_chargalg_stop_safety_timer(struct ab8500_chargalg *di)
+{
+       if (hrtimer_try_to_cancel(&di->safety_timer) >= 0)
+               di->events.safety_timer_expired = false;
+}
+
+/**
+ * ab8500_chargalg_start_maintenance_timer() - Start charging maintenance timer
+ * @di:                pointer to the ab8500_chargalg structure
+ * @duration:  duration of the maintenance timer in hours
+ *
+ * The maintenance timer is used to maintain the charge in the battery once
+ * the battery is considered full. These timers are chosen to match the
+ * discharge curve of the battery
+ */
+static void ab8500_chargalg_start_maintenance_timer(struct ab8500_chargalg *di,
+       int duration)
+{
+       hrtimer_set_expires_range(&di->maintenance_timer,
+               ktime_set(duration * ONE_HOUR_IN_SECONDS, 0),
+               ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
+       di->events.maintenance_timer_expired = false;
+       hrtimer_start_expires(&di->maintenance_timer, HRTIMER_MODE_REL);
+}
+
+/**
+ * ab8500_chargalg_stop_maintenance_timer() - Stop maintenance timer
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * The maintenance timer is stopped whenever maintenance ends or when another
+ * state is entered
+ */
+static void ab8500_chargalg_stop_maintenance_timer(struct ab8500_chargalg *di)
+{
+       if (hrtimer_try_to_cancel(&di->maintenance_timer) >= 0)
+               di->events.maintenance_timer_expired = false;
+}
+
+/**
+ * ab8500_chargalg_kick_watchdog() - Kick charger watchdog
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * The charger watchdog has to be kicked periodically whenever the charger is
+ * on, else the ABB will reset the system
+ */
+static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
+{
+       /* Check if charger exists and kick watchdog if charging */
+       if (di->ac_chg && di->ac_chg->ops.kick_wd &&
+           di->chg_info.online_chg & AC_CHG) {
+               /*
+                * If AB charger watchdog expired, pm2xxx charging
+                * gets disabled. To be safe, kick both AB charger watchdog
+                * and pm2xxx watchdog.
+                */
+               if (di->ac_chg->external &&
+                   di->usb_chg && di->usb_chg->ops.kick_wd)
+                       di->usb_chg->ops.kick_wd(di->usb_chg);
+
+               return di->ac_chg->ops.kick_wd(di->ac_chg);
+       } else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
+                       di->chg_info.online_chg & USB_CHG)
+               return di->usb_chg->ops.kick_wd(di->usb_chg);
+
+       return -ENXIO;
+}
+
+/**
+ * ab8500_chargalg_ac_en() - Turn on/off the AC charger
+ * @di:                pointer to the ab8500_chargalg structure
+ * @enable:    charger on/off
+ * @vset:      requested charger output voltage
+ * @iset:      requested charger output current
+ *
+ * The AC charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
+       int vset, int iset)
+{
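+       /*
+        * Notify the external charger preparation chain only on the first
+        * enable request; the toggle keeps track of whether the notifier
+        * has already been called.
+        */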
+       static int ab8500_chargalg_ex_ac_enable_toggle;
+
+       if (!di->ac_chg || !di->ac_chg->ops.enable)
+               return -ENXIO;
+
+       /* Select maximum of what both the charger and the battery supports */
+       if (di->ac_chg->max_out_volt)
+               vset = min(vset, di->ac_chg->max_out_volt);
+       if (di->ac_chg->max_out_curr)
+               iset = min(iset, di->ac_chg->max_out_curr);
+
+       di->chg_info.ac_iset = iset;
+       di->chg_info.ac_vset = vset;
+
+       /* Enable external charger */
+       if (enable && di->ac_chg->external &&
+           !ab8500_chargalg_ex_ac_enable_toggle) {
+               blocking_notifier_call_chain(&charger_notifier_list,
+                                            0, di->dev);
+               ab8500_chargalg_ex_ac_enable_toggle++;
+       }
+
+       return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
+}
+
+/**
+ * ab8500_chargalg_usb_en() - Turn on/off the USB charger
+ * @di:                pointer to the ab8500_chargalg structure
+ * @enable:    charger on/off
+ * @vset:      requested charger output voltage
+ * @iset:      requested charger output current
+ *
+ * The USB charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int ab8500_chargalg_usb_en(struct ab8500_chargalg *di, int enable,
+       int vset, int iset)
+{
+       if (!di->usb_chg || !di->usb_chg->ops.enable)
+               return -ENXIO;
+
+       /* Select maximum of what both the charger and the battery supports */
+       if (di->usb_chg->max_out_volt)
+               vset = min(vset, di->usb_chg->max_out_volt);
+       if (di->usb_chg->max_out_curr)
+               iset = min(iset, di->usb_chg->max_out_curr);
+
+       di->chg_info.usb_iset = iset;
+       di->chg_info.usb_vset = vset;
+
+       return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
+}
+
+/**
+ * ab8500_chargalg_update_chg_curr() - Update charger current
+ * @di:                pointer to the ab8500_chargalg structure
+ * @iset:      requested charger output current
+ *
+ * The charger output current will be updated for the charger
+ * that is currently in use
+ */
+static int ab8500_chargalg_update_chg_curr(struct ab8500_chargalg *di,
+               int iset)
+{
+       /* Check if charger exists and update current if charging */
+       if (di->ac_chg && di->ac_chg->ops.update_curr &&
+                       di->chg_info.charger_type & AC_CHG) {
+               /*
+                * Select maximum of what both the charger
+                * and the battery supports
+                */
+               if (di->ac_chg->max_out_curr)
+                       iset = min(iset, di->ac_chg->max_out_curr);
+
+               di->chg_info.ac_iset = iset;
+
+               return di->ac_chg->ops.update_curr(di->ac_chg, iset);
+       } else if (di->usb_chg && di->usb_chg->ops.update_curr &&
+                       di->chg_info.charger_type & USB_CHG) {
+               /*
+                * Select maximum of what both the charger
+                * and the battery supports
+                */
+               if (di->usb_chg->max_out_curr)
+                       iset = min(iset, di->usb_chg->max_out_curr);
+
+               di->chg_info.usb_iset = iset;
+
+               return di->usb_chg->ops.update_curr(di->usb_chg, iset);
+       }
+
+       return -ENXIO;
+}
+
+/**
+ * ab8500_chargalg_stop_charging() - Stop charging
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This function is called from any state where charging should be stopped.
+ * All charging is disabled and all status parameters and timers are changed
+ * accordingly
+ */
+static void ab8500_chargalg_stop_charging(struct ab8500_chargalg *di)
+{
+       ab8500_chargalg_ac_en(di, false, 0, 0);
+       ab8500_chargalg_usb_en(di, false, 0, 0);
+       ab8500_chargalg_stop_safety_timer(di);
+       ab8500_chargalg_stop_maintenance_timer(di);
+       di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+       di->maintenance_chg = false;
+       cancel_delayed_work(&di->chargalg_wd_work);
+       power_supply_changed(di->chargalg_psy);
+}
+
+/**
+ * ab8500_chargalg_hold_charging() - Pauses charging
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This function is called in the case where maintenance charging has been
+ * disabled and instead a battery voltage mode is entered to check when the
+ * battery voltage has reached a certain recharge voltage
+ */
+static void ab8500_chargalg_hold_charging(struct ab8500_chargalg *di)
+{
+       ab8500_chargalg_ac_en(di, false, 0, 0);
+       ab8500_chargalg_usb_en(di, false, 0, 0);
+       ab8500_chargalg_stop_safety_timer(di);
+       ab8500_chargalg_stop_maintenance_timer(di);
+       di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+       di->maintenance_chg = false;
+       cancel_delayed_work(&di->chargalg_wd_work);
+       power_supply_changed(di->chargalg_psy);
+}
+
+/**
+ * ab8500_chargalg_start_charging() - Start the charger
+ * @di:                pointer to the ab8500_chargalg structure
+ * @vset:      requested charger output voltage
+ * @iset:      requested charger output current
+ *
+ * A charger will be enabled depending on the requested charger type that was
+ * detected previously.
+ */
+static void ab8500_chargalg_start_charging(struct ab8500_chargalg *di,
+       int vset, int iset)
+{
+       switch (di->chg_info.charger_type) {
+       case AC_CHG:
+               dev_dbg(di->dev,
+                       "AC parameters: Vset %d, Ich %d\n", vset, iset);
+               ab8500_chargalg_usb_en(di, false, 0, 0);
+               ab8500_chargalg_ac_en(di, true, vset, iset);
+               break;
+
+       case USB_CHG:
+               dev_dbg(di->dev,
+                       "USB parameters: Vset %d, Ich %d\n", vset, iset);
+               ab8500_chargalg_ac_en(di, false, 0, 0);
+               ab8500_chargalg_usb_en(di, true, vset, iset);
+               break;
+
+       default:
+               dev_err(di->dev, "Unknown charger to charge from\n");
+               break;
+       }
+}
+
+/**
+ * ab8500_chargalg_check_temp() - Check battery temperature ranges
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * The battery temperature is checked against the predefined limits and the
+ * charge state is changed accordingly
+ */
+static void ab8500_chargalg_check_temp(struct ab8500_chargalg *di)
+{
+       if (di->batt_data.temp > (di->bm->temp_low + di->t_hyst_norm) &&
+               di->batt_data.temp < (di->bm->temp_high - di->t_hyst_norm)) {
+               /* Temp OK! */
+               di->events.btemp_underover = false;
+               di->events.btemp_lowhigh = false;
+               di->t_hyst_norm = 0;
+               di->t_hyst_lowhigh = 0;
+       } else {
+               if (((di->batt_data.temp >= di->bm->temp_high) &&
+                       (di->batt_data.temp <
+                               (di->bm->temp_over - di->t_hyst_lowhigh))) ||
+                       ((di->batt_data.temp >
+                               (di->bm->temp_under + di->t_hyst_lowhigh)) &&
+                       (di->batt_data.temp <= di->bm->temp_low))) {
+                       /* TEMP minor!!!!! */
+                       di->events.btemp_underover = false;
+                       di->events.btemp_lowhigh = true;
+                       di->t_hyst_norm = di->bm->temp_hysteresis;
+                       di->t_hyst_lowhigh = 0;
+               } else if (di->batt_data.temp <= di->bm->temp_under ||
+                       di->batt_data.temp >= di->bm->temp_over) {
+                       /* TEMP major!!!!! */
+                       di->events.btemp_underover = true;
+                       di->events.btemp_lowhigh = false;
+                       di->t_hyst_norm = 0;
+                       di->t_hyst_lowhigh = di->bm->temp_hysteresis;
+               } else {
+                       /* Within hysteresis */
+                       dev_dbg(di->dev, "Within hysteresis limit temp: %d "
+                               "hyst_lowhigh %d, hyst normal %d\n",
+                               di->batt_data.temp, di->t_hyst_lowhigh,
+                               di->t_hyst_norm);
+               }
+       }
+}
+
+/**
+ * ab8500_chargalg_check_charger_voltage() - Check charger voltage
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * Charger voltage is checked against maximum limit
+ */
+static void ab8500_chargalg_check_charger_voltage(struct ab8500_chargalg *di)
+{
+       if (di->chg_info.usb_volt > di->bm->chg_params->usb_volt_max)
+               di->chg_info.usb_chg_ok = false;
+       else
+               di->chg_info.usb_chg_ok = true;
+
+       if (di->chg_info.ac_volt > di->bm->chg_params->ac_volt_max)
+               di->chg_info.ac_chg_ok = false;
+       else
+               di->chg_info.ac_chg_ok = true;
+
+}
+
+/**
+ * ab8500_chargalg_end_of_charge() - Check if end-of-charge criteria is fulfilled
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * End-of-charge criteria is fulfilled when the battery voltage is above a
+ * certain limit and the battery current is below a certain limit for a
+ * predefined number of consecutive seconds. If true, the battery is full
+ */
+static void ab8500_chargalg_end_of_charge(struct ab8500_chargalg *di)
+{
+       if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+               di->charge_state == STATE_NORMAL &&
+               !di->maintenance_chg && (di->batt_data.volt >=
+               di->bm->bat_type[di->bm->batt_id].termination_vol ||
+               di->events.usb_cv_active || di->events.ac_cv_active) &&
+               di->batt_data.avg_curr <
+               di->bm->bat_type[di->bm->batt_id].termination_curr &&
+               di->batt_data.avg_curr > 0) {
+               if (++di->eoc_cnt >= EOC_COND_CNT) {
+                       di->eoc_cnt = 0;
+                       di->charge_status = POWER_SUPPLY_STATUS_FULL;
+                       di->maintenance_chg = true;
+                       dev_dbg(di->dev, "EOC reached!\n");
+                       power_supply_changed(di->chargalg_psy);
+               } else {
+                       dev_dbg(di->dev,
+                               " EOC limit reached for the %d"
+                               " time, out of %d before EOC\n",
+                               di->eoc_cnt,
+                               EOC_COND_CNT);
+               }
+       } else {
+               di->eoc_cnt = 0;
+       }
+}
+
+static void init_maxim_chg_curr(struct ab8500_chargalg *di)
+{
+       di->ccm.original_iset =
+               di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
+       di->ccm.current_iset =
+               di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
+       di->ccm.test_delta_i = di->bm->maxi->charger_curr_step;
+       di->ccm.max_current = di->bm->maxi->chg_curr;
+       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
+       di->ccm.level = 0;
+}
+
+/**
+ * ab8500_chargalg_chg_curr_maxim - increases the charger current to
+ *                     compensate for the system load
+ * @di         pointer to the ab8500_chargalg structure
+ *
+ * This maximization function is used to raise the charger current to get the
+ * battery current as close to the optimal value as possible. The battery
+ * current during charging is affected by the system load
+ */
+static enum maxim_ret ab8500_chargalg_chg_curr_maxim(struct ab8500_chargalg *di)
+{
+       int delta_i;
+
+       if (!di->bm->maxi->ena_maxi)
+               return MAXIM_RET_NOACTION;
+
+       delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
+
+       if (di->events.vbus_collapsed) {
+               dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
+                               di->ccm.wait_cnt);
+               if (di->ccm.wait_cnt == 0) {
+                       dev_dbg(di->dev, "lowering current\n");
+                       di->ccm.wait_cnt++;
+                       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
+                       di->ccm.max_current =
+                               di->ccm.current_iset - di->ccm.test_delta_i;
+                       di->ccm.current_iset = di->ccm.max_current;
+                       di->ccm.level--;
+                       return MAXIM_RET_CHANGE;
+               } else {
+                       dev_dbg(di->dev, "waiting\n");
+                       /* Let's go in here twice before lowering curr again */
+                       di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
+                       return MAXIM_RET_NOACTION;
+               }
+       }
+
+       di->ccm.wait_cnt = 0;
+
+       if (di->batt_data.inst_curr > di->ccm.original_iset) {
+               dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
+                       " (limit %dmA) (current iset: %dmA)!\n",
+                       di->batt_data.inst_curr, di->ccm.original_iset,
+                       di->ccm.current_iset);
+
+               if (di->ccm.current_iset == di->ccm.original_iset)
+                       return MAXIM_RET_NOACTION;
+
+               di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
+               di->ccm.current_iset = di->ccm.original_iset;
+               di->ccm.level = 0;
+
+               return MAXIM_RET_IBAT_TOO_HIGH;
+       }
+
+       if (delta_i > di->ccm.test_delta_i &&
+               (di->ccm.current_iset + di->ccm.test_delta_i) <
+               di->ccm.max_current) {
+               if (di->ccm.condition_cnt-- == 0) {
+                       /* Increase the iset with ccm.test_delta_i */
+                       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
+                       di->ccm.current_iset += di->ccm.test_delta_i;
+                       di->ccm.level++;
+                       dev_dbg(di->dev, " Maximization needed, increase"
+                               " with %d mA to %dmA (Optimal ibat: %d)"
+                               " Level %d\n",
+                               di->ccm.test_delta_i,
+                               di->ccm.current_iset,
+                               di->ccm.original_iset,
+                               di->ccm.level);
+                       return MAXIM_RET_CHANGE;
+               } else {
+                       return MAXIM_RET_NOACTION;
+               }
+       } else {
+               di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
+               return MAXIM_RET_NOACTION;
+       }
+}
+
+static void handle_maxim_chg_curr(struct ab8500_chargalg *di)
+{
+       enum maxim_ret ret;
+       int result;
+
+       ret = ab8500_chargalg_chg_curr_maxim(di);
+       switch (ret) {
+       case MAXIM_RET_CHANGE:
+               result = ab8500_chargalg_update_chg_curr(di,
+                       di->ccm.current_iset);
+               if (result)
+                       dev_err(di->dev, "failed to set chg curr\n");
+               break;
+       case MAXIM_RET_IBAT_TOO_HIGH:
+               result = ab8500_chargalg_update_chg_curr(di,
+                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+               if (result)
+                       dev_err(di->dev, "failed to set chg curr\n");
+               break;
+
+       case MAXIM_RET_NOACTION:
+       default:
+               /* Do nothing. */
+               break;
+       }
+}
+
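+/**
+ * ab8500_chargalg_get_ext_psy_data() - Collect data from an external supply
+ * @dev:       device of a registered external power supply
+ * @data:      pointer to our own power_supply (the charging algorithm psy)
+ *
+ * Called for every registered power supply. Supplies that name the charging
+ * algorithm in their supplied_to array are queried for presence, online
+ * status, health, voltage, current, temperature and capacity, and the
+ * charger and battery data in struct ab8500_chargalg are updated.
+ */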
+static int ab8500_chargalg_get_ext_psy_data(struct device *dev, void *data)
+{
+       struct power_supply *psy;
+       struct power_supply *ext = dev_get_drvdata(dev);
+       const char **supplicants = (const char **)ext->supplied_to;
+       struct ab8500_chargalg *di;
+       union power_supply_propval ret;
+       int j;
+       bool capacity_updated = false;
+
+       psy = (struct power_supply *)data;
+       di = power_supply_get_drvdata(psy);
+       /* For all psy where the driver name appears in any supplied_to */
+       j = match_string(supplicants, ext->num_supplicants, psy->desc->name);
+       if (j < 0)
+               return 0;
+
+       /*
+        * If the external supply does not register POWER_SUPPLY_PROP_CAPACITY
+        * because it handles that sysfs entry on its own, this is the place
+        * to get the battery capacity.
+        */
+       if (!power_supply_get_property(ext, POWER_SUPPLY_PROP_CAPACITY, &ret)) {
+               di->batt_data.percent = ret.intval;
+               capacity_updated = true;
+       }
+
+       /* Go through all properties for the psy */
+       for (j = 0; j < ext->desc->num_properties; j++) {
+               enum power_supply_property prop;
+               prop = ext->desc->properties[j];
+
+               /*
+                * Initialize the chargers if not already done.
+                */
+               if (!di->ac_chg &&
+                       ext->desc->type == POWER_SUPPLY_TYPE_MAINS)
+                       di->ac_chg = psy_to_ux500_charger(ext);
+               else if (!di->usb_chg &&
+                       ext->desc->type == POWER_SUPPLY_TYPE_USB)
+                       di->usb_chg = psy_to_ux500_charger(ext);
+
+               if (power_supply_get_property(ext, prop, &ret))
+                       continue;
+               switch (prop) {
+               case POWER_SUPPLY_PROP_PRESENT:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               /* Battery present */
+                               if (ret.intval)
+                                       di->events.batt_rem = false;
+                               /* Battery removed */
+                               else
+                                       di->events.batt_rem = true;
+                               break;
+                       case POWER_SUPPLY_TYPE_MAINS:
+                               /* AC disconnected */
+                               if (!ret.intval &&
+                                       (di->chg_info.conn_chg & AC_CHG)) {
+                                       di->chg_info.prev_conn_chg =
+                                               di->chg_info.conn_chg;
+                                       di->chg_info.conn_chg &= ~AC_CHG;
+                               }
+                               /* AC connected */
+                               else if (ret.intval &&
+                                       !(di->chg_info.conn_chg & AC_CHG)) {
+                                       di->chg_info.prev_conn_chg =
+                                               di->chg_info.conn_chg;
+                                       di->chg_info.conn_chg |= AC_CHG;
+                               }
+                               break;
+                       case POWER_SUPPLY_TYPE_USB:
+                               /* USB disconnected */
+                               if (!ret.intval &&
+                                       (di->chg_info.conn_chg & USB_CHG)) {
+                                       di->chg_info.prev_conn_chg =
+                                               di->chg_info.conn_chg;
+                                       di->chg_info.conn_chg &= ~USB_CHG;
+                               }
+                               /* USB connected */
+                               else if (ret.intval &&
+                                       !(di->chg_info.conn_chg & USB_CHG)) {
+                                       di->chg_info.prev_conn_chg =
+                                               di->chg_info.conn_chg;
+                                       di->chg_info.conn_chg |= USB_CHG;
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_ONLINE:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               break;
+                       case POWER_SUPPLY_TYPE_MAINS:
+                               /* AC offline */
+                               if (!ret.intval &&
+                                       (di->chg_info.online_chg & AC_CHG)) {
+                                       di->chg_info.prev_online_chg =
+                                               di->chg_info.online_chg;
+                                       di->chg_info.online_chg &= ~AC_CHG;
+                               }
+                               /* AC online */
+                               else if (ret.intval &&
+                                       !(di->chg_info.online_chg & AC_CHG)) {
+                                       di->chg_info.prev_online_chg =
+                                               di->chg_info.online_chg;
+                                       di->chg_info.online_chg |= AC_CHG;
+                                       queue_delayed_work(di->chargalg_wq,
+                                               &di->chargalg_wd_work, 0);
+                               }
+                               break;
+                       case POWER_SUPPLY_TYPE_USB:
+                               /* USB offline */
+                               if (!ret.intval &&
+                                       (di->chg_info.online_chg & USB_CHG)) {
+                                       di->chg_info.prev_online_chg =
+                                               di->chg_info.online_chg;
+                                       di->chg_info.online_chg &= ~USB_CHG;
+                               }
+                               /* USB online */
+                               else if (ret.intval &&
+                                       !(di->chg_info.online_chg & USB_CHG)) {
+                                       di->chg_info.prev_online_chg =
+                                               di->chg_info.online_chg;
+                                       di->chg_info.online_chg |= USB_CHG;
+                                       queue_delayed_work(di->chargalg_wq,
+                                               &di->chargalg_wd_work, 0);
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_HEALTH:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               break;
+                       case POWER_SUPPLY_TYPE_MAINS:
+                               switch (ret.intval) {
+                               case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+                                       di->events.mainextchnotok = true;
+                                       di->events.main_thermal_prot = false;
+                                       di->events.main_ovv = false;
+                                       di->events.ac_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_DEAD:
+                                       di->events.ac_wd_expired = true;
+                                       di->events.mainextchnotok = false;
+                                       di->events.main_ovv = false;
+                                       di->events.main_thermal_prot = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_COLD:
+                               case POWER_SUPPLY_HEALTH_OVERHEAT:
+                                       di->events.main_thermal_prot = true;
+                                       di->events.mainextchnotok = false;
+                                       di->events.main_ovv = false;
+                                       di->events.ac_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+                                       di->events.main_ovv = true;
+                                       di->events.mainextchnotok = false;
+                                       di->events.main_thermal_prot = false;
+                                       di->events.ac_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_GOOD:
+                                       di->events.main_thermal_prot = false;
+                                       di->events.mainextchnotok = false;
+                                       di->events.main_ovv = false;
+                                       di->events.ac_wd_expired = false;
+                                       break;
+                               default:
+                                       break;
+                               }
+                               break;
+
+                       case POWER_SUPPLY_TYPE_USB:
+                               switch (ret.intval) {
+                               case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+                                       di->events.usbchargernotok = true;
+                                       di->events.usb_thermal_prot = false;
+                                       di->events.vbus_ovv = false;
+                                       di->events.usb_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_DEAD:
+                                       di->events.usb_wd_expired = true;
+                                       di->events.usbchargernotok = false;
+                                       di->events.usb_thermal_prot = false;
+                                       di->events.vbus_ovv = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_COLD:
+                               case POWER_SUPPLY_HEALTH_OVERHEAT:
+                                       di->events.usb_thermal_prot = true;
+                                       di->events.usbchargernotok = false;
+                                       di->events.vbus_ovv = false;
+                                       di->events.usb_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+                                       di->events.vbus_ovv = true;
+                                       di->events.usbchargernotok = false;
+                                       di->events.usb_thermal_prot = false;
+                                       di->events.usb_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_GOOD:
+                                       di->events.usbchargernotok = false;
+                                       di->events.usb_thermal_prot = false;
+                                       di->events.vbus_ovv = false;
+                                       di->events.usb_wd_expired = false;
+                                       break;
+                               default:
+                                       break;
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               di->batt_data.volt = ret.intval / 1000;
+                               break;
+                       case POWER_SUPPLY_TYPE_MAINS:
+                               di->chg_info.ac_volt = ret.intval / 1000;
+                               break;
+                       case POWER_SUPPLY_TYPE_USB:
+                               di->chg_info.usb_volt = ret.intval / 1000;
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_MAINS:
+                               /* AVG is used to indicate when we are
+                                * in CV mode */
+                               if (ret.intval)
+                                       di->events.ac_cv_active = true;
+                               else
+                                       di->events.ac_cv_active = false;
+
+                               break;
+                       case POWER_SUPPLY_TYPE_USB:
+                               /* AVG is used to indicate when we are
+                                * in CV mode */
+                               if (ret.intval)
+                                       di->events.usb_cv_active = true;
+                               else
+                                       di->events.usb_cv_active = false;
+
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_TECHNOLOGY:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               if (ret.intval)
+                                       di->events.batt_unknown = false;
+                               else
+                                       di->events.batt_unknown = true;
+
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_TEMP:
+                       di->batt_data.temp = ret.intval / 10;
+                       break;
+
+               case POWER_SUPPLY_PROP_CURRENT_NOW:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_MAINS:
+                               di->chg_info.ac_curr = ret.intval / 1000;
+                               break;
+                       case POWER_SUPPLY_TYPE_USB:
+                               di->chg_info.usb_curr = ret.intval / 1000;
+                               break;
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               di->batt_data.inst_curr = ret.intval / 1000;
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_CURRENT_AVG:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               di->batt_data.avg_curr = ret.intval / 1000;
+                               break;
+                       case POWER_SUPPLY_TYPE_USB:
+                               if (ret.intval)
+                                       di->events.vbus_collapsed = true;
+                               else
+                                       di->events.vbus_collapsed = false;
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+               case POWER_SUPPLY_PROP_CAPACITY:
+                       if (!capacity_updated)
+                               di->batt_data.percent = ret.intval;
+                       break;
+               default:
+                       break;
+               }
+       }
+       return 0;
+}
+
+/**
+ * ab8500_chargalg_external_power_changed() - callback for power supply changes
+ * @psy:       pointer to the structure power_supply
+ *
+ * This function implements the external_power_changed callback of the
+ * power_supply structure.
+ * It gets executed when there is a change in any external power supply
+ * that this driver needs to be notified of.
+ */
+static void ab8500_chargalg_external_power_changed(struct power_supply *psy)
+{
+       struct ab8500_chargalg *di = power_supply_get_drvdata(psy);
+
+       /*
+        * Trigger execution of the algorithm instantly and read
+        * all power_supply properties there instead
+        */
+       if (di->chargalg_wq)
+               queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_algorithm() - Main function for the algorithm
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This is the main control function for the charging algorithm.
+ * It is called periodically or when something happens that will
+ * trigger a state change
+ */
+static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
+{
+       int charger_status;
+       int ret;
+       int curr_step_lvl;
+
+       /* Collect data from all power_supply class devices */
+       class_for_each_device(power_supply_class, NULL,
+               di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
+
+       ab8500_chargalg_end_of_charge(di);
+       ab8500_chargalg_check_temp(di);
+       ab8500_chargalg_check_charger_voltage(di);
+
+       charger_status = ab8500_chargalg_check_charger_connection(di);
+       ab8500_chargalg_check_current_step_status(di);
+
+       if (is_ab8500(di->parent)) {
+               ret = ab8500_chargalg_check_charger_enable(di);
+               if (ret < 0)
+                       dev_err(di->dev,
+                               "failed to check if charger is enabled: %d\n",
+                               ret);
+       }
+
+       /*
+        * First check if we have a charger connected.
+        * Also we don't allow charging of unknown batteries if configured
+        * this way
+        */
+       if (!charger_status ||
+               (di->events.batt_unknown && !di->bm->chg_unknown_bat)) {
+               if (di->charge_state != STATE_HANDHELD) {
+                       di->events.safety_timer_expired = false;
+                       ab8500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+               }
+       }
+
+       /* If suspended, we should not continue checking the flags */
+       else if (di->charge_state == STATE_SUSPENDED_INIT ||
+               di->charge_state == STATE_SUSPENDED) {
+               /* We don't do anything here, just don't continue */
+       }
+
+       /* Safety timer expiration */
+       else if (di->events.safety_timer_expired) {
+               if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
+                       ab8500_chargalg_state_to(di,
+                               STATE_SAFETY_TIMER_EXPIRED_INIT);
+       }
+       /*
+        * Check if any interrupts have occurred
+        * that will prevent us from charging
+        */
+
+       /* Battery removed */
+       else if (di->events.batt_rem) {
+               if (di->charge_state != STATE_BATT_REMOVED)
+                       ab8500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
+       }
+       /* Main or USB charger not ok. */
+       else if (di->events.mainextchnotok || di->events.usbchargernotok) {
+               /*
+                * If vbus_collapsed is set, we have to lower the charger
+                * current, which is done in the normal state below
+                */
+               if (di->charge_state != STATE_CHG_NOT_OK &&
+                               !di->events.vbus_collapsed)
+                       ab8500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
+       }
+       /* VBUS, Main or VBAT OVV. */
+       else if (di->events.vbus_ovv ||
+                       di->events.main_ovv ||
+                       di->events.batt_ovv ||
+                       !di->chg_info.usb_chg_ok ||
+                       !di->chg_info.ac_chg_ok) {
+               if (di->charge_state != STATE_OVV_PROTECT)
+                       ab8500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
+       }
+       /* USB Thermal, stop charging */
+       else if (di->events.main_thermal_prot ||
+               di->events.usb_thermal_prot) {
+               if (di->charge_state != STATE_HW_TEMP_PROTECT)
+                       ab8500_chargalg_state_to(di,
+                               STATE_HW_TEMP_PROTECT_INIT);
+       }
+       /* Battery temp over/under */
+       else if (di->events.btemp_underover) {
+               if (di->charge_state != STATE_TEMP_UNDEROVER)
+                       ab8500_chargalg_state_to(di,
+                               STATE_TEMP_UNDEROVER_INIT);
+       }
+       /* Watchdog expired */
+       else if (di->events.ac_wd_expired ||
+               di->events.usb_wd_expired) {
+               if (di->charge_state != STATE_WD_EXPIRED)
+                       ab8500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
+       }
+       /* Battery temp high/low */
+       else if (di->events.btemp_lowhigh) {
+               if (di->charge_state != STATE_TEMP_LOWHIGH)
+                       ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
+       }
+
+       dev_dbg(di->dev,
+               "[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
+               "State %s Active_chg %d Chg_status %d AC %d USB %d "
+               "AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
+               "USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
+               di->batt_data.volt,
+               di->batt_data.avg_curr,
+               di->batt_data.inst_curr,
+               di->batt_data.temp,
+               di->batt_data.percent,
+               di->maintenance_chg,
+               states[di->charge_state],
+               di->chg_info.charger_type,
+               di->charge_status,
+               di->chg_info.conn_chg & AC_CHG,
+               di->chg_info.conn_chg & USB_CHG,
+               di->chg_info.online_chg & AC_CHG,
+               di->chg_info.online_chg & USB_CHG,
+               di->events.ac_cv_active,
+               di->events.usb_cv_active,
+               di->chg_info.ac_curr,
+               di->chg_info.usb_curr,
+               di->chg_info.ac_vset,
+               di->chg_info.ac_iset,
+               di->chg_info.usb_vset,
+               di->chg_info.usb_iset);
+
+       switch (di->charge_state) {
+       case STATE_HANDHELD_INIT:
+               ab8500_chargalg_stop_charging(di);
+               di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
+               ab8500_chargalg_state_to(di, STATE_HANDHELD);
+               fallthrough;
+
+       case STATE_HANDHELD:
+               break;
+
+       case STATE_SUSPENDED_INIT:
+               if (di->susp_status.ac_suspended)
+                       ab8500_chargalg_ac_en(di, false, 0, 0);
+               if (di->susp_status.usb_suspended)
+                       ab8500_chargalg_usb_en(di, false, 0, 0);
+               ab8500_chargalg_stop_safety_timer(di);
+               ab8500_chargalg_stop_maintenance_timer(di);
+               di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+               di->maintenance_chg = false;
+               ab8500_chargalg_state_to(di, STATE_SUSPENDED);
+               power_supply_changed(di->chargalg_psy);
+               fallthrough;
+
+       case STATE_SUSPENDED:
+               /* CHARGING is suspended */
+               break;
+
+       case STATE_BATT_REMOVED_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_BATT_REMOVED);
+               fallthrough;
+
+       case STATE_BATT_REMOVED:
+               if (!di->events.batt_rem)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_HW_TEMP_PROTECT_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
+               fallthrough;
+
+       case STATE_HW_TEMP_PROTECT:
+               if (!di->events.main_thermal_prot &&
+                               !di->events.usb_thermal_prot)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_OVV_PROTECT_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_OVV_PROTECT);
+               fallthrough;
+
+       case STATE_OVV_PROTECT:
+               if (!di->events.vbus_ovv &&
+                               !di->events.main_ovv &&
+                               !di->events.batt_ovv &&
+                               di->chg_info.usb_chg_ok &&
+                               di->chg_info.ac_chg_ok)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_CHG_NOT_OK_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_CHG_NOT_OK);
+               fallthrough;
+
+       case STATE_CHG_NOT_OK:
+               if (!di->events.mainextchnotok &&
+                               !di->events.usbchargernotok)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_SAFETY_TIMER_EXPIRED_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
+               fallthrough;
+
+       case STATE_SAFETY_TIMER_EXPIRED:
+               /* We exit this state when charger is removed */
+               break;
+
+       case STATE_NORMAL_INIT:
+               if (di->curr_status.curr_step == CHARGALG_CURR_STEP_LOW)
+                       ab8500_chargalg_stop_charging(di);
+               else {
+                       curr_step_lvl = di->bm->bat_type[
+                               di->bm->batt_id].normal_cur_lvl
+                               * di->curr_status.curr_step
+                               / CHARGALG_CURR_STEP_HIGH;
+                       ab8500_chargalg_start_charging(di,
+                               di->bm->bat_type[di->bm->batt_id]
+                               .normal_vol_lvl, curr_step_lvl);
+               }
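+               /*
+                * Worked example with hypothetical numbers (for illustration
+                * only, not values taken from the platform data): if
+                * normal_cur_lvl is 900 mA and curr_step is 50, charging is
+                * started at 900 * 50 / 100 = 450 mA.
+                */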
+
+               ab8500_chargalg_state_to(di, STATE_NORMAL);
+               ab8500_chargalg_start_safety_timer(di);
+               ab8500_chargalg_stop_maintenance_timer(di);
+               init_maxim_chg_curr(di);
+               di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+               di->eoc_cnt = 0;
+               di->maintenance_chg = false;
+               power_supply_changed(di->chargalg_psy);
+
+               break;
+
+       case STATE_NORMAL:
+               handle_maxim_chg_curr(di);
+               if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
+                       di->maintenance_chg) {
+                       if (di->bm->no_maintenance)
+                               ab8500_chargalg_state_to(di,
+                                       STATE_WAIT_FOR_RECHARGE_INIT);
+                       else
+                               ab8500_chargalg_state_to(di,
+                                       STATE_MAINTENANCE_A_INIT);
+               }
+               break;
+
+       /* This state will be used when the maintenance state is disabled */
+       case STATE_WAIT_FOR_RECHARGE_INIT:
+               ab8500_chargalg_hold_charging(di);
+               ab8500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
+               fallthrough;
+
+       case STATE_WAIT_FOR_RECHARGE:
+               if (di->batt_data.percent <=
+                   di->bm->bat_type[di->bm->batt_id].recharge_cap)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_MAINTENANCE_A_INIT:
+               ab8500_chargalg_stop_safety_timer(di);
+               ab8500_chargalg_start_maintenance_timer(di,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_a_chg_timer_h);
+               ab8500_chargalg_start_charging(di,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_a_vol_lvl,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_a_cur_lvl);
+               ab8500_chargalg_state_to(di, STATE_MAINTENANCE_A);
+               power_supply_changed(di->chargalg_psy);
+               fallthrough;
+
+       case STATE_MAINTENANCE_A:
+               if (di->events.maintenance_timer_expired) {
+                       ab8500_chargalg_stop_maintenance_timer(di);
+                       ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
+               }
+               break;
+
+       case STATE_MAINTENANCE_B_INIT:
+               ab8500_chargalg_start_maintenance_timer(di,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_b_chg_timer_h);
+               ab8500_chargalg_start_charging(di,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_b_vol_lvl,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_b_cur_lvl);
+               ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B);
+               power_supply_changed(di->chargalg_psy);
+               fallthrough;
+
+       case STATE_MAINTENANCE_B:
+               if (di->events.maintenance_timer_expired) {
+                       ab8500_chargalg_stop_maintenance_timer(di);
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               }
+               break;
+
+       case STATE_TEMP_LOWHIGH_INIT:
+               ab8500_chargalg_start_charging(di,
+                       di->bm->bat_type[
+                               di->bm->batt_id].low_high_vol_lvl,
+                       di->bm->bat_type[
+                               di->bm->batt_id].low_high_cur_lvl);
+               ab8500_chargalg_stop_maintenance_timer(di);
+               di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+               ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
+               power_supply_changed(di->chargalg_psy);
+               fallthrough;
+
+       case STATE_TEMP_LOWHIGH:
+               if (!di->events.btemp_lowhigh)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_WD_EXPIRED_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_WD_EXPIRED);
+               fallthrough;
+
+       case STATE_WD_EXPIRED:
+               if (!di->events.ac_wd_expired &&
+                               !di->events.usb_wd_expired)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_TEMP_UNDEROVER_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
+               fallthrough;
+
+       case STATE_TEMP_UNDEROVER:
+               if (!di->events.btemp_underover)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+       }
+
+       /* Start charging directly if the new state is a charge state */
+       if (di->charge_state == STATE_NORMAL_INIT ||
+                       di->charge_state == STATE_MAINTENANCE_A_INIT ||
+                       di->charge_state == STATE_MAINTENANCE_B_INIT)
+               queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_periodic_work() - Periodic work for the algorithm
+ * @work:      pointer to the work_struct structure
+ *
+ * Work queue function for the charging algorithm
+ */
+static void ab8500_chargalg_periodic_work(struct work_struct *work)
+{
+       struct ab8500_chargalg *di = container_of(work,
+               struct ab8500_chargalg, chargalg_periodic_work.work);
+
+       ab8500_chargalg_algorithm(di);
+
+       /*
+        * If a charger is connected then the battery has to be monitored
+        * frequently, else the work can be delayed.
+        */
+       if (di->chg_info.conn_chg)
+               queue_delayed_work(di->chargalg_wq,
+                       &di->chargalg_periodic_work,
+                       di->bm->interval_charging * HZ);
+       else
+               queue_delayed_work(di->chargalg_wq,
+                       &di->chargalg_periodic_work,
+                       di->bm->interval_not_charging * HZ);
+}
+
+/**
+ * ab8500_chargalg_wd_work() - periodic work to kick the charger watchdog
+ * @work:      pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog
+ */
+static void ab8500_chargalg_wd_work(struct work_struct *work)
+{
+       int ret;
+       struct ab8500_chargalg *di = container_of(work,
+               struct ab8500_chargalg, chargalg_wd_work.work);
+
+       ret = ab8500_chargalg_kick_watchdog(di);
+       if (ret < 0)
+               dev_err(di->dev, "failed to kick watchdog\n");
+
+       queue_delayed_work(di->chargalg_wq,
+               &di->chargalg_wd_work, CHG_WD_INTERVAL);
+}
+
+/**
+ * ab8500_chargalg_work() - Work to run the charging algorithm instantly
+ * @work:      pointer to the work_struct structure
+ *
+ * Work queue function for calling the charging algorithm
+ */
+static void ab8500_chargalg_work(struct work_struct *work)
+{
+       struct ab8500_chargalg *di = container_of(work,
+               struct ab8500_chargalg, chargalg_work);
+
+       ab8500_chargalg_algorithm(di);
+}
+
+/**
+ * ab8500_chargalg_get_property() - get the chargalg properties
+ * @psy:       pointer to the power_supply structure
+ * @psp:       pointer to the power_supply_property structure
+ * @val:       pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * chargalg properties by reading the sysfs files.
+ * status:     charging/discharging/full/unknown
+ * health:     health of the battery
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_chargalg_get_property(struct power_supply *psy,
+       enum power_supply_property psp,
+       union power_supply_propval *val)
+{
+       struct ab8500_chargalg *di = power_supply_get_drvdata(psy);
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_STATUS:
+               val->intval = di->charge_status;
+               break;
+       case POWER_SUPPLY_PROP_HEALTH:
+               if (di->events.batt_ovv) {
+                       val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+               } else if (di->events.btemp_underover) {
+                       if (di->batt_data.temp <= di->bm->temp_under)
+                               val->intval = POWER_SUPPLY_HEALTH_COLD;
+                       else
+                               val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+               } else if (di->charge_state == STATE_SAFETY_TIMER_EXPIRED ||
+                          di->charge_state == STATE_SAFETY_TIMER_EXPIRED_INIT) {
+                       val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+               } else {
+                       val->intval = POWER_SUPPLY_HEALTH_GOOD;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
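+/*
+ * A hedged usage sketch (this relies on the standard power_supply sysfs
+ * layout, nothing extra added by this driver): since the supply is
+ * registered under the name "ab8500_chargalg", the two properties handled
+ * above can typically be read from user space as
+ *
+ *   cat /sys/class/power_supply/ab8500_chargalg/status
+ *   cat /sys/class/power_supply/ab8500_chargalg/health
+ */
+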
+/* Exposure to the sysfs interface */
+
+static ssize_t ab8500_chargalg_curr_step_show(struct ab8500_chargalg *di,
+                                             char *buf)
+{
+       return sprintf(buf, "%d\n", di->curr_status.curr_step);
+}
+
+static ssize_t ab8500_chargalg_curr_step_store(struct ab8500_chargalg *di,
+                                              const char *buf, size_t length)
+{
+       long param;
+       int ret;
+
+       ret = kstrtol(buf, 10, &param);
+       if (ret < 0)
+               return ret;
+
+       di->curr_status.curr_step = param;
+       if (di->curr_status.curr_step >= CHARGALG_CURR_STEP_LOW &&
+               di->curr_status.curr_step <= CHARGALG_CURR_STEP_HIGH) {
+               di->curr_status.curr_step_change = true;
+               queue_work(di->chargalg_wq, &di->chargalg_work);
+       } else
+               dev_info(di->dev, "Wrong current step\n"
+                       "Enter 0. Disable AC/USB Charging\n"
+                       "1--100. Set AC/USB charging current step\n"
+                       "100. Enable AC/USB Charging\n");
+
+       return strlen(buf);
+}
+
+
+static ssize_t ab8500_chargalg_en_show(struct ab8500_chargalg *di,
+                                      char *buf)
+{
+       return sprintf(buf, "%d\n",
+                      di->susp_status.ac_suspended &&
+                      di->susp_status.usb_suspended);
+}
+
+static ssize_t ab8500_chargalg_en_store(struct ab8500_chargalg *di,
+       const char *buf, size_t length)
+{
+       long param;
+       int ac_usb;
+       int ret;
+
+       ret = kstrtol(buf, 10, &param);
+       if (ret < 0)
+               return ret;
+
+       ac_usb = param;
+       switch (ac_usb) {
+       case 0:
+               /* Disable charging */
+               di->susp_status.ac_suspended = true;
+               di->susp_status.usb_suspended = true;
+               di->susp_status.suspended_change = true;
+               /* Trigger a state change */
+               queue_work(di->chargalg_wq,
+                       &di->chargalg_work);
+               break;
+       case 1:
+               /* Enable AC Charging */
+               di->susp_status.ac_suspended = false;
+               di->susp_status.suspended_change = true;
+               /* Trigger a state change */
+               queue_work(di->chargalg_wq,
+                       &di->chargalg_work);
+               break;
+       case 2:
+               /* Enable USB charging */
+               di->susp_status.usb_suspended = false;
+               di->susp_status.suspended_change = true;
+               /* Trigger a state change */
+               queue_work(di->chargalg_wq,
+                       &di->chargalg_work);
+               break;
+       default:
+               dev_info(di->dev, "Wrong input\n"
+                       "Enter 0. Disable AC/USB Charging\n"
+                       "1. Enable AC charging\n"
+                       "2. Enable USB Charging\n");
+       }
+       return strlen(buf);
+}
+
+static struct ab8500_chargalg_sysfs_entry ab8500_chargalg_en_charger =
+       __ATTR(chargalg, 0644, ab8500_chargalg_en_show,
+                               ab8500_chargalg_en_store);
+
+static struct ab8500_chargalg_sysfs_entry ab8500_chargalg_curr_step =
+       __ATTR(chargalg_curr_step, 0644, ab8500_chargalg_curr_step_show,
+                                       ab8500_chargalg_curr_step_store);
+
+static ssize_t ab8500_chargalg_sysfs_show(struct kobject *kobj,
+       struct attribute *attr, char *buf)
+{
+       struct ab8500_chargalg_sysfs_entry *entry = container_of(attr,
+               struct ab8500_chargalg_sysfs_entry, attr);
+
+       struct ab8500_chargalg *di = container_of(kobj,
+               struct ab8500_chargalg, chargalg_kobject);
+
+       if (!entry->show)
+               return -EIO;
+
+       return entry->show(di, buf);
+}
+
+static ssize_t ab8500_chargalg_sysfs_charger(struct kobject *kobj,
+       struct attribute *attr, const char *buf, size_t length)
+{
+       struct ab8500_chargalg_sysfs_entry *entry = container_of(attr,
+               struct ab8500_chargalg_sysfs_entry, attr);
+
+       struct ab8500_chargalg *di = container_of(kobj,
+               struct ab8500_chargalg, chargalg_kobject);
+
+       if (!entry->store)
+               return -EIO;
+
+       return entry->store(di, buf, length);
+}
+
+static struct attribute *ab8500_chargalg_chg[] = {
+       &ab8500_chargalg_en_charger.attr,
+       &ab8500_chargalg_curr_step.attr,
+       NULL,
+};
+
+static const struct sysfs_ops ab8500_chargalg_sysfs_ops = {
+       .show = ab8500_chargalg_sysfs_show,
+       .store = ab8500_chargalg_sysfs_charger,
+};
+
+static struct kobj_type ab8500_chargalg_ktype = {
+       .sysfs_ops = &ab8500_chargalg_sysfs_ops,
+       .default_attrs = ab8500_chargalg_chg,
+};
+
+/**
+ * ab8500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * @di:                pointer to the struct ab8500_chargalg
+ *
+ * This function removes the entry in sysfs.
+ */
+static void ab8500_chargalg_sysfs_exit(struct ab8500_chargalg *di)
+{
+       kobject_del(&di->chargalg_kobject);
+}
+
+/**
+ * ab8500_chargalg_sysfs_init() - init of sysfs entry
+ * @di:                pointer to the struct ab8500_chargalg
+ *
+ * This function adds an entry in sysfs.
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_chargalg_sysfs_init(struct ab8500_chargalg *di)
+{
+       int ret = 0;
+
+       ret = kobject_init_and_add(&di->chargalg_kobject,
+               &ab8500_chargalg_ktype,
+               NULL, "ab8500_chargalg");
+       if (ret < 0)
+               dev_err(di->dev, "failed to create sysfs entry\n");
+
+       return ret;
+}
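+
+/*
+ * Illustrative sketch of using the two sysfs entries defined above (an
+ * assumption for documentation purposes: with a NULL-parent kobject named
+ * "ab8500_chargalg", the files are expected to show up directly under
+ * /sys/ab8500_chargalg/):
+ *
+ *   echo 0  > /sys/ab8500_chargalg/chargalg            suspend AC and USB
+ *   echo 1  > /sys/ab8500_chargalg/chargalg            re-enable AC charging
+ *   echo 2  > /sys/ab8500_chargalg/chargalg            re-enable USB charging
+ *   echo 50 > /sys/ab8500_chargalg/chargalg_curr_step  set the current step
+ */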
+/* Exposure to the sysfs interface <<END>> */
+
+static int __maybe_unused ab8500_chargalg_resume(struct device *dev)
+{
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
+
+       /* Kick charger watchdog if charging (any charger online) */
+       if (di->chg_info.online_chg)
+               queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
+
+       /*
+        * Run the charging algorithm directly to be sure we don't
+        * do it too seldom
+        */
+       queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+       return 0;
+}
+
+static int __maybe_unused ab8500_chargalg_suspend(struct device *dev)
+{
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
+
+       if (di->chg_info.online_chg)
+               cancel_delayed_work_sync(&di->chargalg_wd_work);
+
+       cancel_delayed_work_sync(&di->chargalg_periodic_work);
+
+       return 0;
+}
+
+static char *supply_interface[] = {
+       "ab8500_fg",
+};
+
+static const struct power_supply_desc ab8500_chargalg_desc = {
+       .name                   = "ab8500_chargalg",
+       .type                   = POWER_SUPPLY_TYPE_BATTERY,
+       .properties             = ab8500_chargalg_props,
+       .num_properties         = ARRAY_SIZE(ab8500_chargalg_props),
+       .get_property           = ab8500_chargalg_get_property,
+       .external_power_changed = ab8500_chargalg_external_power_changed,
+};
+
+static int ab8500_chargalg_bind(struct device *dev, struct device *master,
+                               void *data)
+{
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
+
+       /* Create a work queue for the chargalg */
+       di->chargalg_wq = alloc_ordered_workqueue("ab8500_chargalg_wq",
+                                                 WQ_MEM_RECLAIM);
+       if (di->chargalg_wq == NULL) {
+               dev_err(di->dev, "failed to create work queue\n");
+               return -ENOMEM;
+       }
+
+       /* Run the charging algorithm */
+       queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+       return 0;
+}
+
+static void ab8500_chargalg_unbind(struct device *dev, struct device *master,
+                                  void *data)
+{
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
+
+       /* Stop all timers and work */
+       hrtimer_cancel(&di->safety_timer);
+       hrtimer_cancel(&di->maintenance_timer);
+
+       cancel_delayed_work_sync(&di->chargalg_periodic_work);
+       cancel_delayed_work_sync(&di->chargalg_wd_work);
+       cancel_work_sync(&di->chargalg_work);
+
+       /* Delete the work queue */
+       destroy_workqueue(di->chargalg_wq);
+       flush_scheduled_work();
+}
+
+static const struct component_ops ab8500_chargalg_component_ops = {
+       .bind = ab8500_chargalg_bind,
+       .unbind = ab8500_chargalg_unbind,
+};
+
+static int ab8500_chargalg_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct power_supply_config psy_cfg = {};
+       struct ab8500_chargalg *di;
+       int ret = 0;
+
+       di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+       if (!di)
+               return -ENOMEM;
+
+       di->bm = &ab8500_bm_data;
+
+       /* get device struct and parent */
+       di->dev = dev;
+       di->parent = dev_get_drvdata(pdev->dev.parent);
+
+       psy_cfg.supplied_to = supply_interface;
+       psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
+       psy_cfg.drv_data = di;
+
+       /* Initialize safety timer */
+       hrtimer_init(&di->safety_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+       di->safety_timer.function = ab8500_chargalg_safety_timer_expired;
+
+       /* Initialize maintenance timer */
+       hrtimer_init(&di->maintenance_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+       di->maintenance_timer.function =
+               ab8500_chargalg_maintenance_timer_expired;
+
+       /* Init work for chargalg */
+       INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
+               ab8500_chargalg_periodic_work);
+       INIT_DEFERRABLE_WORK(&di->chargalg_wd_work,
+               ab8500_chargalg_wd_work);
+
+       /* Init work for chargalg */
+       INIT_WORK(&di->chargalg_work, ab8500_chargalg_work);
+
+       /* To detect charger at startup */
+       di->chg_info.prev_conn_chg = -1;
+
+       /* Register chargalg power supply class */
+       di->chargalg_psy = devm_power_supply_register(di->dev,
+                                                &ab8500_chargalg_desc,
+                                                &psy_cfg);
+       if (IS_ERR(di->chargalg_psy)) {
+               dev_err(di->dev, "failed to register chargalg psy\n");
+               return PTR_ERR(di->chargalg_psy);
+       }
+
+       platform_set_drvdata(pdev, di);
+
+       /* sysfs interface to enable/disable charging from user space */
+       ret = ab8500_chargalg_sysfs_init(di);
+       if (ret) {
+               dev_err(di->dev, "failed to create sysfs entry\n");
+               return ret;
+       }
+       di->curr_status.curr_step = CHARGALG_CURR_STEP_HIGH;
+
+       dev_info(di->dev, "probe success\n");
+       return component_add(dev, &ab8500_chargalg_component_ops);
+}
+
+static int ab8500_chargalg_remove(struct platform_device *pdev)
+{
+       struct ab8500_chargalg *di = platform_get_drvdata(pdev);
+
+       component_del(&pdev->dev, &ab8500_chargalg_component_ops);
+
+       /* sysfs interface to enable/disable charging from user space */
+       ab8500_chargalg_sysfs_exit(di);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ab8500_chargalg_pm_ops, ab8500_chargalg_suspend, ab8500_chargalg_resume);
+
+static const struct of_device_id ab8500_chargalg_match[] = {
+       { .compatible = "stericsson,ab8500-chargalg", },
+       { },
+};
+
+struct platform_driver ab8500_chargalg_driver = {
+       .probe = ab8500_chargalg_probe,
+       .remove = ab8500_chargalg_remove,
+       .driver = {
+               .name = "ab8500_chargalg",
+               .of_match_table = ab8500_chargalg_match,
+               .pm = &ab8500_chargalg_pm_ops,
+       },
+};
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab8500-chargalg");
+MODULE_DESCRIPTION("ab8500 battery charging algorithm");
index fa49e12..15eadaf 100644 (file)
@@ -292,7 +292,7 @@ struct ab8500_charger {
        struct iio_channel *adc_main_charger_c;
        struct iio_channel *adc_vbus_v;
        struct iio_channel *adc_usb_charger_c;
-       struct abx500_bm_data *bm;
+       struct ab8500_bm_data *bm;
        struct ab8500_charger_event_flags flags;
        struct ab8500_charger_usb_state usb_state;
        struct ab8500_charger_max_usb_in_curr max_usb_in_curr;
@@ -3388,7 +3388,7 @@ static const struct component_master_ops ab8500_charger_comp_ops = {
 static struct platform_driver *const ab8500_charger_component_drivers[] = {
        &ab8500_fg_driver,
        &ab8500_btemp_driver,
-       &abx500_chargalg_driver,
+       &ab8500_chargalg_driver,
 };
 
 static int ab8500_charger_compare_dev(struct device *dev, void *data)
index a6ebdb2..05fe972 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/iio/consumer.h>
 #include <linux/kernel.h>
+#include <linux/fixp-arith.h>
 
 #include "ab8500-bm.h"
 
@@ -56,9 +57,6 @@
 /* FG constants */
 #define BATT_OVV                       0x01
 
-#define interpolate(x, x1, y1, x2, y2) \
-       ((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))));
-
 /**
  * struct ab8500_fg_interrupts - ab8500 fg interrupts
  * @name:      name of the interrupt
@@ -227,7 +225,7 @@ struct ab8500_fg {
        struct ab8500_fg_avg_cap avg_cap;
        struct ab8500 *parent;
        struct iio_channel *main_bat_v;
-       struct abx500_bm_data *bm;
+       struct ab8500_bm_data *bm;
        struct power_supply *fg_psy;
        struct workqueue_struct *fg_wq;
        struct delayed_work fg_periodic_work;
@@ -856,7 +854,7 @@ static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
 static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
 {
        int i, tbl_size;
-       const struct abx500_v_to_cap *tbl;
+       const struct ab8500_v_to_cap *tbl;
        int cap = 0;
 
        tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl;
@@ -868,11 +866,12 @@ static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
        }
 
        if ((i > 0) && (i < tbl_size)) {
-               cap = interpolate(voltage,
+               cap = fixp_linear_interpolate(
                        tbl[i].voltage,
                        tbl[i].capacity * 10,
                        tbl[i-1].voltage,
-                       tbl[i-1].capacity * 10);
+                       tbl[i-1].capacity * 10,
+                       voltage);
        } else if (i == 0) {
                cap = 1000;
        } else {
@@ -920,11 +919,12 @@ static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
        }
 
        if ((i > 0) && (i < tbl_size)) {
-               resist = interpolate(di->bat_temp / 10,
+               resist = fixp_linear_interpolate(
                        tbl[i].temp,
                        tbl[i].resist,
                        tbl[i-1].temp,
-                       tbl[i-1].resist);
+                       tbl[i-1].resist,
+                       di->bat_temp / 10);
        } else if (i == 0) {
                resist = tbl[0].resist;
        } else {
@@ -2235,7 +2235,7 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
                        case POWER_SUPPLY_TYPE_BATTERY:
                                if (!di->flags.batt_id_received &&
                                    di->bm->batt_id != BATTERY_UNKNOWN) {
-                                       const struct abx500_battery_type *b;
+                                       const struct ab8500_battery_type *b;
 
                                        b = &(di->bm->bat_type[di->bm->batt_id]);
 
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
deleted file mode 100644 (file)
index b72826c..0000000
+++ /dev/null
@@ -1,2099 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson SA 2012
- * Copyright (c) 2012 Sony Mobile Communications AB
- *
- * Charging algorithm driver for abx500 variants
- *
- * Authors:
- *     Johan Palsson <johan.palsson@stericsson.com>
- *     Karl Komierowski <karl.komierowski@stericsson.com>
- *     Arun R Murthy <arun.murthy@stericsson.com>
- *     Author: Imre Sunyi <imre.sunyi@sonymobile.com>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/component.h>
-#include <linux/hrtimer.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/power_supply.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/kobject.h>
-#include <linux/of.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/notifier.h>
-
-#include "ab8500-bm.h"
-#include "ab8500-chargalg.h"
-
-/* Watchdog kick interval */
-#define CHG_WD_INTERVAL                        (6 * HZ)
-
-/* End-of-charge criteria counter */
-#define EOC_COND_CNT                   10
-
-/* One hour expressed in seconds */
-#define ONE_HOUR_IN_SECONDS            3600
-
-/* Five minutes expressed in seconds */
-#define FIVE_MINUTES_IN_SECONDS        300
-
-#define CHARGALG_CURR_STEP_LOW         0
-#define CHARGALG_CURR_STEP_HIGH        100
-
-enum abx500_chargers {
-       NO_CHG,
-       AC_CHG,
-       USB_CHG,
-};
-
-struct abx500_chargalg_charger_info {
-       enum abx500_chargers conn_chg;
-       enum abx500_chargers prev_conn_chg;
-       enum abx500_chargers online_chg;
-       enum abx500_chargers prev_online_chg;
-       enum abx500_chargers charger_type;
-       bool usb_chg_ok;
-       bool ac_chg_ok;
-       int usb_volt;
-       int usb_curr;
-       int ac_volt;
-       int ac_curr;
-       int usb_vset;
-       int usb_iset;
-       int ac_vset;
-       int ac_iset;
-};
-
-struct abx500_chargalg_suspension_status {
-       bool suspended_change;
-       bool ac_suspended;
-       bool usb_suspended;
-};
-
-struct abx500_chargalg_current_step_status {
-       bool curr_step_change;
-       int curr_step;
-};
-
-struct abx500_chargalg_battery_data {
-       int temp;
-       int volt;
-       int avg_curr;
-       int inst_curr;
-       int percent;
-};
-
-enum abx500_chargalg_states {
-       STATE_HANDHELD_INIT,
-       STATE_HANDHELD,
-       STATE_CHG_NOT_OK_INIT,
-       STATE_CHG_NOT_OK,
-       STATE_HW_TEMP_PROTECT_INIT,
-       STATE_HW_TEMP_PROTECT,
-       STATE_NORMAL_INIT,
-       STATE_NORMAL,
-       STATE_WAIT_FOR_RECHARGE_INIT,
-       STATE_WAIT_FOR_RECHARGE,
-       STATE_MAINTENANCE_A_INIT,
-       STATE_MAINTENANCE_A,
-       STATE_MAINTENANCE_B_INIT,
-       STATE_MAINTENANCE_B,
-       STATE_TEMP_UNDEROVER_INIT,
-       STATE_TEMP_UNDEROVER,
-       STATE_TEMP_LOWHIGH_INIT,
-       STATE_TEMP_LOWHIGH,
-       STATE_SUSPENDED_INIT,
-       STATE_SUSPENDED,
-       STATE_OVV_PROTECT_INIT,
-       STATE_OVV_PROTECT,
-       STATE_SAFETY_TIMER_EXPIRED_INIT,
-       STATE_SAFETY_TIMER_EXPIRED,
-       STATE_BATT_REMOVED_INIT,
-       STATE_BATT_REMOVED,
-       STATE_WD_EXPIRED_INIT,
-       STATE_WD_EXPIRED,
-};
-
-static const char *states[] = {
-       "HANDHELD_INIT",
-       "HANDHELD",
-       "CHG_NOT_OK_INIT",
-       "CHG_NOT_OK",
-       "HW_TEMP_PROTECT_INIT",
-       "HW_TEMP_PROTECT",
-       "NORMAL_INIT",
-       "NORMAL",
-       "WAIT_FOR_RECHARGE_INIT",
-       "WAIT_FOR_RECHARGE",
-       "MAINTENANCE_A_INIT",
-       "MAINTENANCE_A",
-       "MAINTENANCE_B_INIT",
-       "MAINTENANCE_B",
-       "TEMP_UNDEROVER_INIT",
-       "TEMP_UNDEROVER",
-       "TEMP_LOWHIGH_INIT",
-       "TEMP_LOWHIGH",
-       "SUSPENDED_INIT",
-       "SUSPENDED",
-       "OVV_PROTECT_INIT",
-       "OVV_PROTECT",
-       "SAFETY_TIMER_EXPIRED_INIT",
-       "SAFETY_TIMER_EXPIRED",
-       "BATT_REMOVED_INIT",
-       "BATT_REMOVED",
-       "WD_EXPIRED_INIT",
-       "WD_EXPIRED",
-};
-
-struct abx500_chargalg_events {
-       bool batt_unknown;
-       bool mainextchnotok;
-       bool batt_ovv;
-       bool batt_rem;
-       bool btemp_underover;
-       bool btemp_lowhigh;
-       bool main_thermal_prot;
-       bool usb_thermal_prot;
-       bool main_ovv;
-       bool vbus_ovv;
-       bool usbchargernotok;
-       bool safety_timer_expired;
-       bool maintenance_timer_expired;
-       bool ac_wd_expired;
-       bool usb_wd_expired;
-       bool ac_cv_active;
-       bool usb_cv_active;
-       bool vbus_collapsed;
-};
-
-/**
- * struct abx500_charge_curr_maximization - Charger maximization parameters
- * @original_iset:     the non optimized/maximised charger current
- * @current_iset:      the charging current used at this moment
- * @test_delta_i:      the delta between the current we want to charge and the
-                       current that is really going into the battery
- * @condition_cnt:     number of iterations needed before a new charger current
-                       is set
- * @max_current:       maximum charger current
- * @wait_cnt:          to avoid too fast current step down in case of charger
- *                     voltage collapse, we insert this delay between step
- *                     down
- * @level:             tells in how many steps the charging current has been
-                       increased
- */
-struct abx500_charge_curr_maximization {
-       int original_iset;
-       int current_iset;
-       int test_delta_i;
-       int condition_cnt;
-       int max_current;
-       int wait_cnt;
-       u8 level;
-};
-
-enum maxim_ret {
-       MAXIM_RET_NOACTION,
-       MAXIM_RET_CHANGE,
-       MAXIM_RET_IBAT_TOO_HIGH,
-};
-
-/**
- * struct abx500_chargalg - abx500 Charging algorithm device information
- * @dev:               pointer to the structure device
- * @charge_status:     battery operating status
- * @eoc_cnt:           counter used to determine end-of_charge
- * @maintenance_chg:   indicate if maintenance charge is active
- * @t_hyst_norm                temperature hysteresis when the temperature has been
- *                     over or under normal limits
- * @t_hyst_lowhigh     temperature hysteresis when the temperature has been
- *                     over or under the high or low limits
- * @charge_state:      current state of the charging algorithm
- * @ccm                        charging current maximization parameters
- * @chg_info:          information about connected charger types
- * @batt_data:         data of the battery
- * @susp_status:       current charger suspension status
- * @bm:                Platform specific battery management information
- * @curr_status:       Current step status for over-current protection
- * @parent:            pointer to the struct abx500
- * @chargalg_psy:      structure that holds the battery properties exposed by
- *                     the charging algorithm
- * @events:            structure for information about events triggered
- * @chargalg_wq:               work queue for running the charging algorithm
- * @chargalg_periodic_work:    work to run the charging algorithm periodically
- * @chargalg_wd_work:          work to kick the charger watchdog periodically
- * @chargalg_work:             work to run the charging algorithm instantly
- * @safety_timer:              charging safety timer
- * @maintenance_timer:         maintenance charging timer
- * @chargalg_kobject:          structure of type kobject
- */
-struct abx500_chargalg {
-       struct device *dev;
-       int charge_status;
-       int eoc_cnt;
-       bool maintenance_chg;
-       int t_hyst_norm;
-       int t_hyst_lowhigh;
-       enum abx500_chargalg_states charge_state;
-       struct abx500_charge_curr_maximization ccm;
-       struct abx500_chargalg_charger_info chg_info;
-       struct abx500_chargalg_battery_data batt_data;
-       struct abx500_chargalg_suspension_status susp_status;
-       struct ab8500 *parent;
-       struct abx500_chargalg_current_step_status curr_status;
-       struct abx500_bm_data *bm;
-       struct power_supply *chargalg_psy;
-       struct ux500_charger *ac_chg;
-       struct ux500_charger *usb_chg;
-       struct abx500_chargalg_events events;
-       struct workqueue_struct *chargalg_wq;
-       struct delayed_work chargalg_periodic_work;
-       struct delayed_work chargalg_wd_work;
-       struct work_struct chargalg_work;
-       struct hrtimer safety_timer;
-       struct hrtimer maintenance_timer;
-       struct kobject chargalg_kobject;
-};
-
-/*External charger prepare notifier*/
-BLOCKING_NOTIFIER_HEAD(charger_notifier_list);
-
-/* Main battery properties */
-static enum power_supply_property abx500_chargalg_props[] = {
-       POWER_SUPPLY_PROP_STATUS,
-       POWER_SUPPLY_PROP_HEALTH,
-};
-
-struct abx500_chargalg_sysfs_entry {
-       struct attribute attr;
-       ssize_t (*show)(struct abx500_chargalg *, char *);
-       ssize_t (*store)(struct abx500_chargalg *, const char *, size_t);
-};
-
-/**
- * abx500_chargalg_safety_timer_expired() - Expiration of the safety timer
- * @timer:     pointer to the hrtimer structure
- *
- * This function gets called when the safety timer for the charger
- * expires
- */
-static enum hrtimer_restart
-abx500_chargalg_safety_timer_expired(struct hrtimer *timer)
-{
-       struct abx500_chargalg *di = container_of(timer, struct abx500_chargalg,
-                                                 safety_timer);
-       dev_err(di->dev, "Safety timer expired\n");
-       di->events.safety_timer_expired = true;
-
-       /* Trigger execution of the algorithm instantly */
-       queue_work(di->chargalg_wq, &di->chargalg_work);
-
-       return HRTIMER_NORESTART;
-}
-
-/**
- * abx500_chargalg_maintenance_timer_expired() - Expiration of
- * the maintenance timer
- * @timer:     pointer to the timer structure
- *
- * This function gets called when the maintenence timer
- * expires
- */
-static enum hrtimer_restart
-abx500_chargalg_maintenance_timer_expired(struct hrtimer *timer)
-{
-
-       struct abx500_chargalg *di = container_of(timer, struct abx500_chargalg,
-                                                 maintenance_timer);
-
-       dev_dbg(di->dev, "Maintenance timer expired\n");
-       di->events.maintenance_timer_expired = true;
-
-       /* Trigger execution of the algorithm instantly */
-       queue_work(di->chargalg_wq, &di->chargalg_work);
-
-       return HRTIMER_NORESTART;
-}
-
-/**
- * abx500_chargalg_state_to() - Change charge state
- * @di:                pointer to the abx500_chargalg structure
- *
- * This function gets called when a charge state change should occur
- */
-static void abx500_chargalg_state_to(struct abx500_chargalg *di,
-       enum abx500_chargalg_states state)
-{
-       dev_dbg(di->dev,
-               "State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
-               di->charge_state == state ? "NO" : "YES",
-               di->charge_state,
-               states[di->charge_state],
-               state,
-               states[state]);
-
-       di->charge_state = state;
-}
-
-static int abx500_chargalg_check_charger_enable(struct abx500_chargalg *di)
-{
-       switch (di->charge_state) {
-       case STATE_NORMAL:
-       case STATE_MAINTENANCE_A:
-       case STATE_MAINTENANCE_B:
-               break;
-       default:
-               return 0;
-       }
-
-       if (di->chg_info.charger_type & USB_CHG) {
-               return di->usb_chg->ops.check_enable(di->usb_chg,
-                       di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
-                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
-       } else if ((di->chg_info.charger_type & AC_CHG) &&
-                  !(di->ac_chg->external)) {
-               return di->ac_chg->ops.check_enable(di->ac_chg,
-                       di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
-                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
-       }
-       return 0;
-}
-
-/**
- * abx500_chargalg_check_charger_connection() - Check charger connection change
- * @di:                pointer to the abx500_chargalg structure
- *
- * This function will check if there is a change in the charger connection
- * and change charge state accordingly. AC has precedence over USB.
- */
-static int abx500_chargalg_check_charger_connection(struct abx500_chargalg *di)
-{
-       if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
-               di->susp_status.suspended_change) {
-               /*
-                * Charger state changed or suspension
-                * has changed since last update
-                */
-               if ((di->chg_info.conn_chg & AC_CHG) &&
-                       !di->susp_status.ac_suspended) {
-                       dev_dbg(di->dev, "Charging source is AC\n");
-                       if (di->chg_info.charger_type != AC_CHG) {
-                               di->chg_info.charger_type = AC_CHG;
-                               abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-                       }
-               } else if ((di->chg_info.conn_chg & USB_CHG) &&
-                       !di->susp_status.usb_suspended) {
-                       dev_dbg(di->dev, "Charging source is USB\n");
-                       di->chg_info.charger_type = USB_CHG;
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               } else if (di->chg_info.conn_chg &&
-                       (di->susp_status.ac_suspended ||
-                       di->susp_status.usb_suspended)) {
-                       dev_dbg(di->dev, "Charging is suspended\n");
-                       di->chg_info.charger_type = NO_CHG;
-                       abx500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
-               } else {
-                       dev_dbg(di->dev, "Charging source is OFF\n");
-                       di->chg_info.charger_type = NO_CHG;
-                       abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
-               }
-               di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
-               di->susp_status.suspended_change = false;
-       }
-       return di->chg_info.conn_chg;
-}
-
-/**
- * abx500_chargalg_check_current_step_status() - Check charging current
- * step status.
- * @di:                pointer to the abx500_chargalg structure
- *
- * This function will check if there is a change in the charging current step
- * and change charge state accordingly.
- */
-static void abx500_chargalg_check_current_step_status
-       (struct abx500_chargalg *di)
-{
-       if (di->curr_status.curr_step_change)
-               abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-       di->curr_status.curr_step_change = false;
-}
-
-/**
- * abx500_chargalg_start_safety_timer() - Start charging safety timer
- * @di:                pointer to the abx500_chargalg structure
- *
- * The safety timer is used to avoid overcharging of old or bad batteries.
- * There are different timers for AC and USB
- */
-static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
-{
-       /* Charger-dependent expiration time in hours*/
-       int timer_expiration = 0;
-
-       switch (di->chg_info.charger_type) {
-       case AC_CHG:
-               timer_expiration = di->bm->main_safety_tmr_h;
-               break;
-
-       case USB_CHG:
-               timer_expiration = di->bm->usb_safety_tmr_h;
-               break;
-
-       default:
-               dev_err(di->dev, "Unknown charger to charge from\n");
-               break;
-       }
-
-       di->events.safety_timer_expired = false;
-       hrtimer_set_expires_range(&di->safety_timer,
-               ktime_set(timer_expiration * ONE_HOUR_IN_SECONDS, 0),
-               ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
-       hrtimer_start_expires(&di->safety_timer, HRTIMER_MODE_REL);
-}
-
-/**
- * abx500_chargalg_stop_safety_timer() - Stop charging safety timer
- * @di:                pointer to the abx500_chargalg structure
- *
- * The safety timer is stopped whenever the NORMAL state is exited
- */
-static void abx500_chargalg_stop_safety_timer(struct abx500_chargalg *di)
-{
-       if (hrtimer_try_to_cancel(&di->safety_timer) >= 0)
-               di->events.safety_timer_expired = false;
-}
-
-/**
- * abx500_chargalg_start_maintenance_timer() - Start charging maintenance timer
- * @di:                pointer to the abx500_chargalg structure
- * @duration:  duration of ther maintenance timer in hours
- *
- * The maintenance timer is used to maintain the charge in the battery once
- * the battery is considered full. These timers are chosen to match the
- * discharge curve of the battery
- */
-static void abx500_chargalg_start_maintenance_timer(struct abx500_chargalg *di,
-       int duration)
-{
-       hrtimer_set_expires_range(&di->maintenance_timer,
-               ktime_set(duration * ONE_HOUR_IN_SECONDS, 0),
-               ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
-       di->events.maintenance_timer_expired = false;
-       hrtimer_start_expires(&di->maintenance_timer, HRTIMER_MODE_REL);
-}
-
-/**
- * abx500_chargalg_stop_maintenance_timer() - Stop maintenance timer
- * @di:                pointer to the abx500_chargalg structure
- *
- * The maintenance timer is stopped whenever maintenance ends or when another
- * state is entered
- */
-static void abx500_chargalg_stop_maintenance_timer(struct abx500_chargalg *di)
-{
-       if (hrtimer_try_to_cancel(&di->maintenance_timer) >= 0)
-               di->events.maintenance_timer_expired = false;
-}
-
-/**
- * abx500_chargalg_kick_watchdog() - Kick charger watchdog
- * @di:                pointer to the abx500_chargalg structure
- *
- * The charger watchdog have to be kicked periodically whenever the charger is
- * on, else the ABB will reset the system
- */
-static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
-{
-       /* Check if charger exists and kick watchdog if charging */
-       if (di->ac_chg && di->ac_chg->ops.kick_wd &&
-           di->chg_info.online_chg & AC_CHG) {
-               /*
-                * If AB charger watchdog expired, pm2xxx charging
-                * gets disabled. To be safe, kick both AB charger watchdog
-                * and pm2xxx watchdog.
-                */
-               if (di->ac_chg->external &&
-                   di->usb_chg && di->usb_chg->ops.kick_wd)
-                       di->usb_chg->ops.kick_wd(di->usb_chg);
-
-               return di->ac_chg->ops.kick_wd(di->ac_chg);
-       }
-       else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
-                       di->chg_info.online_chg & USB_CHG)
-               return di->usb_chg->ops.kick_wd(di->usb_chg);
-
-       return -ENXIO;
-}
-
-/**
- * abx500_chargalg_ac_en() - Turn on/off the AC charger
- * @di:                pointer to the abx500_chargalg structure
- * @enable:    charger on/off
- * @vset:      requested charger output voltage
- * @iset:      requested charger output current
- *
- * The AC charger will be turned on/off with the requested charge voltage and
- * current
- */
-static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
-       int vset, int iset)
-{
-       static int abx500_chargalg_ex_ac_enable_toggle;
-
-       if (!di->ac_chg || !di->ac_chg->ops.enable)
-               return -ENXIO;
-
-       /* Select the maximum that both the charger and the battery support */
-       if (di->ac_chg->max_out_volt)
-               vset = min(vset, di->ac_chg->max_out_volt);
-       if (di->ac_chg->max_out_curr)
-               iset = min(iset, di->ac_chg->max_out_curr);
-
-       di->chg_info.ac_iset = iset;
-       di->chg_info.ac_vset = vset;
-
-       /* Enable external charger */
-       if (enable && di->ac_chg->external &&
-           !abx500_chargalg_ex_ac_enable_toggle) {
-               blocking_notifier_call_chain(&charger_notifier_list,
-                                            0, di->dev);
-               abx500_chargalg_ex_ac_enable_toggle++;
-       }
-
-       return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
-}
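
The "maximum that both the charger and the battery support" wording is implemented with min(): the requested voltage and current are clamped so that neither limit is exceeded. A small standalone sketch with made-up limits:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        int vset = 4350, iset = 1500;   /* requested: 4.35 V, 1.5 A         */
        int max_out_volt = 4200;        /* hypothetical charger voltage cap */
        int max_out_curr = 900;         /* hypothetical charger current cap */

        if (max_out_volt)
                vset = min(vset, max_out_volt);
        if (max_out_curr)
                iset = min(iset, max_out_curr);

        /* prints "4200 mV 900 mA" */
        printf("%d mV %d mA\n", vset, iset);
        return 0;
}
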
-
-/**
- * abx500_chargalg_usb_en() - Turn on/off the USB charger
- * @di:                pointer to the abx500_chargalg structure
- * @enable:    charger on/off
- * @vset:      requested charger output voltage
- * @iset:      requested charger output current
- *
- * The USB charger will be turned on/off with the requested charge voltage and
- * current
- */
-static int abx500_chargalg_usb_en(struct abx500_chargalg *di, int enable,
-       int vset, int iset)
-{
-       if (!di->usb_chg || !di->usb_chg->ops.enable)
-               return -ENXIO;
-
-       /* Select the maximum that both the charger and the battery support */
-       if (di->usb_chg->max_out_volt)
-               vset = min(vset, di->usb_chg->max_out_volt);
-       if (di->usb_chg->max_out_curr)
-               iset = min(iset, di->usb_chg->max_out_curr);
-
-       di->chg_info.usb_iset = iset;
-       di->chg_info.usb_vset = vset;
-
-       return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
-}
-
-/**
- * abx500_chargalg_update_chg_curr() - Update charger current
- * @di:                pointer to the abx500_chargalg structure
- * @iset:      requested charger output current
- *
- * The charger output current will be updated for the charger
- * that is currently in use
- */
-static int abx500_chargalg_update_chg_curr(struct abx500_chargalg *di,
-               int iset)
-{
-       /* Check if charger exists and update current if charging */
-       if (di->ac_chg && di->ac_chg->ops.update_curr &&
-                       di->chg_info.charger_type & AC_CHG) {
-               /*
-                * Select the maximum that both the charger
-                * and the battery support
-                */
-               if (di->ac_chg->max_out_curr)
-                       iset = min(iset, di->ac_chg->max_out_curr);
-
-               di->chg_info.ac_iset = iset;
-
-               return di->ac_chg->ops.update_curr(di->ac_chg, iset);
-       } else if (di->usb_chg && di->usb_chg->ops.update_curr &&
-                       di->chg_info.charger_type & USB_CHG) {
-               /*
-                * Select the maximum that both the charger
-                * and the battery support
-                */
-               if (di->usb_chg->max_out_curr)
-                       iset = min(iset, di->usb_chg->max_out_curr);
-
-               di->chg_info.usb_iset = iset;
-
-               return di->usb_chg->ops.update_curr(di->usb_chg, iset);
-       }
-
-       return -ENXIO;
-}
-
-/**
- * abx500_chargalg_stop_charging() - Stop charging
- * @di:                pointer to the abx500_chargalg structure
- *
- * This function is called from any state where charging should be stopped.
- * All charging is disabled and all status parameters and timers are changed
- * accordingly
- */
-static void abx500_chargalg_stop_charging(struct abx500_chargalg *di)
-{
-       abx500_chargalg_ac_en(di, false, 0, 0);
-       abx500_chargalg_usb_en(di, false, 0, 0);
-       abx500_chargalg_stop_safety_timer(di);
-       abx500_chargalg_stop_maintenance_timer(di);
-       di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-       di->maintenance_chg = false;
-       cancel_delayed_work(&di->chargalg_wd_work);
-       power_supply_changed(di->chargalg_psy);
-}
-
-/**
- * abx500_chargalg_hold_charging() - Pauses charging
- * @di:                pointer to the abx500_chargalg structure
- *
- * This function is called in the case where maintenance charging has been
- * disabled and instead a battery voltage mode is entered to check when the
- * battery voltage has reached a certain recharge voltage
- */
-static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
-{
-       abx500_chargalg_ac_en(di, false, 0, 0);
-       abx500_chargalg_usb_en(di, false, 0, 0);
-       abx500_chargalg_stop_safety_timer(di);
-       abx500_chargalg_stop_maintenance_timer(di);
-       di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
-       di->maintenance_chg = false;
-       cancel_delayed_work(&di->chargalg_wd_work);
-       power_supply_changed(di->chargalg_psy);
-}
-
-/**
- * abx500_chargalg_start_charging() - Start the charger
- * @di:                pointer to the abx500_chargalg structure
- * @vset:      requested charger output voltage
- * @iset:      requested charger output current
- *
- * A charger will be enabled depending on the requested charger type that was
- * detected previously.
- */
-static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
-       int vset, int iset)
-{
-       switch (di->chg_info.charger_type) {
-       case AC_CHG:
-               dev_dbg(di->dev,
-                       "AC parameters: Vset %d, Ich %d\n", vset, iset);
-               abx500_chargalg_usb_en(di, false, 0, 0);
-               abx500_chargalg_ac_en(di, true, vset, iset);
-               break;
-
-       case USB_CHG:
-               dev_dbg(di->dev,
-                       "USB parameters: Vset %d, Ich %d\n", vset, iset);
-               abx500_chargalg_ac_en(di, false, 0, 0);
-               abx500_chargalg_usb_en(di, true, vset, iset);
-               break;
-
-       default:
-               dev_err(di->dev, "Unknown charger to charge from\n");
-               break;
-       }
-}
-
-/**
- * abx500_chargalg_check_temp() - Check battery temperature ranges
- * @di:                pointer to the abx500_chargalg structure
- *
- * The battery temperature is checked against the predefined limits and the
- * charge state is changed accordingly
- */
-static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
-{
-       if (di->batt_data.temp > (di->bm->temp_low + di->t_hyst_norm) &&
-               di->batt_data.temp < (di->bm->temp_high - di->t_hyst_norm)) {
-               /* Temp OK! */
-               di->events.btemp_underover = false;
-               di->events.btemp_lowhigh = false;
-               di->t_hyst_norm = 0;
-               di->t_hyst_lowhigh = 0;
-       } else {
-               if (((di->batt_data.temp >= di->bm->temp_high) &&
-                       (di->batt_data.temp <
-                               (di->bm->temp_over - di->t_hyst_lowhigh))) ||
-                       ((di->batt_data.temp >
-                               (di->bm->temp_under + di->t_hyst_lowhigh)) &&
-                       (di->batt_data.temp <= di->bm->temp_low))) {
-                       /* TEMP minor!!!!! */
-                       di->events.btemp_underover = false;
-                       di->events.btemp_lowhigh = true;
-                       di->t_hyst_norm = di->bm->temp_hysteresis;
-                       di->t_hyst_lowhigh = 0;
-               } else if (di->batt_data.temp <= di->bm->temp_under ||
-                       di->batt_data.temp >= di->bm->temp_over) {
-                       /* TEMP major!!!!! */
-                       di->events.btemp_underover = true;
-                       di->events.btemp_lowhigh = false;
-                       di->t_hyst_norm = 0;
-                       di->t_hyst_lowhigh = di->bm->temp_hysteresis;
-               } else {
-                       /* Within hysteresis */
-                       dev_dbg(di->dev, "Within hysteresis limit temp: %d "
-                               "hyst_lowhigh %d, hyst normal %d\n",
-                               di->batt_data.temp, di->t_hyst_lowhigh,
-                               di->t_hyst_norm);
-               }
-       }
-}
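
The check splits the temperature range into a normal band, a low/high band charged at reduced parameters, and an under/over band where charging stops, with hysteresis applied to whichever boundary was last crossed. A minimal standalone sketch of the same classification, using made-up limits (3/8/45/60 degC and 2 degC of hysteresis):

#include <stdio.h>

enum band { BAND_NORMAL, BAND_LOWHIGH, BAND_UNDEROVER, BAND_HYSTERESIS };

/* hypothetical battery limits, in degrees C */
static const int temp_under = 3, temp_low = 8, temp_high = 45, temp_over = 60;
static const int hyst = 2;

static enum band classify(int temp, int t_hyst_norm, int t_hyst_lowhigh)
{
        if (temp > temp_low + t_hyst_norm && temp < temp_high - t_hyst_norm)
                return BAND_NORMAL;
        if ((temp >= temp_high && temp < temp_over - t_hyst_lowhigh) ||
            (temp > temp_under + t_hyst_lowhigh && temp <= temp_low))
                return BAND_LOWHIGH;
        if (temp <= temp_under || temp >= temp_over)
                return BAND_UNDEROVER;
        return BAND_HYSTERESIS; /* between bands, previous state is kept */
}

int main(void)
{
        /* 46 C enters the low/high band; dropping back to 44 C is still
         * inside the 2 C hysteresis, so the low/high state is kept */
        printf("%d %d\n", classify(46, 0, 0), classify(44, hyst, 0));
        return 0;
}
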
-
-/**
- * abx500_chargalg_check_charger_voltage() - Check charger voltage
- * @di:                pointer to the abx500_chargalg structure
- *
- * Charger voltage is checked against maximum limit
- */
-static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
-{
-       if (di->chg_info.usb_volt > di->bm->chg_params->usb_volt_max)
-               di->chg_info.usb_chg_ok = false;
-       else
-               di->chg_info.usb_chg_ok = true;
-
-       if (di->chg_info.ac_volt > di->bm->chg_params->ac_volt_max)
-               di->chg_info.ac_chg_ok = false;
-       else
-               di->chg_info.ac_chg_ok = true;
-
-}
-
-/**
- * abx500_chargalg_end_of_charge() - Check if end-of-charge criteria are fulfilled
- * @di:                pointer to the abx500_chargalg structure
- *
- * End-of-charge criteria are fulfilled when the battery voltage is above a
- * certain limit and the battery current is below a certain limit for a
- * predefined number of consecutive seconds. If true, the battery is full
- */
-static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
-{
-       if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
-               di->charge_state == STATE_NORMAL &&
-               !di->maintenance_chg && (di->batt_data.volt >=
-               di->bm->bat_type[di->bm->batt_id].termination_vol ||
-               di->events.usb_cv_active || di->events.ac_cv_active) &&
-               di->batt_data.avg_curr <
-               di->bm->bat_type[di->bm->batt_id].termination_curr &&
-               di->batt_data.avg_curr > 0) {
-               if (++di->eoc_cnt >= EOC_COND_CNT) {
-                       di->eoc_cnt = 0;
-                       di->charge_status = POWER_SUPPLY_STATUS_FULL;
-                       di->maintenance_chg = true;
-                       dev_dbg(di->dev, "EOC reached!\n");
-                       power_supply_changed(di->chargalg_psy);
-               } else {
-                       dev_dbg(di->dev,
-                               " EOC limit reached for the %d"
-                               " time, out of %d before EOC\n",
-                               di->eoc_cnt,
-                               EOC_COND_CNT);
-               }
-       } else {
-               di->eoc_cnt = 0;
-       }
-}
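
The voltage/current condition must hold for EOC_COND_CNT consecutive runs of the algorithm before the battery is declared full; any single failing sample resets the counter. A standalone sketch of that debouncing, assuming EOC_COND_CNT is 10 and using made-up samples:

#include <stdbool.h>
#include <stdio.h>

#define EOC_COND_CNT 10 /* assumed value; consecutive good samples required */

int main(void)
{
        /* made-up samples: true = "voltage above termination level and
         * average current below termination current" for that run */
        bool sample[] = { true, true, true, false, true, true, true, true,
                          true, true, true, true, true, true };
        int eoc_cnt = 0;
        bool full = false;

        for (unsigned int i = 0; i < sizeof(sample) / sizeof(sample[0]); i++) {
                if (sample[i]) {
                        if (++eoc_cnt >= EOC_COND_CNT) {
                                full = true;    /* EOC reached */
                                break;
                        }
                } else {
                        eoc_cnt = 0;    /* one bad sample resets the count */
                }
        }

        printf("full: %s\n", full ? "after 10 consecutive good samples" : "never");
        return 0;
}
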
-
-static void init_maxim_chg_curr(struct abx500_chargalg *di)
-{
-       di->ccm.original_iset =
-               di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
-       di->ccm.current_iset =
-               di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
-       di->ccm.test_delta_i = di->bm->maxi->charger_curr_step;
-       di->ccm.max_current = di->bm->maxi->chg_curr;
-       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
-       di->ccm.level = 0;
-}
-
-/**
- * abx500_chargalg_chg_curr_maxim() - Increase the charger current to
- *                     compensate for the system load
- * @di:                pointer to the abx500_chargalg structure
- *
- * This maximization function is used to raise the charger current to get the
- * battery current as close to the optimal value as possible. The battery
- * current during charging is affected by the system load
- */
-static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
-{
-       int delta_i;
-
-       if (!di->bm->maxi->ena_maxi)
-               return MAXIM_RET_NOACTION;
-
-       delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
-
-       if (di->events.vbus_collapsed) {
-               dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
-                               di->ccm.wait_cnt);
-               if (di->ccm.wait_cnt == 0) {
-                       dev_dbg(di->dev, "lowering current\n");
-                       di->ccm.wait_cnt++;
-                       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
-                       di->ccm.max_current =
-                               di->ccm.current_iset - di->ccm.test_delta_i;
-                       di->ccm.current_iset = di->ccm.max_current;
-                       di->ccm.level--;
-                       return MAXIM_RET_CHANGE;
-               } else {
-                       dev_dbg(di->dev, "waiting\n");
-                       /* Let's go in here twice before lowering curr again */
-                       di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
-                       return MAXIM_RET_NOACTION;
-               }
-       }
-
-       di->ccm.wait_cnt = 0;
-
-       if ((di->batt_data.inst_curr > di->ccm.original_iset)) {
-               dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
-                       " (limit %dmA) (current iset: %dmA)!\n",
-                       di->batt_data.inst_curr, di->ccm.original_iset,
-                       di->ccm.current_iset);
-
-               if (di->ccm.current_iset == di->ccm.original_iset)
-                       return MAXIM_RET_NOACTION;
-
-               di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
-               di->ccm.current_iset = di->ccm.original_iset;
-               di->ccm.level = 0;
-
-               return MAXIM_RET_IBAT_TOO_HIGH;
-       }
-
-       if (delta_i > di->ccm.test_delta_i &&
-               (di->ccm.current_iset + di->ccm.test_delta_i) <
-               di->ccm.max_current) {
-               if (di->ccm.condition_cnt-- == 0) {
-                       /* Increase the iset with ccm.test_delta_i */
-                       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
-                       di->ccm.current_iset += di->ccm.test_delta_i;
-                       di->ccm.level++;
-                       dev_dbg(di->dev, " Maximization needed, increase"
-                               " with %d mA to %dmA (Optimal ibat: %d)"
-                               " Level %d\n",
-                               di->ccm.test_delta_i,
-                               di->ccm.current_iset,
-                               di->ccm.original_iset,
-                               di->ccm.level);
-                       return MAXIM_RET_CHANGE;
-               } else {
-                       return MAXIM_RET_NOACTION;
-               }
-       } else {
-               di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
-               return MAXIM_RET_NOACTION;
-       }
-}
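
The maximization raises current_iset in test_delta_i steps only after the measured battery current has stayed more than one step below the optimum for wait_cycles consecutive runs, and never beyond max_current. A standalone sketch of just the upward stepping with hypothetical parameters (the VBUS-collapse back-off is omitted):

#include <stdio.h>

int main(void)
{
        /* hypothetical configuration, in mA */
        int original_iset = 900;        /* optimal battery current       */
        int max_current   = 1500;       /* absolute charger ceiling      */
        int test_delta_i  = 100;        /* step size                     */
        int wait_cycles   = 2;          /* runs to wait before each step */

        int current_iset = original_iset;
        int condition_cnt = wait_cycles;
        int inst_curr = 700;            /* measured battery current      */

        for (int run = 0; run < 8; run++) {
                int delta_i = original_iset - inst_curr;

                if (delta_i > test_delta_i &&
                    current_iset + test_delta_i < max_current) {
                        if (condition_cnt-- == 0) {
                                condition_cnt = wait_cycles;
                                current_iset += test_delta_i;
                                printf("run %d: raise charger current to %d mA\n",
                                       run, current_iset);
                        }
                } else {
                        condition_cnt = wait_cycles;
                }
        }
        return 0;
}
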
-
-static void handle_maxim_chg_curr(struct abx500_chargalg *di)
-{
-       enum maxim_ret ret;
-       int result;
-
-       ret = abx500_chargalg_chg_curr_maxim(di);
-       switch (ret) {
-       case MAXIM_RET_CHANGE:
-               result = abx500_chargalg_update_chg_curr(di,
-                       di->ccm.current_iset);
-               if (result)
-                       dev_err(di->dev, "failed to set chg curr\n");
-               break;
-       case MAXIM_RET_IBAT_TOO_HIGH:
-               result = abx500_chargalg_update_chg_curr(di,
-                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
-               if (result)
-                       dev_err(di->dev, "failed to set chg curr\n");
-               break;
-
-       case MAXIM_RET_NOACTION:
-       default:
-               /* Do nothing..*/
-               break;
-       }
-}
-
-static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
-{
-       struct power_supply *psy;
-       struct power_supply *ext = dev_get_drvdata(dev);
-       const char **supplicants = (const char **)ext->supplied_to;
-       struct abx500_chargalg *di;
-       union power_supply_propval ret;
-       int j;
-       bool capacity_updated = false;
-
-       psy = (struct power_supply *)data;
-       di = power_supply_get_drvdata(psy);
-       /* For all psy where the driver name appears in any supplied_to */
-       j = match_string(supplicants, ext->num_supplicants, psy->desc->name);
-       if (j < 0)
-               return 0;
-
-       /*
-        * If the external supply does not register 'POWER_SUPPLY_PROP_CAPACITY'
-        * because it handles that sysfs entry on its own, this is the place
-        * to get the battery capacity.
-        */
-       if (!power_supply_get_property(ext, POWER_SUPPLY_PROP_CAPACITY, &ret)) {
-               di->batt_data.percent = ret.intval;
-               capacity_updated = true;
-       }
-
-       /* Go through all properties for the psy */
-       for (j = 0; j < ext->desc->num_properties; j++) {
-               enum power_supply_property prop;
-               prop = ext->desc->properties[j];
-
-               /* Initialize chargers if not already done */
-               if (!di->ac_chg &&
-                       ext->desc->type == POWER_SUPPLY_TYPE_MAINS)
-                       di->ac_chg = psy_to_ux500_charger(ext);
-               else if (!di->usb_chg &&
-                       ext->desc->type == POWER_SUPPLY_TYPE_USB)
-                       di->usb_chg = psy_to_ux500_charger(ext);
-
-               if (power_supply_get_property(ext, prop, &ret))
-                       continue;
-               switch (prop) {
-               case POWER_SUPPLY_PROP_PRESENT:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               /* Battery present */
-                               if (ret.intval)
-                                       di->events.batt_rem = false;
-                               /* Battery removed */
-                               else
-                                       di->events.batt_rem = true;
-                               break;
-                       case POWER_SUPPLY_TYPE_MAINS:
-                               /* AC disconnected */
-                               if (!ret.intval &&
-                                       (di->chg_info.conn_chg & AC_CHG)) {
-                                       di->chg_info.prev_conn_chg =
-                                               di->chg_info.conn_chg;
-                                       di->chg_info.conn_chg &= ~AC_CHG;
-                               }
-                               /* AC connected */
-                               else if (ret.intval &&
-                                       !(di->chg_info.conn_chg & AC_CHG)) {
-                                       di->chg_info.prev_conn_chg =
-                                               di->chg_info.conn_chg;
-                                       di->chg_info.conn_chg |= AC_CHG;
-                               }
-                               break;
-                       case POWER_SUPPLY_TYPE_USB:
-                               /* USB disconnected */
-                               if (!ret.intval &&
-                                       (di->chg_info.conn_chg & USB_CHG)) {
-                                       di->chg_info.prev_conn_chg =
-                                               di->chg_info.conn_chg;
-                                       di->chg_info.conn_chg &= ~USB_CHG;
-                               }
-                               /* USB connected */
-                               else if (ret.intval &&
-                                       !(di->chg_info.conn_chg & USB_CHG)) {
-                                       di->chg_info.prev_conn_chg =
-                                               di->chg_info.conn_chg;
-                                       di->chg_info.conn_chg |= USB_CHG;
-                               }
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_ONLINE:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               break;
-                       case POWER_SUPPLY_TYPE_MAINS:
-                               /* AC offline */
-                               if (!ret.intval &&
-                                       (di->chg_info.online_chg & AC_CHG)) {
-                                       di->chg_info.prev_online_chg =
-                                               di->chg_info.online_chg;
-                                       di->chg_info.online_chg &= ~AC_CHG;
-                               }
-                               /* AC online */
-                               else if (ret.intval &&
-                                       !(di->chg_info.online_chg & AC_CHG)) {
-                                       di->chg_info.prev_online_chg =
-                                               di->chg_info.online_chg;
-                                       di->chg_info.online_chg |= AC_CHG;
-                                       queue_delayed_work(di->chargalg_wq,
-                                               &di->chargalg_wd_work, 0);
-                               }
-                               break;
-                       case POWER_SUPPLY_TYPE_USB:
-                               /* USB offline */
-                               if (!ret.intval &&
-                                       (di->chg_info.online_chg & USB_CHG)) {
-                                       di->chg_info.prev_online_chg =
-                                               di->chg_info.online_chg;
-                                       di->chg_info.online_chg &= ~USB_CHG;
-                               }
-                               /* USB online */
-                               else if (ret.intval &&
-                                       !(di->chg_info.online_chg & USB_CHG)) {
-                                       di->chg_info.prev_online_chg =
-                                               di->chg_info.online_chg;
-                                       di->chg_info.online_chg |= USB_CHG;
-                                       queue_delayed_work(di->chargalg_wq,
-                                               &di->chargalg_wd_work, 0);
-                               }
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_HEALTH:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               break;
-                       case POWER_SUPPLY_TYPE_MAINS:
-                               switch (ret.intval) {
-                               case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
-                                       di->events.mainextchnotok = true;
-                                       di->events.main_thermal_prot = false;
-                                       di->events.main_ovv = false;
-                                       di->events.ac_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_DEAD:
-                                       di->events.ac_wd_expired = true;
-                                       di->events.mainextchnotok = false;
-                                       di->events.main_ovv = false;
-                                       di->events.main_thermal_prot = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_COLD:
-                               case POWER_SUPPLY_HEALTH_OVERHEAT:
-                                       di->events.main_thermal_prot = true;
-                                       di->events.mainextchnotok = false;
-                                       di->events.main_ovv = false;
-                                       di->events.ac_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
-                                       di->events.main_ovv = true;
-                                       di->events.mainextchnotok = false;
-                                       di->events.main_thermal_prot = false;
-                                       di->events.ac_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_GOOD:
-                                       di->events.main_thermal_prot = false;
-                                       di->events.mainextchnotok = false;
-                                       di->events.main_ovv = false;
-                                       di->events.ac_wd_expired = false;
-                                       break;
-                               default:
-                                       break;
-                               }
-                               break;
-
-                       case POWER_SUPPLY_TYPE_USB:
-                               switch (ret.intval) {
-                               case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
-                                       di->events.usbchargernotok = true;
-                                       di->events.usb_thermal_prot = false;
-                                       di->events.vbus_ovv = false;
-                                       di->events.usb_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_DEAD:
-                                       di->events.usb_wd_expired = true;
-                                       di->events.usbchargernotok = false;
-                                       di->events.usb_thermal_prot = false;
-                                       di->events.vbus_ovv = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_COLD:
-                               case POWER_SUPPLY_HEALTH_OVERHEAT:
-                                       di->events.usb_thermal_prot = true;
-                                       di->events.usbchargernotok = false;
-                                       di->events.vbus_ovv = false;
-                                       di->events.usb_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
-                                       di->events.vbus_ovv = true;
-                                       di->events.usbchargernotok = false;
-                                       di->events.usb_thermal_prot = false;
-                                       di->events.usb_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_GOOD:
-                                       di->events.usbchargernotok = false;
-                                       di->events.usb_thermal_prot = false;
-                                       di->events.vbus_ovv = false;
-                                       di->events.usb_wd_expired = false;
-                                       break;
-                               default:
-                                       break;
-                               }
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               di->batt_data.volt = ret.intval / 1000;
-                               break;
-                       case POWER_SUPPLY_TYPE_MAINS:
-                               di->chg_info.ac_volt = ret.intval / 1000;
-                               break;
-                       case POWER_SUPPLY_TYPE_USB:
-                               di->chg_info.usb_volt = ret.intval / 1000;
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_VOLTAGE_AVG:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_MAINS:
-                               /* AVG is used to indicate when we are
-                                * in CV mode */
-                               if (ret.intval)
-                                       di->events.ac_cv_active = true;
-                               else
-                                       di->events.ac_cv_active = false;
-
-                               break;
-                       case POWER_SUPPLY_TYPE_USB:
-                               /* AVG is used to indicate when we are
-                                * in CV mode */
-                               if (ret.intval)
-                                       di->events.usb_cv_active = true;
-                               else
-                                       di->events.usb_cv_active = false;
-
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_TECHNOLOGY:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               if (ret.intval)
-                                       di->events.batt_unknown = false;
-                               else
-                                       di->events.batt_unknown = true;
-
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_TEMP:
-                       di->batt_data.temp = ret.intval / 10;
-                       break;
-
-               case POWER_SUPPLY_PROP_CURRENT_NOW:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_MAINS:
-                               di->chg_info.ac_curr = ret.intval / 1000;
-                               break;
-                       case POWER_SUPPLY_TYPE_USB:
-                               di->chg_info.usb_curr = ret.intval / 1000;
-                               break;
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               di->batt_data.inst_curr = ret.intval / 1000;
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_CURRENT_AVG:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               di->batt_data.avg_curr = ret.intval / 1000;
-                               break;
-                       case POWER_SUPPLY_TYPE_USB:
-                               if (ret.intval)
-                                       di->events.vbus_collapsed = true;
-                               else
-                                       di->events.vbus_collapsed = false;
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-               case POWER_SUPPLY_PROP_CAPACITY:
-                       if (!capacity_updated)
-                               di->batt_data.percent = ret.intval;
-                       break;
-               default:
-                       break;
-               }
-       }
-       return 0;
-}
-
-/**
- * abx500_chargalg_external_power_changed() - callback for power supply changes
- * @psy:       pointer to the structure power_supply
- *
- * This is the callback assigned to the external_power_changed pointer of
- * the power_supply structure.
- * This function gets executed when there is a change in any external power
- * supply that this driver needs to be notified of.
- */
-static void abx500_chargalg_external_power_changed(struct power_supply *psy)
-{
-       struct abx500_chargalg *di = power_supply_get_drvdata(psy);
-
-       /*
-        * Trigger execution of the algorithm instantly and read
-        * all power_supply properties there instead
-        */
-       queue_work(di->chargalg_wq, &di->chargalg_work);
-}
-
-/**
- * abx500_chargalg_algorithm() - Main function for the algorithm
- * @di:                pointer to the abx500_chargalg structure
- *
- * This is the main control function for the charging algorithm.
- * It is called periodically or when something happens that will
- * trigger a state change
- */
-static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
-{
-       int charger_status;
-       int ret;
-       int curr_step_lvl;
-
-       /* Collect data from all power_supply class devices */
-       class_for_each_device(power_supply_class, NULL,
-               di->chargalg_psy, abx500_chargalg_get_ext_psy_data);
-
-       abx500_chargalg_end_of_charge(di);
-       abx500_chargalg_check_temp(di);
-       abx500_chargalg_check_charger_voltage(di);
-
-       charger_status = abx500_chargalg_check_charger_connection(di);
-       abx500_chargalg_check_current_step_status(di);
-
-       if (is_ab8500(di->parent)) {
-               ret = abx500_chargalg_check_charger_enable(di);
-               if (ret < 0)
-                       dev_err(di->dev, "Checking charger is enabled error"
-                                       ": Returned Value %d\n", ret);
-       }
-
-       /*
-        * First check if we have a charger connected.
-        * Also we don't allow charging of unknown batteries if configured
-        * this way
-        */
-       if (!charger_status ||
-               (di->events.batt_unknown && !di->bm->chg_unknown_bat)) {
-               if (di->charge_state != STATE_HANDHELD) {
-                       di->events.safety_timer_expired = false;
-                       abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
-               }
-       }
-
-       /* If suspended, we should not continue checking the flags */
-       else if (di->charge_state == STATE_SUSPENDED_INIT ||
-               di->charge_state == STATE_SUSPENDED) {
-               /* We don't do anything here, just don't continue */
-       }
-
-       /* Safety timer expiration */
-       else if (di->events.safety_timer_expired) {
-               if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
-                       abx500_chargalg_state_to(di,
-                               STATE_SAFETY_TIMER_EXPIRED_INIT);
-       }
-       /*
-        * Check if any interrupts have occurred
-        * that will prevent us from charging
-        */
-
-       /* Battery removed */
-       else if (di->events.batt_rem) {
-               if (di->charge_state != STATE_BATT_REMOVED)
-                       abx500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
-       }
-       /* Main or USB charger not ok. */
-       else if (di->events.mainextchnotok || di->events.usbchargernotok) {
-               /*
-                * If vbus_collapsed is set, we have to lower the charger
-                * current, which is done in the normal state below
-                */
-               if (di->charge_state != STATE_CHG_NOT_OK &&
-                               !di->events.vbus_collapsed)
-                       abx500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
-       }
-       /* VBUS, Main or VBAT OVV. */
-       else if (di->events.vbus_ovv ||
-                       di->events.main_ovv ||
-                       di->events.batt_ovv ||
-                       !di->chg_info.usb_chg_ok ||
-                       !di->chg_info.ac_chg_ok) {
-               if (di->charge_state != STATE_OVV_PROTECT)
-                       abx500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
-       }
-       /* USB Thermal, stop charging */
-       else if (di->events.main_thermal_prot ||
-               di->events.usb_thermal_prot) {
-               if (di->charge_state != STATE_HW_TEMP_PROTECT)
-                       abx500_chargalg_state_to(di,
-                               STATE_HW_TEMP_PROTECT_INIT);
-       }
-       /* Battery temp over/under */
-       else if (di->events.btemp_underover) {
-               if (di->charge_state != STATE_TEMP_UNDEROVER)
-                       abx500_chargalg_state_to(di,
-                               STATE_TEMP_UNDEROVER_INIT);
-       }
-       /* Watchdog expired */
-       else if (di->events.ac_wd_expired ||
-               di->events.usb_wd_expired) {
-               if (di->charge_state != STATE_WD_EXPIRED)
-                       abx500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
-       }
-       /* Battery temp high/low */
-       else if (di->events.btemp_lowhigh) {
-               if (di->charge_state != STATE_TEMP_LOWHIGH)
-                       abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
-       }
-
-       dev_dbg(di->dev,
-               "[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
-               "State %s Active_chg %d Chg_status %d AC %d USB %d "
-               "AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
-               "USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
-               di->batt_data.volt,
-               di->batt_data.avg_curr,
-               di->batt_data.inst_curr,
-               di->batt_data.temp,
-               di->batt_data.percent,
-               di->maintenance_chg,
-               states[di->charge_state],
-               di->chg_info.charger_type,
-               di->charge_status,
-               di->chg_info.conn_chg & AC_CHG,
-               di->chg_info.conn_chg & USB_CHG,
-               di->chg_info.online_chg & AC_CHG,
-               di->chg_info.online_chg & USB_CHG,
-               di->events.ac_cv_active,
-               di->events.usb_cv_active,
-               di->chg_info.ac_curr,
-               di->chg_info.usb_curr,
-               di->chg_info.ac_vset,
-               di->chg_info.ac_iset,
-               di->chg_info.usb_vset,
-               di->chg_info.usb_iset);
-
-       switch (di->charge_state) {
-       case STATE_HANDHELD_INIT:
-               abx500_chargalg_stop_charging(di);
-               di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
-               abx500_chargalg_state_to(di, STATE_HANDHELD);
-               fallthrough;
-
-       case STATE_HANDHELD:
-               break;
-
-       case STATE_SUSPENDED_INIT:
-               if (di->susp_status.ac_suspended)
-                       abx500_chargalg_ac_en(di, false, 0, 0);
-               if (di->susp_status.usb_suspended)
-                       abx500_chargalg_usb_en(di, false, 0, 0);
-               abx500_chargalg_stop_safety_timer(di);
-               abx500_chargalg_stop_maintenance_timer(di);
-               di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-               di->maintenance_chg = false;
-               abx500_chargalg_state_to(di, STATE_SUSPENDED);
-               power_supply_changed(di->chargalg_psy);
-               fallthrough;
-
-       case STATE_SUSPENDED:
-               /* CHARGING is suspended */
-               break;
-
-       case STATE_BATT_REMOVED_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
-               fallthrough;
-
-       case STATE_BATT_REMOVED:
-               if (!di->events.batt_rem)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_HW_TEMP_PROTECT_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
-               fallthrough;
-
-       case STATE_HW_TEMP_PROTECT:
-               if (!di->events.main_thermal_prot &&
-                               !di->events.usb_thermal_prot)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_OVV_PROTECT_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
-               fallthrough;
-
-       case STATE_OVV_PROTECT:
-               if (!di->events.vbus_ovv &&
-                               !di->events.main_ovv &&
-                               !di->events.batt_ovv &&
-                               di->chg_info.usb_chg_ok &&
-                               di->chg_info.ac_chg_ok)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_CHG_NOT_OK_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
-               fallthrough;
-
-       case STATE_CHG_NOT_OK:
-               if (!di->events.mainextchnotok &&
-                               !di->events.usbchargernotok)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_SAFETY_TIMER_EXPIRED_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
-               fallthrough;
-
-       case STATE_SAFETY_TIMER_EXPIRED:
-               /* We exit this state when charger is removed */
-               break;
-
-       case STATE_NORMAL_INIT:
-               if (di->curr_status.curr_step == CHARGALG_CURR_STEP_LOW)
-                       abx500_chargalg_stop_charging(di);
-               else {
-                       curr_step_lvl = di->bm->bat_type[
-                               di->bm->batt_id].normal_cur_lvl
-                               * di->curr_status.curr_step
-                               / CHARGALG_CURR_STEP_HIGH;
-                       abx500_chargalg_start_charging(di,
-                               di->bm->bat_type[di->bm->batt_id]
-                               .normal_vol_lvl, curr_step_lvl);
-               }
-
-               abx500_chargalg_state_to(di, STATE_NORMAL);
-               abx500_chargalg_start_safety_timer(di);
-               abx500_chargalg_stop_maintenance_timer(di);
-               init_maxim_chg_curr(di);
-               di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
-               di->eoc_cnt = 0;
-               di->maintenance_chg = false;
-               power_supply_changed(di->chargalg_psy);
-
-               break;
-
-       case STATE_NORMAL:
-               handle_maxim_chg_curr(di);
-               if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
-                       di->maintenance_chg) {
-                       if (di->bm->no_maintenance)
-                               abx500_chargalg_state_to(di,
-                                       STATE_WAIT_FOR_RECHARGE_INIT);
-                       else
-                               abx500_chargalg_state_to(di,
-                                       STATE_MAINTENANCE_A_INIT);
-               }
-               break;
-
-       /* This state will be used when the maintenance state is disabled */
-       case STATE_WAIT_FOR_RECHARGE_INIT:
-               abx500_chargalg_hold_charging(di);
-               abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
-               fallthrough;
-
-       case STATE_WAIT_FOR_RECHARGE:
-               if (di->batt_data.percent <=
-                   di->bm->bat_type[di->bm->batt_id].
-                   recharge_cap)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_MAINTENANCE_A_INIT:
-               abx500_chargalg_stop_safety_timer(di);
-               abx500_chargalg_start_maintenance_timer(di,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_a_chg_timer_h);
-               abx500_chargalg_start_charging(di,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_a_vol_lvl,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_a_cur_lvl);
-               abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
-               power_supply_changed(di->chargalg_psy);
-               fallthrough;
-
-       case STATE_MAINTENANCE_A:
-               if (di->events.maintenance_timer_expired) {
-                       abx500_chargalg_stop_maintenance_timer(di);
-                       abx500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
-               }
-               break;
-
-       case STATE_MAINTENANCE_B_INIT:
-               abx500_chargalg_start_maintenance_timer(di,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_b_chg_timer_h);
-               abx500_chargalg_start_charging(di,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_b_vol_lvl,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_b_cur_lvl);
-               abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
-               power_supply_changed(di->chargalg_psy);
-               fallthrough;
-
-       case STATE_MAINTENANCE_B:
-               if (di->events.maintenance_timer_expired) {
-                       abx500_chargalg_stop_maintenance_timer(di);
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               }
-               break;
-
-       case STATE_TEMP_LOWHIGH_INIT:
-               abx500_chargalg_start_charging(di,
-                       di->bm->bat_type[
-                               di->bm->batt_id].low_high_vol_lvl,
-                       di->bm->bat_type[
-                               di->bm->batt_id].low_high_cur_lvl);
-               abx500_chargalg_stop_maintenance_timer(di);
-               di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
-               abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
-               power_supply_changed(di->chargalg_psy);
-               fallthrough;
-
-       case STATE_TEMP_LOWHIGH:
-               if (!di->events.btemp_lowhigh)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_WD_EXPIRED_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_WD_EXPIRED);
-               fallthrough;
-
-       case STATE_WD_EXPIRED:
-               if (!di->events.ac_wd_expired &&
-                               !di->events.usb_wd_expired)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_TEMP_UNDEROVER_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
-               fallthrough;
-
-       case STATE_TEMP_UNDEROVER:
-               if (!di->events.btemp_underover)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-       }
-
-       /* Start charging directly if the new state is a charge state */
-       if (di->charge_state == STATE_NORMAL_INIT ||
-                       di->charge_state == STATE_MAINTENANCE_A_INIT ||
-                       di->charge_state == STATE_MAINTENANCE_B_INIT)
-               queue_work(di->chargalg_wq, &di->chargalg_work);
-}
-
-/**
- * abx500_chargalg_periodic_work() - Periodic work for the algorithm
- * @work:      pointer to the work_struct structure
- *
- * Work queue function for the charging algorithm
- */
-static void abx500_chargalg_periodic_work(struct work_struct *work)
-{
-       struct abx500_chargalg *di = container_of(work,
-               struct abx500_chargalg, chargalg_periodic_work.work);
-
-       abx500_chargalg_algorithm(di);
-
-       /*
-        * If a charger is connected then the battery has to be monitored
-        * frequently, else the work can be delayed.
-        */
-       if (di->chg_info.conn_chg)
-               queue_delayed_work(di->chargalg_wq,
-                       &di->chargalg_periodic_work,
-                       di->bm->interval_charging * HZ);
-       else
-               queue_delayed_work(di->chargalg_wq,
-                       &di->chargalg_periodic_work,
-                       di->bm->interval_not_charging * HZ);
-}
-
-/**
- * abx500_chargalg_wd_work() - periodic work to kick the charger watchdog
- * @work:      pointer to the work_struct structure
- *
- * Work queue function for kicking the charger watchdog
- */
-static void abx500_chargalg_wd_work(struct work_struct *work)
-{
-       int ret;
-       struct abx500_chargalg *di = container_of(work,
-               struct abx500_chargalg, chargalg_wd_work.work);
-
-       dev_dbg(di->dev, "abx500_chargalg_wd_work\n");
-
-       ret = abx500_chargalg_kick_watchdog(di);
-       if (ret < 0)
-               dev_err(di->dev, "failed to kick watchdog\n");
-
-       queue_delayed_work(di->chargalg_wq,
-               &di->chargalg_wd_work, CHG_WD_INTERVAL);
-}
-
-/**
- * abx500_chargalg_work() - Work to run the charging algorithm instantly
- * @work:      pointer to the work_struct structure
- *
- * Work queue function for calling the charging algorithm
- */
-static void abx500_chargalg_work(struct work_struct *work)
-{
-       struct abx500_chargalg *di = container_of(work,
-               struct abx500_chargalg, chargalg_work);
-
-       abx500_chargalg_algorithm(di);
-}
-
-/**
- * abx500_chargalg_get_property() - get the chargalg properties
- * @psy:       pointer to the power_supply structure
- * @psp:       pointer to the power_supply_property structure
- * @val:       pointer to the power_supply_propval union
- *
- * This function gets called when an application tries to get the
- * chargalg properties by reading the sysfs files.
- * status:     charging/discharging/full/unknown
- * health:     health of the battery
- * Returns error code in case of failure else 0 on success
- */
-static int abx500_chargalg_get_property(struct power_supply *psy,
-       enum power_supply_property psp,
-       union power_supply_propval *val)
-{
-       struct abx500_chargalg *di = power_supply_get_drvdata(psy);
-
-       switch (psp) {
-       case POWER_SUPPLY_PROP_STATUS:
-               val->intval = di->charge_status;
-               break;
-       case POWER_SUPPLY_PROP_HEALTH:
-               if (di->events.batt_ovv) {
-                       val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
-               } else if (di->events.btemp_underover) {
-                       if (di->batt_data.temp <= di->bm->temp_under)
-                               val->intval = POWER_SUPPLY_HEALTH_COLD;
-                       else
-                               val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
-               } else if (di->charge_state == STATE_SAFETY_TIMER_EXPIRED ||
-                          di->charge_state == STATE_SAFETY_TIMER_EXPIRED_INIT) {
-                       val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
-               } else {
-                       val->intval = POWER_SUPPLY_HEALTH_GOOD;
-               }
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-/* Exposure to the sysfs interface */
-
-static ssize_t abx500_chargalg_curr_step_show(struct abx500_chargalg *di,
-                                             char *buf)
-{
-       return sprintf(buf, "%d\n", di->curr_status.curr_step);
-}
-
-static ssize_t abx500_chargalg_curr_step_store(struct abx500_chargalg *di,
-                                              const char *buf, size_t length)
-{
-       long int param;
-       int ret;
-
-       ret = kstrtol(buf, 10, &param);
-       if (ret < 0)
-               return ret;
-
-       di->curr_status.curr_step = param;
-       if (di->curr_status.curr_step >= CHARGALG_CURR_STEP_LOW &&
-               di->curr_status.curr_step <= CHARGALG_CURR_STEP_HIGH) {
-               di->curr_status.curr_step_change = true;
-               queue_work(di->chargalg_wq, &di->chargalg_work);
-       } else
-               dev_info(di->dev, "Wrong current step\n"
-                       "Enter 0. Disable AC/USB Charging\n"
-                       "1--100. Set AC/USB charging current step\n"
-                       "100. Enable AC/USB Charging\n");
-
-       return strlen(buf);
-}
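
Writing a value to this attribute selects a step on a 0..100 scale; in STATE_NORMAL_INIT the step is scaled against the battery's normal current level, so step 0 disables charging and step 100 charges at the full normal current. A standalone sketch of that mapping (the 950 mA figure is illustrative, and the 0/100 bounds are assumed to match CHARGALG_CURR_STEP_LOW/HIGH):

#include <stdio.h>

#define CHARGALG_CURR_STEP_LOW  0       /* assumed: charging disabled */
#define CHARGALG_CURR_STEP_HIGH 100     /* assumed: full current      */

static int curr_step_to_iset(int normal_cur_lvl, int curr_step)
{
        if (curr_step == CHARGALG_CURR_STEP_LOW)
                return 0;       /* STATE_NORMAL_INIT stops charging instead */
        return normal_cur_lvl * curr_step / CHARGALG_CURR_STEP_HIGH;
}

int main(void)
{
        /* a hypothetical 950 mA battery at steps 25, 50 and 100 */
        printf("%d %d %d mA\n",
               curr_step_to_iset(950, 25),
               curr_step_to_iset(950, 50),
               curr_step_to_iset(950, 100));
        return 0;
}
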
-
-
-static ssize_t abx500_chargalg_en_show(struct abx500_chargalg *di,
-                                      char *buf)
-{
-       return sprintf(buf, "%d\n",
-                      di->susp_status.ac_suspended &&
-                      di->susp_status.usb_suspended);
-}
-
-static ssize_t abx500_chargalg_en_store(struct abx500_chargalg *di,
-       const char *buf, size_t length)
-{
-       long int param;
-       int ac_usb;
-       int ret;
-
-       ret = kstrtol(buf, 10, &param);
-       if (ret < 0)
-               return ret;
-
-       ac_usb = param;
-       switch (ac_usb) {
-       case 0:
-               /* Disable charging */
-               di->susp_status.ac_suspended = true;
-               di->susp_status.usb_suspended = true;
-               di->susp_status.suspended_change = true;
-               /* Trigger a state change */
-               queue_work(di->chargalg_wq,
-                       &di->chargalg_work);
-               break;
-       case 1:
-               /* Enable AC Charging */
-               di->susp_status.ac_suspended = false;
-               di->susp_status.suspended_change = true;
-               /* Trigger a state change */
-               queue_work(di->chargalg_wq,
-                       &di->chargalg_work);
-               break;
-       case 2:
-               /* Enable USB charging */
-               di->susp_status.usb_suspended = false;
-               di->susp_status.suspended_change = true;
-               /* Trigger a state change */
-               queue_work(di->chargalg_wq,
-                       &di->chargalg_work);
-               break;
-       default:
-               dev_info(di->dev, "Wrong input\n"
-                       "Enter 0. Disable AC/USB Charging\n"
-                       "1. Enable AC charging\n"
-                       "2. Enable USB Charging\n");
-       }
-       return strlen(buf);
-}
-
-static struct abx500_chargalg_sysfs_entry abx500_chargalg_en_charger =
-       __ATTR(chargalg, 0644, abx500_chargalg_en_show,
-                               abx500_chargalg_en_store);
-
-static struct abx500_chargalg_sysfs_entry abx500_chargalg_curr_step =
-       __ATTR(chargalg_curr_step, 0644, abx500_chargalg_curr_step_show,
-                                       abx500_chargalg_curr_step_store);
-
-static ssize_t abx500_chargalg_sysfs_show(struct kobject *kobj,
-       struct attribute *attr, char *buf)
-{
-       struct abx500_chargalg_sysfs_entry *entry = container_of(attr,
-               struct abx500_chargalg_sysfs_entry, attr);
-
-       struct abx500_chargalg *di = container_of(kobj,
-               struct abx500_chargalg, chargalg_kobject);
-
-       if (!entry->show)
-               return -EIO;
-
-       return entry->show(di, buf);
-}
-
-static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
-       struct attribute *attr, const char *buf, size_t length)
-{
-       struct abx500_chargalg_sysfs_entry *entry = container_of(attr,
-               struct abx500_chargalg_sysfs_entry, attr);
-
-       struct abx500_chargalg *di = container_of(kobj,
-               struct abx500_chargalg, chargalg_kobject);
-
-       if (!entry->store)
-               return -EIO;
-
-       return entry->store(di, buf, length);
-}
-
-static struct attribute *abx500_chargalg_chg[] = {
-       &abx500_chargalg_en_charger.attr,
-       &abx500_chargalg_curr_step.attr,
-       NULL,
-};
-
-static const struct sysfs_ops abx500_chargalg_sysfs_ops = {
-       .show = abx500_chargalg_sysfs_show,
-       .store = abx500_chargalg_sysfs_charger,
-};
-
-static struct kobj_type abx500_chargalg_ktype = {
-       .sysfs_ops = &abx500_chargalg_sysfs_ops,
-       .default_attrs = abx500_chargalg_chg,
-};
-
-/**
- * abx500_chargalg_sysfs_exit() - de-init of sysfs entry
- * @di:                pointer to the struct abx500_chargalg
- *
- * This function removes the entry in sysfs.
- */
-static void abx500_chargalg_sysfs_exit(struct abx500_chargalg *di)
-{
-       kobject_del(&di->chargalg_kobject);
-}
-
-/**
- * abx500_chargalg_sysfs_init() - init of sysfs entry
- * @di:                pointer to the struct abx500_chargalg
- *
- * This function adds an entry in sysfs.
- * Returns error code in case of failure else 0(on success)
- */
-static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di)
-{
-       int ret = 0;
-
-       ret = kobject_init_and_add(&di->chargalg_kobject,
-               &abx500_chargalg_ktype,
-               NULL, "abx500_chargalg");
-       if (ret < 0)
-               dev_err(di->dev, "failed to create sysfs entry\n");
-
-       return ret;
-}
-/* Exposure to the sysfs interface <<END>> */
-
-static int __maybe_unused abx500_chargalg_resume(struct device *dev)
-{
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
-
-       /* Kick charger watchdog if charging (any charger online) */
-       if (di->chg_info.online_chg)
-               queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
-
-       /*
-        * Run the charging algorithm directly to be sure we don't
-        * do it too seldom
-        */
-       queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
-
-       return 0;
-}
-
-static int __maybe_unused abx500_chargalg_suspend(struct device *dev)
-{
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
-
-       if (di->chg_info.online_chg)
-               cancel_delayed_work_sync(&di->chargalg_wd_work);
-
-       cancel_delayed_work_sync(&di->chargalg_periodic_work);
-
-       return 0;
-}
-
-static char *supply_interface[] = {
-       "ab8500_fg",
-};
-
-static const struct power_supply_desc abx500_chargalg_desc = {
-       .name                   = "abx500_chargalg",
-       .type                   = POWER_SUPPLY_TYPE_BATTERY,
-       .properties             = abx500_chargalg_props,
-       .num_properties         = ARRAY_SIZE(abx500_chargalg_props),
-       .get_property           = abx500_chargalg_get_property,
-       .external_power_changed = abx500_chargalg_external_power_changed,
-};
-
-static int abx500_chargalg_bind(struct device *dev, struct device *master,
-                               void *data)
-{
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
-
-       /* Create a work queue for the chargalg */
-       di->chargalg_wq = alloc_ordered_workqueue("abx500_chargalg_wq",
-                                                 WQ_MEM_RECLAIM);
-       if (di->chargalg_wq == NULL) {
-               dev_err(di->dev, "failed to create work queue\n");
-               return -ENOMEM;
-       }
-
-       /* Run the charging algorithm */
-       queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
-
-       return 0;
-}
-
-static void abx500_chargalg_unbind(struct device *dev, struct device *master,
-                                  void *data)
-{
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
-
-       /* Stop all timers and work */
-       hrtimer_cancel(&di->safety_timer);
-       hrtimer_cancel(&di->maintenance_timer);
-
-       cancel_delayed_work_sync(&di->chargalg_periodic_work);
-       cancel_delayed_work_sync(&di->chargalg_wd_work);
-       cancel_work_sync(&di->chargalg_work);
-
-       /* Delete the work queue */
-       destroy_workqueue(di->chargalg_wq);
-       flush_scheduled_work();
-}
-
-static const struct component_ops abx500_chargalg_component_ops = {
-       .bind = abx500_chargalg_bind,
-       .unbind = abx500_chargalg_unbind,
-};
-
-static int abx500_chargalg_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct power_supply_config psy_cfg = {};
-       struct abx500_chargalg *di;
-       int ret = 0;
-
-       di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
-       if (!di)
-               return -ENOMEM;
-
-       di->bm = &ab8500_bm_data;
-
-       /* get device struct and parent */
-       di->dev = dev;
-       di->parent = dev_get_drvdata(pdev->dev.parent);
-
-       psy_cfg.supplied_to = supply_interface;
-       psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
-       psy_cfg.drv_data = di;
-
-       /* Initilialize safety timer */
-       hrtimer_init(&di->safety_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-       di->safety_timer.function = abx500_chargalg_safety_timer_expired;
-
-       /* Initilialize maintenance timer */
-       hrtimer_init(&di->maintenance_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-       di->maintenance_timer.function =
-               abx500_chargalg_maintenance_timer_expired;
-
-       /* Init work for chargalg */
-       INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
-               abx500_chargalg_periodic_work);
-       INIT_DEFERRABLE_WORK(&di->chargalg_wd_work,
-               abx500_chargalg_wd_work);
-
-       /* Init work for chargalg */
-       INIT_WORK(&di->chargalg_work, abx500_chargalg_work);
-
-       /* To detect charger at startup */
-       di->chg_info.prev_conn_chg = -1;
-
-       /* Register chargalg power supply class */
-       di->chargalg_psy = devm_power_supply_register(di->dev,
-                                                &abx500_chargalg_desc,
-                                                &psy_cfg);
-       if (IS_ERR(di->chargalg_psy)) {
-               dev_err(di->dev, "failed to register chargalg psy\n");
-               return PTR_ERR(di->chargalg_psy);
-       }
-
-       platform_set_drvdata(pdev, di);
-
-       /* sysfs interface to enable/disable charging from user space */
-       ret = abx500_chargalg_sysfs_init(di);
-       if (ret) {
-               dev_err(di->dev, "failed to create sysfs entry\n");
-               return ret;
-       }
-       di->curr_status.curr_step = CHARGALG_CURR_STEP_HIGH;
-
-       dev_info(di->dev, "probe success\n");
-       return component_add(dev, &abx500_chargalg_component_ops);
-}
-
-static int abx500_chargalg_remove(struct platform_device *pdev)
-{
-       struct abx500_chargalg *di = platform_get_drvdata(pdev);
-
-       component_del(&pdev->dev, &abx500_chargalg_component_ops);
-
-       /* sysfs interface to enable/disable charging from user space */
-       abx500_chargalg_sysfs_exit(di);
-
-       return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(abx500_chargalg_pm_ops, abx500_chargalg_suspend, abx500_chargalg_resume);
-
-static const struct of_device_id ab8500_chargalg_match[] = {
-       { .compatible = "stericsson,ab8500-chargalg", },
-       { },
-};
-
-struct platform_driver abx500_chargalg_driver = {
-       .probe = abx500_chargalg_probe,
-       .remove = abx500_chargalg_remove,
-       .driver = {
-               .name = "ab8500-chargalg",
-               .of_match_table = ab8500_chargalg_match,
-               .pm = &abx500_chargalg_pm_ops,
-       },
-};
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
-MODULE_ALIAS("platform:abx500-chargalg");
-MODULE_DESCRIPTION("abx500 battery charging algorithm");
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index a4df1ea..b9553be 100644 (file)
@@ -813,7 +813,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
        if (val == 0)
                return -ENODEV;
 
-       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -823,7 +823,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
 
        info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
        if (info->cable.edev == NULL) {
-               dev_dbg(&pdev->dev, "%s is not ready, probe deferred\n",
+               dev_dbg(dev, "%s is not ready, probe deferred\n",
                        AXP288_EXTCON_DEV_NAME);
                return -EPROBE_DEFER;
        }
@@ -834,8 +834,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
                        dev_dbg(dev, "EXTCON_USB_HOST is not ready, probe deferred\n");
                        return -EPROBE_DEFER;
                }
-               dev_info(&pdev->dev,
-                        "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n");
+               dev_info(dev, "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n");
        }
 
        platform_set_drvdata(pdev, info);
@@ -874,7 +873,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
        INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
        info->otg.id_nb.notifier_call = axp288_charger_handle_otg_evt;
        if (info->otg.cable) {
-               ret = devm_extcon_register_notifier(&pdev->dev, info->otg.cable,
+               ret = devm_extcon_register_notifier(dev, info->otg.cable,
                                        EXTCON_USB_HOST, &info->otg.id_nb);
                if (ret) {
                        dev_err(dev, "failed to register EXTCON_USB_HOST notifier\n");
@@ -899,7 +898,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
                                        NULL, axp288_charger_irq_thread_handler,
                                        IRQF_ONESHOT, info->pdev->name, info);
                if (ret) {
-                       dev_err(&pdev->dev, "failed to request interrupt=%d\n",
+                       dev_err(dev, "failed to request interrupt=%d\n",
                                                                info->irq[i]);
                        return ret;
                }
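The axp288_charger hunks above only swap repeated &pdev->dev references for a dev local that the probe function evidently already has in scope; no behaviour changes. As a reminder of the idiom (a sketch, not the driver itself):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_data {
	int dummy;
};

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;	/* cache the struct device once */
	struct example_data *data;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	dev_info(dev, "probe ok\n");		/* rather than dev_info(&pdev->dev, ...) */
	return 0;
}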
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 2ba2d8d..c1da217 100644 (file)
@@ -2,7 +2,8 @@
 /*
  * axp288_fuel_gauge.c - Xpower AXP288 PMIC Fuel Gauge Driver
  *
- * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2020-2021 Andrejus Basovas <xxx@yyy.tld>
+ * Copyright (C) 2016-2021 Hans de Goede <hdegoede@redhat.com>
  * Copyright (C) 2014 Intel Corporation
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/iio/consumer.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
 #include <asm/unaligned.h>
+#include <asm/iosf_mbi.h>
 
-#define PS_STAT_VBUS_TRIGGER           (1 << 0)
-#define PS_STAT_BAT_CHRG_DIR           (1 << 2)
-#define PS_STAT_VBAT_ABOVE_VHOLD       (1 << 3)
-#define PS_STAT_VBUS_VALID             (1 << 4)
-#define PS_STAT_VBUS_PRESENT           (1 << 5)
+#define PS_STAT_VBUS_TRIGGER                   (1 << 0)
+#define PS_STAT_BAT_CHRG_DIR                   (1 << 2)
+#define PS_STAT_VBAT_ABOVE_VHOLD               (1 << 3)
+#define PS_STAT_VBUS_VALID                     (1 << 4)
+#define PS_STAT_VBUS_PRESENT                   (1 << 5)
 
-#define CHRG_STAT_BAT_SAFE_MODE                (1 << 3)
+#define CHRG_STAT_BAT_SAFE_MODE                        (1 << 3)
 #define CHRG_STAT_BAT_VALID                    (1 << 4)
-#define CHRG_STAT_BAT_PRESENT          (1 << 5)
+#define CHRG_STAT_BAT_PRESENT                  (1 << 5)
 #define CHRG_STAT_CHARGING                     (1 << 6)
 #define CHRG_STAT_PMIC_OTP                     (1 << 7)
 
 #define CHRG_CCCV_CC_MASK                      0xf     /* 4 bits */
-#define CHRG_CCCV_CC_BIT_POS           0
+#define CHRG_CCCV_CC_BIT_POS                   0
 #define CHRG_CCCV_CC_OFFSET                    200     /* 200mA */
-#define CHRG_CCCV_CC_LSB_RES           200     /* 200mA */
+#define CHRG_CCCV_CC_LSB_RES                   200     /* 200mA */
 #define CHRG_CCCV_ITERM_20P                    (1 << 4)    /* 20% of CC */
 #define CHRG_CCCV_CV_MASK                      0x60        /* 2 bits */
-#define CHRG_CCCV_CV_BIT_POS           5
+#define CHRG_CCCV_CV_BIT_POS                   5
 #define CHRG_CCCV_CV_4100MV                    0x0     /* 4.10V */
 #define CHRG_CCCV_CV_4150MV                    0x1     /* 4.15V */
 #define CHRG_CCCV_CV_4200MV                    0x2     /* 4.20V */
 #define CHRG_CCCV_CV_4350MV                    0x3     /* 4.35V */
 #define CHRG_CCCV_CHG_EN                       (1 << 7)
 
-#define FG_CNTL_OCV_ADJ_STAT           (1 << 2)
+#define FG_CNTL_OCV_ADJ_STAT                   (1 << 2)
 #define FG_CNTL_OCV_ADJ_EN                     (1 << 3)
-#define FG_CNTL_CAP_ADJ_STAT           (1 << 4)
+#define FG_CNTL_CAP_ADJ_STAT                   (1 << 4)
 #define FG_CNTL_CAP_ADJ_EN                     (1 << 5)
 #define FG_CNTL_CC_EN                          (1 << 6)
 #define FG_CNTL_GAUGE_EN                       (1 << 7)
 #define FG_CC_CAP_VALID                                (1 << 7)
 #define FG_CC_CAP_VAL_MASK                     0x7F
 
-#define FG_LOW_CAP_THR1_MASK           0xf0    /* 5% tp 20% */
+#define FG_LOW_CAP_THR1_MASK                   0xf0    /* 5% tp 20% */
 #define FG_LOW_CAP_THR1_VAL                    0xa0    /* 15 perc */
-#define FG_LOW_CAP_THR2_MASK           0x0f    /* 0% to 15% */
+#define FG_LOW_CAP_THR2_MASK                   0x0f    /* 0% to 15% */
 #define FG_LOW_CAP_WARN_THR                    14  /* 14 perc */
 #define FG_LOW_CAP_CRIT_THR                    4   /* 4 perc */
 #define FG_LOW_CAP_SHDN_THR                    0   /* 0 perc */
 
-#define NR_RETRY_CNT    3
-#define DEV_NAME       "axp288_fuel_gauge"
+#define DEV_NAME                               "axp288_fuel_gauge"
 
 /* 1.1mV per LSB expressed in uV */
 #define VOLTAGE_FROM_ADC(a)                    ((a * 11) / 10)
 /* properties converted to uV, uA */
-#define PROP_VOLT(a)           ((a) * 1000)
-#define PROP_CURR(a)           ((a) * 1000)
+#define PROP_VOLT(a)                           ((a) * 1000)
+#define PROP_CURR(a)                           ((a) * 1000)
 
-#define AXP288_FG_INTR_NUM     6
+#define AXP288_REG_UPDATE_INTERVAL             (60 * HZ)
+#define AXP288_FG_INTR_NUM                     6
 enum {
        QWBTU_IRQ = 0,
        WBTU_IRQ,
@@ -98,9 +98,6 @@ enum {
 };
 
 enum {
-       BAT_TEMP = 0,
-       PMIC_TEMP,
-       SYSTEM_TEMP,
        BAT_CHRG_CURR,
        BAT_D_CURR,
        BAT_VOLT,
@@ -108,7 +105,7 @@ enum {
 };
 
 struct axp288_fg_info {
-       struct platform_device *pdev;
+       struct device *dev;
        struct regmap *regmap;
        struct regmap_irq_chip_data *regmap_irqc;
        int irq[AXP288_FG_INTR_NUM];
@@ -117,7 +114,21 @@ struct axp288_fg_info {
        struct mutex lock;
        int status;
        int max_volt;
+       int pwr_op;
+       int low_cap;
        struct dentry *debug_file;
+
+       char valid;                 /* zero until following fields are valid */
+       unsigned long last_updated; /* in jiffies */
+
+       int pwr_stat;
+       int fg_res;
+       int bat_volt;
+       int d_curr;
+       int c_curr;
+       int ocv;
+       int fg_cc_mtr1;
+       int fg_des_cap1;
 };
 
 static enum power_supply_property fuel_gauge_props[] = {
@@ -137,17 +148,12 @@ static enum power_supply_property fuel_gauge_props[] = {
 
 static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
 {
-       int ret, i;
        unsigned int val;
+       int ret;
 
-       for (i = 0; i < NR_RETRY_CNT; i++) {
-               ret = regmap_read(info->regmap, reg, &val);
-               if (ret != -EBUSY)
-                       break;
-       }
-
+       ret = regmap_read(info->regmap, reg, &val);
        if (ret < 0) {
-               dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret);
+               dev_err(info->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
                return ret;
        }
 
@@ -161,7 +167,7 @@ static int fuel_gauge_reg_writeb(struct axp288_fg_info *info, int reg, u8 val)
        ret = regmap_write(info->regmap, reg, (unsigned int)val);
 
        if (ret < 0)
-               dev_err(&info->pdev->dev, "axp288 reg write err:%d\n", ret);
+               dev_err(info->dev, "Error writing reg 0x%02x err: %d\n", reg, ret);
 
        return ret;
 }
@@ -173,15 +179,13 @@ static int fuel_gauge_read_15bit_word(struct axp288_fg_info *info, int reg)
 
        ret = regmap_bulk_read(info->regmap, reg, buf, 2);
        if (ret < 0) {
-               dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n",
-                       reg, ret);
+               dev_err(info->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
                return ret;
        }
 
        ret = get_unaligned_be16(buf);
        if (!(ret & FG_15BIT_WORD_VALID)) {
-               dev_err(&info->pdev->dev, "Error reg 0x%02x contents not valid\n",
-                       reg);
+               dev_err(info->dev, "Error reg 0x%02x contents not valid\n", reg);
                return -ENXIO;
        }
 
@@ -195,8 +199,7 @@ static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
 
        ret = regmap_bulk_read(info->regmap, reg, buf, 2);
        if (ret < 0) {
-               dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n",
-                       reg, ret);
+               dev_err(info->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
                return ret;
        }
 
@@ -204,139 +207,78 @@ static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
        return (buf[0] << 4) | ((buf[1] >> 4) & 0x0f);
 }
 
-#ifdef CONFIG_DEBUG_FS
-static int fuel_gauge_debug_show(struct seq_file *s, void *data)
+static int fuel_gauge_update_registers(struct axp288_fg_info *info)
 {
-       struct axp288_fg_info *info = s->private;
-       int raw_val, ret;
-
-       seq_printf(s, " PWR_STATUS[%02x] : %02x\n",
-               AXP20X_PWR_INPUT_STATUS,
-               fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS));
-       seq_printf(s, "PWR_OP_MODE[%02x] : %02x\n",
-               AXP20X_PWR_OP_MODE,
-               fuel_gauge_reg_readb(info, AXP20X_PWR_OP_MODE));
-       seq_printf(s, " CHRG_CTRL1[%02x] : %02x\n",
-               AXP20X_CHRG_CTRL1,
-               fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1));
-       seq_printf(s, "       VLTF[%02x] : %02x\n",
-               AXP20X_V_LTF_DISCHRG,
-               fuel_gauge_reg_readb(info, AXP20X_V_LTF_DISCHRG));
-       seq_printf(s, "       VHTF[%02x] : %02x\n",
-               AXP20X_V_HTF_DISCHRG,
-               fuel_gauge_reg_readb(info, AXP20X_V_HTF_DISCHRG));
-       seq_printf(s, "    CC_CTRL[%02x] : %02x\n",
-               AXP20X_CC_CTRL,
-               fuel_gauge_reg_readb(info, AXP20X_CC_CTRL));
-       seq_printf(s, "BATTERY CAP[%02x] : %02x\n",
-               AXP20X_FG_RES,
-               fuel_gauge_reg_readb(info, AXP20X_FG_RES));
-       seq_printf(s, "    FG_RDC1[%02x] : %02x\n",
-               AXP288_FG_RDC1_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_RDC1_REG));
-       seq_printf(s, "    FG_RDC0[%02x] : %02x\n",
-               AXP288_FG_RDC0_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_RDC0_REG));
-       seq_printf(s, "     FG_OCV[%02x] : %04x\n",
-               AXP288_FG_OCVH_REG,
-               fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG));
-       seq_printf(s, " FG_DES_CAP[%02x] : %04x\n",
-               AXP288_FG_DES_CAP1_REG,
-               fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG));
-       seq_printf(s, "  FG_CC_MTR[%02x] : %04x\n",
-               AXP288_FG_CC_MTR1_REG,
-               fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG));
-       seq_printf(s, " FG_OCV_CAP[%02x] : %02x\n",
-               AXP288_FG_OCV_CAP_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_OCV_CAP_REG));
-       seq_printf(s, "  FG_CC_CAP[%02x] : %02x\n",
-               AXP288_FG_CC_CAP_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_CC_CAP_REG));
-       seq_printf(s, " FG_LOW_CAP[%02x] : %02x\n",
-               AXP288_FG_LOW_CAP_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG));
-       seq_printf(s, "TUNING_CTL0[%02x] : %02x\n",
-               AXP288_FG_TUNE0,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE0));
-       seq_printf(s, "TUNING_CTL1[%02x] : %02x\n",
-               AXP288_FG_TUNE1,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE1));
-       seq_printf(s, "TUNING_CTL2[%02x] : %02x\n",
-               AXP288_FG_TUNE2,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE2));
-       seq_printf(s, "TUNING_CTL3[%02x] : %02x\n",
-               AXP288_FG_TUNE3,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE3));
-       seq_printf(s, "TUNING_CTL4[%02x] : %02x\n",
-               AXP288_FG_TUNE4,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE4));
-       seq_printf(s, "TUNING_CTL5[%02x] : %02x\n",
-               AXP288_FG_TUNE5,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE5));
-
-       ret = iio_read_channel_raw(info->iio_channel[BAT_TEMP], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-batttemp : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[PMIC_TEMP], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-pmictemp : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[SYSTEM_TEMP], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-systtemp : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-chrgcurr : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-dchrgcur : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-battvolt : %d\n", raw_val);
+       int ret;
 
-       return 0;
-}
+       if (info->valid && time_before(jiffies, info->last_updated + AXP288_REG_UPDATE_INTERVAL))
+               return 0;
 
-DEFINE_SHOW_ATTRIBUTE(fuel_gauge_debug);
+       dev_dbg(info->dev, "Fuel Gauge updating register values...\n");
 
-static void fuel_gauge_create_debugfs(struct axp288_fg_info *info)
-{
-       info->debug_file = debugfs_create_file("fuelgauge", 0666, NULL,
-               info, &fuel_gauge_debug_fops);
-}
+       ret = iosf_mbi_block_punit_i2c_access();
+       if (ret < 0)
+               return ret;
 
-static void fuel_gauge_remove_debugfs(struct axp288_fg_info *info)
-{
-       debugfs_remove(info->debug_file);
-}
-#else
-static inline void fuel_gauge_create_debugfs(struct axp288_fg_info *info)
-{
-}
-static inline void fuel_gauge_remove_debugfs(struct axp288_fg_info *info)
-{
+       ret = fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS);
+       if (ret < 0)
+               goto out;
+       info->pwr_stat = ret;
+
+       ret = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
+       if (ret < 0)
+               goto out;
+       info->fg_res = ret;
+
+       ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &info->bat_volt);
+       if (ret < 0)
+               goto out;
+
+       if (info->pwr_stat & PS_STAT_BAT_CHRG_DIR) {
+               info->d_curr = 0;
+               ret = iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], &info->c_curr);
+               if (ret < 0)
+                       goto out;
+       } else {
+               info->c_curr = 0;
+               ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &info->d_curr);
+               if (ret < 0)
+                       goto out;
+       }
+
+       ret = fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG);
+       if (ret < 0)
+               goto out;
+       info->ocv = ret;
+
+       ret = fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG);
+       if (ret < 0)
+               goto out;
+       info->fg_cc_mtr1 = ret;
+
+       ret = fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG);
+       if (ret < 0)
+               goto out;
+       info->fg_des_cap1 = ret;
+
+       info->last_updated = jiffies;
+       info->valid = 1;
+       ret = 0;
+out:
+       iosf_mbi_unblock_punit_i2c_access();
+       return ret;
 }
-#endif
 
 static void fuel_gauge_get_status(struct axp288_fg_info *info)
 {
-       int pwr_stat, fg_res, curr, ret;
-
-       pwr_stat = fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS);
-       if (pwr_stat < 0) {
-               dev_err(&info->pdev->dev,
-                       "PWR STAT read failed:%d\n", pwr_stat);
-               return;
-       }
+       int pwr_stat = info->pwr_stat;
+       int fg_res = info->fg_res;
+       int curr = info->d_curr;
 
        /* Report full if Vbus is valid and the reported capacity is 100% */
        if (!(pwr_stat & PS_STAT_VBUS_VALID))
                goto not_full;
 
-       fg_res = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
-       if (fg_res < 0) {
-               dev_err(&info->pdev->dev, "FG RES read failed: %d\n", fg_res);
-               return;
-       }
        if (!(fg_res & FG_REP_CAP_VALID))
                goto not_full;
 
@@ -354,11 +296,6 @@ static void fuel_gauge_get_status(struct axp288_fg_info *info)
        if (fg_res < 90 || (pwr_stat & PS_STAT_BAT_CHRG_DIR))
                goto not_full;
 
-       ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &curr);
-       if (ret < 0) {
-               dev_err(&info->pdev->dev, "FG get current failed: %d\n", ret);
-               return;
-       }
        if (curr == 0) {
                info->status = POWER_SUPPLY_STATUS_FULL;
                return;
@@ -371,61 +308,16 @@ not_full:
                info->status = POWER_SUPPLY_STATUS_DISCHARGING;
 }
 
-static int fuel_gauge_get_vbatt(struct axp288_fg_info *info, int *vbatt)
-{
-       int ret = 0, raw_val;
-
-       ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
-       if (ret < 0)
-               goto vbatt_read_fail;
-
-       *vbatt = VOLTAGE_FROM_ADC(raw_val);
-vbatt_read_fail:
-       return ret;
-}
-
-static int fuel_gauge_get_current(struct axp288_fg_info *info, int *cur)
-{
-       int ret, discharge;
-
-       /* First check discharge current, so that we do only 1 read on bat. */
-       ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &discharge);
-       if (ret < 0)
-               return ret;
-
-       if (discharge > 0) {
-               *cur = -1 * discharge;
-               return 0;
-       }
-
-       return iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], cur);
-}
-
-static int fuel_gauge_get_vocv(struct axp288_fg_info *info, int *vocv)
-{
-       int ret;
-
-       ret = fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG);
-       if (ret >= 0)
-               *vocv = VOLTAGE_FROM_ADC(ret);
-
-       return ret;
-}
-
 static int fuel_gauge_battery_health(struct axp288_fg_info *info)
 {
-       int ret, vocv, health = POWER_SUPPLY_HEALTH_UNKNOWN;
-
-       ret = fuel_gauge_get_vocv(info, &vocv);
-       if (ret < 0)
-               goto health_read_fail;
+       int vocv = VOLTAGE_FROM_ADC(info->ocv);
+       int health = POWER_SUPPLY_HEALTH_UNKNOWN;
 
        if (vocv > info->max_volt)
                health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
        else
                health = POWER_SUPPLY_HEALTH_GOOD;
 
-health_read_fail:
        return health;
 }
 
@@ -434,9 +326,14 @@ static int fuel_gauge_get_property(struct power_supply *ps,
                union power_supply_propval *val)
 {
        struct axp288_fg_info *info = power_supply_get_drvdata(ps);
-       int ret = 0, value;
+       int ret, value;
 
        mutex_lock(&info->lock);
+
+       ret = fuel_gauge_update_registers(info);
+       if (ret < 0)
+               goto out;
+
        switch (prop) {
        case POWER_SUPPLY_PROP_STATUS:
                fuel_gauge_get_status(info);
@@ -446,78 +343,52 @@ static int fuel_gauge_get_property(struct power_supply *ps,
                val->intval = fuel_gauge_battery_health(info);
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-               ret = fuel_gauge_get_vbatt(info, &value);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
+               value = VOLTAGE_FROM_ADC(info->bat_volt);
                val->intval = PROP_VOLT(value);
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_OCV:
-               ret = fuel_gauge_get_vocv(info, &value);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
+               value = VOLTAGE_FROM_ADC(info->ocv);
                val->intval = PROP_VOLT(value);
                break;
        case POWER_SUPPLY_PROP_CURRENT_NOW:
-               ret = fuel_gauge_get_current(info, &value);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
+               if (info->d_curr > 0)
+                       value = -1 * info->d_curr;
+               else
+                       value = info->c_curr;
+
                val->intval = PROP_CURR(value);
                break;
        case POWER_SUPPLY_PROP_PRESENT:
-               ret = fuel_gauge_reg_readb(info, AXP20X_PWR_OP_MODE);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               if (ret & CHRG_STAT_BAT_PRESENT)
+               if (info->pwr_op & CHRG_STAT_BAT_PRESENT)
                        val->intval = 1;
                else
                        val->intval = 0;
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
-               ret = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               if (!(ret & FG_REP_CAP_VALID))
-                       dev_err(&info->pdev->dev,
-                               "capacity measurement not valid\n");
-               val->intval = (ret & FG_REP_CAP_VAL_MASK);
+               if (!(info->fg_res & FG_REP_CAP_VALID))
+                       dev_err(info->dev, "capacity measurement not valid\n");
+               val->intval = (info->fg_res & FG_REP_CAP_VAL_MASK);
                break;
        case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
-               ret = fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-               val->intval = (ret & 0x0f);
+               val->intval = (info->low_cap & 0x0f);
                break;
        case POWER_SUPPLY_PROP_TECHNOLOGY:
                val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
                break;
        case POWER_SUPPLY_PROP_CHARGE_NOW:
-               ret = fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               val->intval = ret * FG_DES_CAP_RES_LSB;
+               val->intval = info->fg_cc_mtr1 * FG_DES_CAP_RES_LSB;
                break;
        case POWER_SUPPLY_PROP_CHARGE_FULL:
-               ret = fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               val->intval = ret * FG_DES_CAP_RES_LSB;
+               val->intval = info->fg_des_cap1 * FG_DES_CAP_RES_LSB;
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
                val->intval = PROP_VOLT(info->max_volt);
                break;
        default:
-               mutex_unlock(&info->lock);
-               return -EINVAL;
+               ret = -EINVAL;
        }
 
-       mutex_unlock(&info->lock);
-       return 0;
-
-fuel_gauge_read_err:
+out:
        mutex_unlock(&info->lock);
        return ret;
 }
@@ -527,7 +398,7 @@ static int fuel_gauge_set_property(struct power_supply *ps,
                const union power_supply_propval *val)
 {
        struct axp288_fg_info *info = power_supply_get_drvdata(ps);
-       int ret = 0;
+       int new_low_cap, ret = 0;
 
        mutex_lock(&info->lock);
        switch (prop) {
@@ -536,12 +407,12 @@ static int fuel_gauge_set_property(struct power_supply *ps,
                        ret = -EINVAL;
                        break;
                }
-               ret = fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG);
-               if (ret < 0)
-                       break;
-               ret &= 0xf0;
-               ret |= (val->intval & 0xf);
-               ret = fuel_gauge_reg_writeb(info, AXP288_FG_LOW_CAP_REG, ret);
+               new_low_cap = info->low_cap;
+               new_low_cap &= 0xf0;
+               new_low_cap |= (val->intval & 0xf);
+               ret = fuel_gauge_reg_writeb(info, AXP288_FG_LOW_CAP_REG, new_low_cap);
+               if (ret == 0)
+                       info->low_cap = new_low_cap;
                break;
        default:
                ret = -EINVAL;
@@ -579,37 +450,35 @@ static irqreturn_t fuel_gauge_thread_handler(int irq, void *dev)
        }
 
        if (i >= AXP288_FG_INTR_NUM) {
-               dev_warn(&info->pdev->dev, "spurious interrupt!!\n");
+               dev_warn(info->dev, "spurious interrupt!!\n");
                return IRQ_NONE;
        }
 
        switch (i) {
        case QWBTU_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Quit Battery under temperature in work mode IRQ (QWBTU)\n");
+               dev_info(info->dev, "Quit Battery under temperature in work mode IRQ (QWBTU)\n");
                break;
        case WBTU_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Battery under temperature in work mode IRQ (WBTU)\n");
+               dev_info(info->dev, "Battery under temperature in work mode IRQ (WBTU)\n");
                break;
        case QWBTO_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Quit Battery over temperature in work mode IRQ (QWBTO)\n");
+               dev_info(info->dev, "Quit Battery over temperature in work mode IRQ (QWBTO)\n");
                break;
        case WBTO_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Battery over temperature in work mode IRQ (WBTO)\n");
+               dev_info(info->dev, "Battery over temperature in work mode IRQ (WBTO)\n");
                break;
        case WL2_IRQ:
-               dev_info(&info->pdev->dev, "Low Batt Warning(2) INTR\n");
+               dev_info(info->dev, "Low Batt Warning(2) INTR\n");
                break;
        case WL1_IRQ:
-               dev_info(&info->pdev->dev, "Low Batt Warning(1) INTR\n");
+               dev_info(info->dev, "Low Batt Warning(1) INTR\n");
                break;
        default:
-               dev_warn(&info->pdev->dev, "Spurious Interrupt!!!\n");
+               dev_warn(info->dev, "Spurious Interrupt!!!\n");
        }
 
+       info->valid = 0; /* Force updating of the cached registers */
+
        power_supply_changed(info->bat);
        return IRQ_HANDLED;
 }
@@ -618,6 +487,7 @@ static void fuel_gauge_external_power_changed(struct power_supply *psy)
 {
        struct axp288_fg_info *info = power_supply_get_drvdata(psy);
 
+       info->valid = 0; /* Force updating of the cached registers */
        power_supply_changed(info->bat);
 }
 
@@ -632,16 +502,15 @@ static const struct power_supply_desc fuel_gauge_desc = {
        .external_power_changed = fuel_gauge_external_power_changed,
 };
 
-static void fuel_gauge_init_irq(struct axp288_fg_info *info)
+static void fuel_gauge_init_irq(struct axp288_fg_info *info, struct platform_device *pdev)
 {
        int ret, i, pirq;
 
        for (i = 0; i < AXP288_FG_INTR_NUM; i++) {
-               pirq = platform_get_irq(info->pdev, i);
+               pirq = platform_get_irq(pdev, i);
                info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
                if (info->irq[i] < 0) {
-                       dev_warn(&info->pdev->dev,
-                               "regmap_irq get virq failed for IRQ %d: %d\n",
+                       dev_warn(info->dev, "regmap_irq get virq failed for IRQ %d: %d\n",
                                pirq, info->irq[i]);
                        info->irq[i] = -1;
                        goto intr_failed;
@@ -650,14 +519,10 @@ static void fuel_gauge_init_irq(struct axp288_fg_info *info)
                                NULL, fuel_gauge_thread_handler,
                                IRQF_ONESHOT, DEV_NAME, info);
                if (ret) {
-                       dev_warn(&info->pdev->dev,
-                               "request irq failed for IRQ %d: %d\n",
+                       dev_warn(info->dev, "request irq failed for IRQ %d: %d\n",
                                pirq, info->irq[i]);
                        info->irq[i] = -1;
                        goto intr_failed;
-               } else {
-                       dev_info(&info->pdev->dev, "HW IRQ %d -> VIRQ %d\n",
-                               pirq, info->irq[i]);
                }
        }
        return;
@@ -753,9 +618,6 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
        struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
        struct power_supply_config psy_cfg = {};
        static const char * const iio_chan_name[] = {
-               [BAT_TEMP] = "axp288-batt-temp",
-               [PMIC_TEMP] = "axp288-pmic-temp",
-               [SYSTEM_TEMP] = "axp288-system-temp",
                [BAT_CHRG_CURR] = "axp288-chrg-curr",
                [BAT_D_CURR] = "axp288-chrg-d-curr",
                [BAT_VOLT] = "axp288-batt-volt",
@@ -765,24 +627,15 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
        if (dmi_check_system(axp288_no_battery_list))
                return -ENODEV;
 
-       /*
-        * On some devices the fuelgauge and charger parts of the axp288 are
-        * not used, check that the fuelgauge is enabled (CC_CTRL != 0).
-        */
-       ret = regmap_read(axp20x->regmap, AXP20X_CC_CTRL, &val);
-       if (ret < 0)
-               return ret;
-       if (val == 0)
-               return -ENODEV;
-
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
-       info->pdev = pdev;
+       info->dev = &pdev->dev;
        info->regmap = axp20x->regmap;
        info->regmap_irqc = axp20x->regmap_irqc;
        info->status = POWER_SUPPLY_STATUS_UNKNOWN;
+       info->valid = 0;
 
        platform_set_drvdata(pdev, info);
 
@@ -808,19 +661,35 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
                }
        }
 
-       ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
+       ret = iosf_mbi_block_punit_i2c_access();
        if (ret < 0)
                goto out_free_iio_chan;
 
+       /*
+        * On some devices the fuelgauge and charger parts of the axp288 are
+        * not used, check that the fuelgauge is enabled (CC_CTRL != 0).
+        */
+       ret = regmap_read(axp20x->regmap, AXP20X_CC_CTRL, &val);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+       if (val == 0) {
+               ret = -ENODEV;
+               goto unblock_punit_i2c_access;
+       }
+
+       ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+
        if (!(ret & FG_DES_CAP1_VALID)) {
                dev_err(&pdev->dev, "axp288 not configured by firmware\n");
                ret = -ENODEV;
-               goto out_free_iio_chan;
+               goto unblock_punit_i2c_access;
        }
 
        ret = fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1);
        if (ret < 0)
-               goto out_free_iio_chan;
+               goto unblock_punit_i2c_access;
        switch ((ret & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS) {
        case CHRG_CCCV_CV_4100MV:
                info->max_volt = 4100;
@@ -836,6 +705,22 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
                break;
        }
 
+       ret = fuel_gauge_reg_readb(info, AXP20X_PWR_OP_MODE);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+       info->pwr_op = ret;
+
+       ret = fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+       info->low_cap = ret;
+
+unblock_punit_i2c_access:
+       iosf_mbi_unblock_punit_i2c_access();
+       /* In case we arrive here by goto because of a register access error */
+       if (ret < 0)
+               goto out_free_iio_chan;
+
        psy_cfg.drv_data = info;
        info->bat = power_supply_register(&pdev->dev, &fuel_gauge_desc, &psy_cfg);
        if (IS_ERR(info->bat)) {
@@ -844,8 +729,7 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
                goto out_free_iio_chan;
        }
 
-       fuel_gauge_create_debugfs(info);
-       fuel_gauge_init_irq(info);
+       fuel_gauge_init_irq(info, pdev);
 
        return 0;
 
@@ -869,7 +753,6 @@ static int axp288_fuel_gauge_remove(struct platform_device *pdev)
        int i;
 
        power_supply_unregister(info->bat);
-       fuel_gauge_remove_debugfs(info);
 
        for (i = 0; i < AXP288_FG_INTR_NUM; i++)
                if (info->irq[i] >= 0)
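The fuel-gauge rework above replaces per-property register reads with a single fuel_gauge_update_registers() pass that is bracketed by iosf_mbi_block_punit_i2c_access() and cached for AXP288_REG_UPDATE_INTERVAL; the cache is invalidated (info->valid = 0) from the IRQ handler and the external_power_changed callback. A stripped-down sketch of that jiffies-based cache check, with hypothetical names rather than the driver's own:

#include <linux/jiffies.h>
#include <linux/types.h>

#define CACHE_INTERVAL	(60 * HZ)	/* mirrors the 60 second interval above */

struct cached_reg {
	bool valid;			/* cleared to force a refresh */
	unsigned long last_updated;	/* jiffies at the last refresh */
	int value;
};

static int cached_reg_get(struct cached_reg *c, int (*read_hw)(void))
{
	int ret;

	if (c->valid && time_before(jiffies, c->last_updated + CACHE_INTERVAL))
		return c->value;	/* still fresh, skip the bus access */

	ret = read_hw();
	if (ret < 0)
		return ret;		/* keep the old cache state on error */

	c->value = ret;
	c->last_updated = jiffies;
	c->valid = true;

	return c->value;
}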
diff --git a/drivers/power/supply/bq24735-charger.c b/drivers/power/supply/bq24735-charger.c
index b5d619d..3ce36d0 100644 (file)
@@ -31,9 +31,8 @@
 
 #include <linux/power/bq24735-charger.h>
 
-#define BQ24735_CHG_OPT                        0x12
-#define BQ24735_CHG_OPT_CHARGE_DISABLE (1 << 0)
-#define BQ24735_CHG_OPT_AC_PRESENT     (1 << 4)
+/* BQ24735 available commands and their respective masks */
+#define BQ24735_CHARGE_OPT             0x12
 #define BQ24735_CHARGE_CURRENT         0x14
 #define BQ24735_CHARGE_CURRENT_MASK    0x1fc0
 #define BQ24735_CHARGE_VOLTAGE         0x15
 #define BQ24735_MANUFACTURER_ID                0xfe
 #define BQ24735_DEVICE_ID              0xff
 
+/* ChargeOptions bits of interest */
+#define BQ24735_CHARGE_OPT_CHG_DISABLE (1 << 0)
+#define BQ24735_CHARGE_OPT_AC_PRESENT  (1 << 4)
+
 struct bq24735 {
        struct power_supply             *charger;
        struct power_supply_desc        charger_desc;
@@ -167,8 +170,8 @@ static inline int bq24735_enable_charging(struct bq24735 *charger)
        if (ret)
                return ret;
 
-       return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE, 0);
+       return bq24735_update_word(charger->client, BQ24735_CHARGE_OPT,
+                                  BQ24735_CHARGE_OPT_CHG_DISABLE, 0);
 }
 
 static inline int bq24735_disable_charging(struct bq24735 *charger)
@@ -176,9 +179,9 @@ static inline int bq24735_disable_charging(struct bq24735 *charger)
        if (charger->pdata->ext_control)
                return 0;
 
-       return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE);
+       return bq24735_update_word(charger->client, BQ24735_CHARGE_OPT,
+                                  BQ24735_CHARGE_OPT_CHG_DISABLE,
+                                  BQ24735_CHARGE_OPT_CHG_DISABLE);
 }
 
 static bool bq24735_charger_is_present(struct bq24735 *charger)
@@ -188,14 +191,14 @@ static bool bq24735_charger_is_present(struct bq24735 *charger)
        } else {
                int ac = 0;
 
-               ac = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
+               ac = bq24735_read_word(charger->client, BQ24735_CHARGE_OPT);
                if (ac < 0) {
                        dev_dbg(&charger->client->dev,
                                "Failed to read charger options : %d\n",
                                ac);
                        return false;
                }
-               return (ac & BQ24735_CHG_OPT_AC_PRESENT) ? true : false;
+               return (ac & BQ24735_CHARGE_OPT_AC_PRESENT) ? true : false;
        }
 
        return false;
@@ -208,11 +211,11 @@ static int bq24735_charger_is_charging(struct bq24735 *charger)
        if (!bq24735_charger_is_present(charger))
                return 0;
 
-       ret  = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
+       ret  = bq24735_read_word(charger->client, BQ24735_CHARGE_OPT);
        if (ret < 0)
                return ret;
 
-       return !(ret & BQ24735_CHG_OPT_CHARGE_DISABLE);
+       return !(ret & BQ24735_CHARGE_OPT_CHG_DISABLE);
 }
 
 static void bq24735_update(struct bq24735 *charger)
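The bq24735 hunks are a pure rename (BQ24735_CHG_OPT* to BQ24735_CHARGE_OPT*); the enable/disable paths still go through bq24735_update_word(), whose body is not shown in these hunks. Such a helper is typically a read-modify-write of an SMBus word register, along these lines (an assumption, not the driver's implementation):

#include <linux/i2c.h>

static int example_update_word(struct i2c_client *client, u8 reg,
			       u16 mask, u16 value)
{
	s32 word = i2c_smbus_read_word_data(client, reg);

	if (word < 0)
		return word;

	word &= ~mask;		/* clear the bits owned by the caller */
	word |= value & mask;	/* apply the requested state */

	return i2c_smbus_write_word_data(client, reg, word);
}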
diff --git a/drivers/power/supply/cros_peripheral_charger.c b/drivers/power/supply/cros_peripheral_charger.c
new file mode 100644 (file)
index 0000000..305f10d
--- /dev/null
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power supply driver for ChromeOS EC based Peripheral Device Charger.
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/stringify.h>
+#include <linux/types.h>
+
+#define DRV_NAME               "cros-ec-pchg"
+#define PCHG_DIR_PREFIX                "peripheral"
+#define PCHG_DIR_NAME          PCHG_DIR_PREFIX "%d"
+#define PCHG_DIR_NAME_LENGTH \
+               sizeof(PCHG_DIR_PREFIX __stringify(EC_PCHG_MAX_PORTS))
+#define PCHG_CACHE_UPDATE_DELAY        msecs_to_jiffies(500)
+
+struct port_data {
+       int port_number;
+       char name[PCHG_DIR_NAME_LENGTH];
+       struct power_supply *psy;
+       struct power_supply_desc psy_desc;
+       int psy_status;
+       int battery_percentage;
+       int charge_type;
+       struct charger_data *charger;
+       unsigned long last_update;
+};
+
+struct charger_data {
+       struct device *dev;
+       struct cros_ec_dev *ec_dev;
+       struct cros_ec_device *ec_device;
+       int num_registered_psy;
+       struct port_data *ports[EC_PCHG_MAX_PORTS];
+       struct notifier_block notifier;
+};
+
+static enum power_supply_property cros_pchg_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_CHARGE_TYPE,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_SCOPE,
+};
+
+static int cros_pchg_ec_command(const struct charger_data *charger,
+                               unsigned int version,
+                               unsigned int command,
+                               const void *outdata,
+                               unsigned int outsize,
+                               void *indata,
+                               unsigned int insize)
+{
+       struct cros_ec_dev *ec_dev = charger->ec_dev;
+       struct cros_ec_command *msg;
+       int ret;
+
+       msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       msg->version = version;
+       msg->command = ec_dev->cmd_offset + command;
+       msg->outsize = outsize;
+       msg->insize = insize;
+
+       if (outsize)
+               memcpy(msg->data, outdata, outsize);
+
+       ret = cros_ec_cmd_xfer_status(charger->ec_device, msg);
+       if (ret >= 0 && insize)
+               memcpy(indata, msg->data, insize);
+
+       kfree(msg);
+       return ret;
+}
+
+static const unsigned int pchg_cmd_version = 1;
+
+static bool cros_pchg_cmd_ver_check(const struct charger_data *charger)
+{
+       struct ec_params_get_cmd_versions_v1 req;
+       struct ec_response_get_cmd_versions rsp;
+       int ret;
+
+       req.cmd = EC_CMD_PCHG;
+       ret = cros_pchg_ec_command(charger, 1, EC_CMD_GET_CMD_VERSIONS,
+                                  &req, sizeof(req), &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_warn(charger->dev,
+                        "Unable to get versions of EC_CMD_PCHG (err:%d)\n",
+                        ret);
+               return false;
+       }
+
+       return !!(rsp.version_mask & BIT(pchg_cmd_version));
+}
+
+static int cros_pchg_port_count(const struct charger_data *charger)
+{
+       struct ec_response_pchg_count rsp;
+       int ret;
+
+       ret = cros_pchg_ec_command(charger, 0, EC_CMD_PCHG_COUNT,
+                                  NULL, 0, &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_warn(charger->dev,
+                        "Unable to get number of ports (err:%d)\n", ret);
+               return ret;
+       }
+
+       return rsp.port_count;
+}
+
+static int cros_pchg_get_status(struct port_data *port)
+{
+       struct charger_data *charger = port->charger;
+       struct ec_params_pchg req;
+       struct ec_response_pchg rsp;
+       struct device *dev = charger->dev;
+       int old_status = port->psy_status;
+       int old_percentage = port->battery_percentage;
+       int ret;
+
+       req.port = port->port_number;
+       ret = cros_pchg_ec_command(charger, pchg_cmd_version, EC_CMD_PCHG,
+                                  &req, sizeof(req), &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_err(dev, "Unable to get port.%d status (err:%d)\n",
+                       port->port_number, ret);
+               return ret;
+       }
+
+       switch (rsp.state) {
+       case PCHG_STATE_RESET:
+       case PCHG_STATE_INITIALIZED:
+       case PCHG_STATE_ENABLED:
+       default:
+               port->psy_status = POWER_SUPPLY_STATUS_UNKNOWN;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+               break;
+       case PCHG_STATE_DETECTED:
+               port->psy_status = POWER_SUPPLY_STATUS_CHARGING;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+               break;
+       case PCHG_STATE_CHARGING:
+               port->psy_status = POWER_SUPPLY_STATUS_CHARGING;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+               break;
+       case PCHG_STATE_FULL:
+               port->psy_status = POWER_SUPPLY_STATUS_FULL;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+               break;
+       }
+
+       port->battery_percentage = rsp.battery_percentage;
+
+       if (port->psy_status != old_status ||
+                       port->battery_percentage != old_percentage)
+               power_supply_changed(port->psy);
+
+       dev_dbg(dev,
+               "Port %d: state=%d battery=%d%%\n",
+               port->port_number, rsp.state, rsp.battery_percentage);
+
+       return 0;
+}
+
+static int cros_pchg_get_port_status(struct port_data *port, bool ratelimit)
+{
+       int ret;
+
+       if (ratelimit &&
+           time_is_after_jiffies(port->last_update + PCHG_CACHE_UPDATE_DELAY))
+               return 0;
+
+       ret = cros_pchg_get_status(port);
+       if (ret < 0)
+               return ret;
+
+       port->last_update = jiffies;
+
+       return ret;
+}
+
+static int cros_pchg_get_prop(struct power_supply *psy,
+                             enum power_supply_property psp,
+                             union power_supply_propval *val)
+{
+       struct port_data *port = power_supply_get_drvdata(psy);
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_STATUS:
+       case POWER_SUPPLY_PROP_CAPACITY:
+       case POWER_SUPPLY_PROP_CHARGE_TYPE:
+               cros_pchg_get_port_status(port, true);
+               break;
+       default:
+               break;
+       }
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_STATUS:
+               val->intval = port->psy_status;
+               break;
+       case POWER_SUPPLY_PROP_CAPACITY:
+               val->intval = port->battery_percentage;
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TYPE:
+               val->intval = port->charge_type;
+               break;
+       case POWER_SUPPLY_PROP_SCOPE:
+               val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int cros_pchg_event(const struct charger_data *charger,
+                          unsigned long host_event)
+{
+       int i;
+
+       for (i = 0; i < charger->num_registered_psy; i++)
+               cros_pchg_get_port_status(charger->ports[i], false);
+
+       return NOTIFY_OK;
+}
+
+static u32 cros_get_device_event(const struct charger_data *charger)
+{
+       struct ec_params_device_event req;
+       struct ec_response_device_event rsp;
+       struct device *dev = charger->dev;
+       int ret;
+
+       req.param = EC_DEVICE_EVENT_PARAM_GET_CURRENT_EVENTS;
+       ret = cros_pchg_ec_command(charger, 0, EC_CMD_DEVICE_EVENT,
+                                  &req, sizeof(req), &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_warn(dev, "Unable to get device events (err:%d)\n", ret);
+               return 0;
+       }
+
+       return rsp.event_mask;
+}
+
+static int cros_ec_notify(struct notifier_block *nb,
+                         unsigned long queued_during_suspend,
+                         void *data)
+{
+       struct cros_ec_device *ec_dev = (struct cros_ec_device *)data;
+       u32 host_event = cros_ec_get_host_event(ec_dev);
+       struct charger_data *charger =
+                       container_of(nb, struct charger_data, notifier);
+       u32 device_event_mask;
+
+       if (!host_event)
+               return NOTIFY_DONE;
+
+       if (!(host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_DEVICE)))
+               return NOTIFY_DONE;
+
+       /*
+        * todo: Retrieve device event mask in common place
+        * (e.g. cros_ec_proto.c).
+        */
+       device_event_mask = cros_get_device_event(charger);
+       if (!(device_event_mask & EC_DEVICE_EVENT_MASK(EC_DEVICE_EVENT_WLC)))
+               return NOTIFY_DONE;
+
+       return cros_pchg_event(charger, host_event);
+}
+
+static int cros_pchg_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+       struct cros_ec_device *ec_device = ec_dev->ec_dev;
+       struct power_supply_desc *psy_desc;
+       struct charger_data *charger;
+       struct power_supply *psy;
+       struct port_data *port;
+       struct notifier_block *nb;
+       int num_ports;
+       int ret;
+       int i;
+
+       charger = devm_kzalloc(dev, sizeof(*charger), GFP_KERNEL);
+       if (!charger)
+               return -ENOMEM;
+
+       charger->dev = dev;
+       charger->ec_dev = ec_dev;
+       charger->ec_device = ec_device;
+
+       ret = cros_pchg_port_count(charger);
+       if (ret <= 0) {
+               /*
+                * This feature is enabled by the EC and the kernel driver is
+                * included by default for CrOS devices. Don't need to be loud
+                * since this error can be normal.
+                */
+               dev_info(dev, "No peripheral charge ports (err:%d)\n", ret);
+               return -ENODEV;
+       }
+
+       if (!cros_pchg_cmd_ver_check(charger)) {
+               dev_err(dev, "EC_CMD_PCHG version %d isn't available.\n",
+                       pchg_cmd_version);
+               return -EOPNOTSUPP;
+       }
+
+       num_ports = ret;
+       if (num_ports > EC_PCHG_MAX_PORTS) {
+               dev_err(dev, "Too many peripheral charge ports (%d)\n",
+                       num_ports);
+               return -ENOBUFS;
+       }
+
+       dev_info(dev, "%d peripheral charge ports found\n", num_ports);
+
+       for (i = 0; i < num_ports; i++) {
+               struct power_supply_config psy_cfg = {};
+
+               port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+               if (!port)
+                       return -ENOMEM;
+
+               port->charger = charger;
+               port->port_number = i;
+               snprintf(port->name, sizeof(port->name), PCHG_DIR_NAME, i);
+
+               psy_desc = &port->psy_desc;
+               psy_desc->name = port->name;
+               psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
+               psy_desc->get_property = cros_pchg_get_prop;
+               psy_desc->external_power_changed = NULL;
+               psy_desc->properties = cros_pchg_props;
+               psy_desc->num_properties = ARRAY_SIZE(cros_pchg_props);
+               psy_cfg.drv_data = port;
+
+               psy = devm_power_supply_register(dev, psy_desc, &psy_cfg);
+               if (IS_ERR(psy))
+                       return dev_err_probe(dev, PTR_ERR(psy),
+                                       "Failed to register power supply\n");
+               port->psy = psy;
+
+               charger->ports[charger->num_registered_psy++] = port;
+       }
+
+       if (!charger->num_registered_psy)
+               return -ENODEV;
+
+       nb = &charger->notifier;
+       nb->notifier_call = cros_ec_notify;
+       ret = blocking_notifier_chain_register(&ec_dev->ec_dev->event_notifier,
+                                              nb);
+       if (ret < 0)
+               dev_err(dev, "Failed to register notifier (err:%d)\n", ret);
+
+       return 0;
+}
+
+static struct platform_driver cros_pchg_driver = {
+       .driver = {
+               .name = DRV_NAME,
+       },
+       .probe = cros_pchg_probe
+};
+
+module_platform_driver(cros_pchg_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC peripheral device charger");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index d110597..091868e 100644 (file)
@@ -679,7 +679,9 @@ static int cw_bat_probe(struct i2c_client *client)
                                                    &cw2015_bat_desc,
                                                    &psy_cfg);
        if (IS_ERR(cw_bat->rk_bat)) {
-               dev_err(cw_bat->dev, "Failed to register power supply\n");
+               /* dev_err_probe() handles -EPROBE_DEFER, so the probe can be retried later */
+               dev_err_probe(&client->dev, PTR_ERR(cw_bat->rk_bat),
+                       "Failed to register power supply\n");
                return PTR_ERR(cw_bat->rk_bat);
        }
 
index ce2041b..8dffae7 100644 (file)
@@ -36,8 +36,6 @@
 
 /* Interrupt mask bits */
 #define CONFIG_ALRT_BIT_ENBL   (1 << 2)
-#define STATUS_INTR_SOCMIN_BIT (1 << 10)
-#define STATUS_INTR_SOCMAX_BIT (1 << 14)
 
 #define VFSOC0_LOCK            0x0000
 #define VFSOC0_UNLOCK          0x0080
@@ -285,8 +283,6 @@ static int max17042_get_property(struct power_supply *psy,
        case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
                if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
                        ret = regmap_read(map, MAX17042_V_empty, &data);
-               else if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
-                       ret = regmap_read(map, MAX17055_V_empty, &data);
                else
                        ret = regmap_read(map, MAX17047_V_empty, &data);
                if (ret < 0)
@@ -748,7 +744,7 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
        struct max17042_config_data *config = chip->pdata->config_data;
 
        max17042_override_por(map, MAX17042_TGAIN, config->tgain);
-       max17042_override_por(map, MAx17042_TOFF, config->toff);
+       max17042_override_por(map, MAX17042_TOFF, config->toff);
        max17042_override_por(map, MAX17042_CGAIN, config->cgain);
        max17042_override_por(map, MAX17042_COFF, config->coff);
 
@@ -767,36 +763,36 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
        max17042_override_por(map, MAX17042_FilterCFG, config->filter_cfg);
        max17042_override_por(map, MAX17042_RelaxCFG, config->relax_cfg);
        max17042_override_por(map, MAX17042_MiscCFG, config->misc_cfg);
-       max17042_override_por(map, MAX17042_MaskSOC, config->masksoc);
 
        max17042_override_por(map, MAX17042_FullCAP, config->fullcap);
        max17042_override_por(map, MAX17042_FullCAPNom, config->fullcapnom);
-       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
-               max17042_override_por(map, MAX17042_SOC_empty,
-                                               config->socempty);
-       max17042_override_por(map, MAX17042_LAvg_empty, config->lavg_empty);
        max17042_override_por(map, MAX17042_dQacc, config->dqacc);
        max17042_override_por(map, MAX17042_dPacc, config->dpacc);
 
-       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
-               max17042_override_por(map, MAX17042_V_empty, config->vempty);
-       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
-               max17042_override_por(map, MAX17055_V_empty, config->vempty);
-       else
-               max17042_override_por(map, MAX17047_V_empty, config->vempty);
-       max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
-       max17042_override_por(map, MAX17042_TempLim, config->temp_lim);
-       max17042_override_por(map, MAX17042_FCTC, config->fctc);
        max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0);
        max17042_override_por(map, MAX17042_TempCo, config->tcompc0);
-       if (chip->chip_type &&
-           ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) ||
+
+       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) {
+               max17042_override_por(map, MAX17042_MaskSOC, config->masksoc);
+               max17042_override_por(map, MAX17042_SOC_empty, config->socempty);
+               max17042_override_por(map, MAX17042_V_empty, config->vempty);
+               max17042_override_por(map, MAX17042_EmptyTempCo, config->empty_tempco);
+               max17042_override_por(map, MAX17042_K_empty0, config->kempty0);
+       }
+
+       if ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) ||
            (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
-           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050))) {
-               max17042_override_por(map, MAX17042_EmptyTempCo,
-                                               config->empty_tempco);
-               max17042_override_por(map, MAX17042_K_empty0,
-                                               config->kempty0);
+           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050)) {
+               max17042_override_por(map, MAX17042_LAvg_empty, config->lavg_empty);
+               max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
+               max17042_override_por(map, MAX17042_TempLim, config->temp_lim);
+               max17042_override_por(map, MAX17042_FCTC, config->fctc);
+       }
+
+       if ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
+           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050) ||
+           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)) {
+               max17042_override_por(map, MAX17047_V_empty, config->vempty);
        }
 }
 
@@ -869,11 +865,14 @@ static irqreturn_t max17042_thread_handler(int id, void *dev)
 {
        struct max17042_chip *chip = dev;
        u32 val;
+       int ret;
 
-       regmap_read(chip->regmap, MAX17042_STATUS, &val);
-       if ((val & STATUS_INTR_SOCMIN_BIT) ||
-               (val & STATUS_INTR_SOCMAX_BIT)) {
-               dev_info(&chip->client->dev, "SOC threshold INTR\n");
+       ret = regmap_read(chip->regmap, MAX17042_STATUS, &val);
+       if (ret)
+               return IRQ_HANDLED;
+
+       if ((val & STATUS_SMN_BIT) || (val & STATUS_SMX_BIT)) {
+               dev_dbg(&chip->client->dev, "SOC threshold INTR\n");
                max17042_set_soc_threshold(chip, 1);
        }
 
@@ -1196,6 +1195,7 @@ static const struct of_device_id max17042_dt_match[] = {
        { .compatible = "maxim,max17047" },
        { .compatible = "maxim,max17050" },
        { .compatible = "maxim,max17055" },
+       { .compatible = "maxim,max77849-battery" },
        { },
 };
 MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -1206,6 +1206,7 @@ static const struct i2c_device_id max17042_id[] = {
        { "max17047", MAXIM_DEVICE_TYPE_MAX17047 },
        { "max17050", MAXIM_DEVICE_TYPE_MAX17050 },
        { "max17055", MAXIM_DEVICE_TYPE_MAX17055 },
+       { "max77849-battery", MAXIM_DEVICE_TYPE_MAX17047 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, max17042_id);
diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
new file mode 100644 (file)
index 0000000..3abaa72
--- /dev/null
@@ -0,0 +1,867 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <linux/devm-helpers.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+#define MT6360_PMU_CHG_CTRL1   0x311
+#define MT6360_PMU_CHG_CTRL2   0x312
+#define MT6360_PMU_CHG_CTRL3   0x313
+#define MT6360_PMU_CHG_CTRL4   0x314
+#define MT6360_PMU_CHG_CTRL5   0x315
+#define MT6360_PMU_CHG_CTRL6   0x316
+#define MT6360_PMU_CHG_CTRL7   0x317
+#define MT6360_PMU_CHG_CTRL8   0x318
+#define MT6360_PMU_CHG_CTRL9   0x319
+#define MT6360_PMU_CHG_CTRL10  0x31A
+#define MT6360_PMU_DEVICE_TYPE 0x322
+#define MT6360_PMU_USB_STATUS1 0x327
+#define MT6360_PMU_CHG_STAT    0x34A
+#define MT6360_PMU_CHG_CTRL19  0x361
+#define MT6360_PMU_FOD_STAT    0x3E7
+
+/* MT6360_PMU_CHG_CTRL1 */
+#define MT6360_FSLP_SHFT       (3)
+#define MT6360_FSLP_MASK       BIT(MT6360_FSLP_SHFT)
+#define MT6360_OPA_MODE_SHFT   (0)
+#define MT6360_OPA_MODE_MASK   BIT(MT6360_OPA_MODE_SHFT)
+/* MT6360_PMU_CHG_CTRL2 */
+#define MT6360_IINLMTSEL_SHFT  (2)
+#define MT6360_IINLMTSEL_MASK  GENMASK(3, 2)
+/* MT6360_PMU_CHG_CTRL3 */
+#define MT6360_IAICR_SHFT      (2)
+#define MT6360_IAICR_MASK      GENMASK(7, 2)
+#define MT6360_ILIM_EN_MASK    BIT(0)
+/* MT6360_PMU_CHG_CTRL4 */
+#define MT6360_VOREG_SHFT      (1)
+#define MT6360_VOREG_MASK      GENMASK(7, 1)
+/* MT6360_PMU_CHG_CTRL5 */
+#define MT6360_VOBST_MASK      GENMASK(7, 2)
+/* MT6360_PMU_CHG_CTRL6 */
+#define MT6360_VMIVR_SHFT      (1)
+#define MT6360_VMIVR_MASK      GENMASK(7, 1)
+/* MT6360_PMU_CHG_CTRL7 */
+#define MT6360_ICHG_SHFT       (2)
+#define MT6360_ICHG_MASK       GENMASK(7, 2)
+/* MT6360_PMU_CHG_CTRL8 */
+#define MT6360_IPREC_SHFT      (0)
+#define MT6360_IPREC_MASK      GENMASK(3, 0)
+/* MT6360_PMU_CHG_CTRL9 */
+#define MT6360_IEOC_SHFT       (4)
+#define MT6360_IEOC_MASK       GENMASK(7, 4)
+/* MT6360_PMU_CHG_CTRL10 */
+#define MT6360_OTG_OC_MASK     GENMASK(3, 0)
+/* MT6360_PMU_DEVICE_TYPE */
+#define MT6360_USBCHGEN_MASK   BIT(7)
+/* MT6360_PMU_USB_STATUS1 */
+#define MT6360_USB_STATUS_SHFT (4)
+#define MT6360_USB_STATUS_MASK GENMASK(6, 4)
+/* MT6360_PMU_CHG_STAT */
+#define MT6360_CHG_STAT_SHFT   (6)
+#define MT6360_CHG_STAT_MASK   GENMASK(7, 6)
+#define MT6360_VBAT_LVL_MASK   BIT(5)
+/* MT6360_PMU_CHG_CTRL19 */
+#define MT6360_VINOVP_SHFT     (5)
+#define MT6360_VINOVP_MASK     GENMASK(6, 5)
+/* MT6360_PMU_FOD_STAT */
+#define MT6360_CHRDET_EXT_MASK BIT(4)
+
+/* uV */
+#define MT6360_VMIVR_MIN       3900000
+#define MT6360_VMIVR_MAX       13400000
+#define MT6360_VMIVR_STEP      100000
+/* uA */
+#define MT6360_ICHG_MIN                100000
+#define MT6360_ICHG_MAX                5000000
+#define MT6360_ICHG_STEP       100000
+/* uV */
+#define MT6360_VOREG_MIN       3900000
+#define MT6360_VOREG_MAX       4710000
+#define MT6360_VOREG_STEP      10000
+/* uA */
+#define MT6360_AICR_MIN                100000
+#define MT6360_AICR_MAX                3250000
+#define MT6360_AICR_STEP       50000
+/* uA */
+#define MT6360_IPREC_MIN       100000
+#define MT6360_IPREC_MAX       850000
+#define MT6360_IPREC_STEP      50000
+/* uA */
+#define MT6360_IEOC_MIN                100000
+#define MT6360_IEOC_MAX                850000
+#define MT6360_IEOC_STEP       50000
+
+enum {
+       MT6360_RANGE_VMIVR,
+       MT6360_RANGE_ICHG,
+       MT6360_RANGE_VOREG,
+       MT6360_RANGE_AICR,
+       MT6360_RANGE_IPREC,
+       MT6360_RANGE_IEOC,
+       MT6360_RANGE_MAX,
+};
+
+#define MT6360_LINEAR_RANGE(idx, _min, _min_sel, _max_sel, _step) \
+       [idx] = REGULATOR_LINEAR_RANGE(_min, _min_sel, _max_sel, _step)
+
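+/*
+ * Each entry maps a register selector range to physical uA/uV values; the
+ * linear_range helpers are used later in the driver to convert between
+ * selector and value.
+ */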
+static const struct linear_range mt6360_chg_range[MT6360_RANGE_MAX] = {
+       MT6360_LINEAR_RANGE(MT6360_RANGE_VMIVR, 3900000, 0, 0x5F, 100000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_ICHG, 100000, 0, 0x31, 100000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_VOREG, 3900000, 0, 0x51, 10000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_AICR, 100000, 0, 0x3F, 50000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_IPREC, 100000, 0, 0x0F, 50000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_IEOC, 100000, 0, 0x0F, 50000),
+};
+
+struct mt6360_chg_info {
+       struct device *dev;
+       struct regmap *regmap;
+       struct power_supply_desc psy_desc;
+       struct power_supply *psy;
+       struct regulator_dev *otg_rdev;
+       struct mutex chgdet_lock;
+       u32 vinovp;
+       bool pwr_rdy;
+       bool bc12_en;
+       int psy_usb_type;
+       struct work_struct chrdet_work;
+};
+
+enum mt6360_iinlmtsel {
+       MT6360_IINLMTSEL_AICR_3250 = 0,
+       MT6360_IINLMTSEL_CHG_TYPE,
+       MT6360_IINLMTSEL_AICR,
+       MT6360_IINLMTSEL_LOWER_LEVEL,
+};
+
+enum mt6360_pmu_chg_type {
+       MT6360_CHG_TYPE_NOVBUS = 0,
+       MT6360_CHG_TYPE_UNDER_GOING,
+       MT6360_CHG_TYPE_SDP,
+       MT6360_CHG_TYPE_SDPNSTD,
+       MT6360_CHG_TYPE_DCP,
+       MT6360_CHG_TYPE_CDP,
+       MT6360_CHG_TYPE_DISABLE_BC12,
+       MT6360_CHG_TYPE_MAX,
+};
+
+static enum power_supply_usb_type mt6360_charger_usb_types[] = {
+       POWER_SUPPLY_USB_TYPE_UNKNOWN,
+       POWER_SUPPLY_USB_TYPE_SDP,
+       POWER_SUPPLY_USB_TYPE_DCP,
+       POWER_SUPPLY_USB_TYPE_CDP,
+};
+
+static int mt6360_get_chrdet_ext_stat(struct mt6360_chg_info *mci,
+                                            bool *pwr_rdy)
+{
+       int ret;
+       unsigned int regval;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_FOD_STAT, &regval);
+       if (ret < 0)
+               return ret;
+       *pwr_rdy = (regval & MT6360_CHRDET_EXT_MASK) ? true : false;
+       return 0;
+}
+
+static int mt6360_charger_get_online(struct mt6360_chg_info *mci,
+                                    union power_supply_propval *val)
+{
+       int ret;
+       bool pwr_rdy;
+
+       ret = mt6360_get_chrdet_ext_stat(mci, &pwr_rdy);
+       if (ret < 0)
+               return ret;
+       val->intval = pwr_rdy ? true : false;
+       return 0;
+}
+
+static int mt6360_charger_get_status(struct mt6360_chg_info *mci,
+                                    union power_supply_propval *val)
+{
+       int status, ret;
+       unsigned int regval;
+       bool pwr_rdy;
+
+       ret = mt6360_get_chrdet_ext_stat(mci, &pwr_rdy);
+       if (ret < 0)
+               return ret;
+       if (!pwr_rdy) {
+               status = POWER_SUPPLY_STATUS_DISCHARGING;
+               goto out;
+       }
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_STAT, &regval);
+       if (ret < 0)
+               return ret;
+       regval &= MT6360_CHG_STAT_MASK;
+       regval >>= MT6360_CHG_STAT_SHFT;
+       switch (regval) {
+       case 0x0:
+               status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+               break;
+       case 0x1:
+               status = POWER_SUPPLY_STATUS_CHARGING;
+               break;
+       case 0x2:
+               status = POWER_SUPPLY_STATUS_FULL;
+               break;
+       default:
+               ret = -EIO;
+       }
+out:
+       if (!ret)
+               val->intval = status;
+       return ret;
+}
+
+static int mt6360_charger_get_charge_type(struct mt6360_chg_info *mci,
+                                         union power_supply_propval *val)
+{
+       int type, ret;
+       unsigned int regval;
+       u8 chg_stat;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_STAT, &regval);
+       if (ret < 0)
+               return ret;
+
+       chg_stat = (regval & MT6360_CHG_STAT_MASK) >> MT6360_CHG_STAT_SHFT;
+       switch (chg_stat) {
+       case 0x01: /* Charge in Progress */
+               if (regval & MT6360_VBAT_LVL_MASK)
+                       type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+               else
+                       type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+               break;
+       case 0x00: /* Not Charging */
+       case 0x02: /* Charge Done */
+       case 0x03: /* Charge Fault */
+       default:
+               type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+               break;
+       }
+
+       val->intval = type;
+       return 0;
+}
+
+static int mt6360_charger_get_ichg(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL7, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_ICHG_MASK) >> MT6360_ICHG_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_ICHG], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_max_ichg(struct mt6360_chg_info *mci,
+                                      union power_supply_propval *val)
+{
+       val->intval = MT6360_ICHG_MAX;
+       return 0;
+}
+
+static int mt6360_charger_get_cv(struct mt6360_chg_info *mci,
+                                union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL4, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_VOREG_MASK) >> MT6360_VOREG_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_VOREG], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_max_cv(struct mt6360_chg_info *mci,
+                                    union power_supply_propval *val)
+{
+       val->intval = MT6360_VOREG_MAX;
+       return 0;
+}
+
+static int mt6360_charger_get_aicr(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL3, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_IAICR_MASK) >> MT6360_IAICR_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_AICR], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_mivr(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL6, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_VMIVR_MASK) >> MT6360_VMIVR_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_VMIVR], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_iprechg(struct mt6360_chg_info *mci,
+                                     union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL8, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_IPREC_MASK) >> MT6360_IPREC_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_IPREC], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_ieoc(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL9, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_IEOC_MASK) >> MT6360_IEOC_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_IEOC], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_set_online(struct mt6360_chg_info *mci,
+                                    const union power_supply_propval *val)
+{
+       u8 force_sleep = val->intval ? 0 : 1;
+
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL1,
+                                 MT6360_FSLP_MASK,
+                                 force_sleep << MT6360_FSLP_SHFT);
+}
+
+static int mt6360_charger_set_ichg(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_ICHG], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL7,
+                                 MT6360_ICHG_MASK,
+                                 sel << MT6360_ICHG_SHFT);
+}
+
+static int mt6360_charger_set_cv(struct mt6360_chg_info *mci,
+                                const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_VOREG], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL4,
+                                 MT6360_VOREG_MASK,
+                                 sel << MT6360_VOREG_SHFT);
+}
+
+static int mt6360_charger_set_aicr(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_AICR], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL3,
+                                 MT6360_IAICR_MASK,
+                                 sel << MT6360_IAICR_SHFT);
+}
+
+static int mt6360_charger_set_mivr(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_VMIVR], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL3,
+                                 MT6360_VMIVR_MASK,
+                                 sel << MT6360_VMIVR_SHFT);
+}
+
+static int mt6360_charger_set_iprechg(struct mt6360_chg_info *mci,
+                                     const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_IPREC], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL8,
+                                 MT6360_IPREC_MASK,
+                                 sel << MT6360_IPREC_SHFT);
+}
+
+static int mt6360_charger_set_ieoc(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_IEOC], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL9,
+                                 MT6360_IEOC_MASK,
+                                 sel << MT6360_IEOC_SHFT);
+}
+
+static int mt6360_charger_get_property(struct power_supply *psy,
+                                      enum power_supply_property psp,
+                                      union power_supply_propval *val)
+{
+       struct mt6360_chg_info *mci = power_supply_get_drvdata(psy);
+       int ret = 0;
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_ONLINE:
+               ret = mt6360_charger_get_online(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_STATUS:
+               ret = mt6360_charger_get_status(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TYPE:
+               ret = mt6360_charger_get_charge_type(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+               ret = mt6360_charger_get_ichg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+               ret = mt6360_charger_get_max_ichg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+               ret = mt6360_charger_get_cv(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+               ret = mt6360_charger_get_max_cv(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+               ret = mt6360_charger_get_aicr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+               ret = mt6360_charger_get_mivr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+               ret = mt6360_charger_get_iprechg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               ret = mt6360_charger_get_ieoc(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_USB_TYPE:
+               val->intval = mci->psy_usb_type;
+               break;
+       default:
+               ret = -ENODATA;
+       }
+       return ret;
+}
+
+static int mt6360_charger_set_property(struct power_supply *psy,
+                                      enum power_supply_property psp,
+                                      const union power_supply_propval *val)
+{
+       struct mt6360_chg_info *mci = power_supply_get_drvdata(psy);
+       int ret;
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_ONLINE:
+               ret = mt6360_charger_set_online(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+               ret = mt6360_charger_set_ichg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+               ret = mt6360_charger_set_cv(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+               ret = mt6360_charger_set_aicr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+               ret = mt6360_charger_set_mivr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+               ret = mt6360_charger_set_iprechg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               ret = mt6360_charger_set_ieoc(mci, val);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       return ret;
+}
+
+static int mt6360_charger_property_is_writeable(struct power_supply *psy,
+                                              enum power_supply_property psp)
+{
+       switch (psp) {
+       case POWER_SUPPLY_PROP_ONLINE:
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+       case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+       case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+       case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+       case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static enum power_supply_property mt6360_charger_properties[] = {
+       POWER_SUPPLY_PROP_ONLINE,
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_CHARGE_TYPE,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+       POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+       POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+       POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
+       POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+       POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+static const struct power_supply_desc mt6360_charger_desc = {
+       .type                   = POWER_SUPPLY_TYPE_USB,
+       .properties             = mt6360_charger_properties,
+       .num_properties         = ARRAY_SIZE(mt6360_charger_properties),
+       .get_property           = mt6360_charger_get_property,
+       .set_property           = mt6360_charger_set_property,
+       .property_is_writeable  = mt6360_charger_property_is_writeable,
+       .usb_types              = mt6360_charger_usb_types,
+       .num_usb_types          = ARRAY_SIZE(mt6360_charger_usb_types),
+};
+
+static const struct regulator_ops mt6360_chg_otg_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_desc mt6360_otg_rdesc = {
+       .of_match = "usb-otg-vbus",
+       .name = "usb-otg-vbus",
+       .ops = &mt6360_chg_otg_ops,
+       .owner = THIS_MODULE,
+       .type = REGULATOR_VOLTAGE,
+       .min_uV = 4425000,
+       .uV_step = 25000,
+       .n_voltages = 57,
+       .vsel_reg = MT6360_PMU_CHG_CTRL5,
+       .vsel_mask = MT6360_VOBST_MASK,
+       .enable_reg = MT6360_PMU_CHG_CTRL1,
+       .enable_mask = MT6360_OPA_MODE_MASK,
+};
+
+static irqreturn_t mt6360_pmu_attach_i_handler(int irq, void *data)
+{
+       struct mt6360_chg_info *mci = data;
+       int ret;
+       unsigned int usb_status;
+       int last_usb_type;
+
+       mutex_lock(&mci->chgdet_lock);
+       if (!mci->bc12_en) {
+               dev_warn(mci->dev, "Received attach interrupt, bc12 disabled, ignore irq\n");
+               goto out;
+       }
+       last_usb_type = mci->psy_usb_type;
+       /* Plug in */
+       ret = regmap_read(mci->regmap, MT6360_PMU_USB_STATUS1, &usb_status);
+       if (ret < 0)
+               goto out;
+       usb_status &= MT6360_USB_STATUS_MASK;
+       usb_status >>= MT6360_USB_STATUS_SHFT;
+       switch (usb_status) {
+       case MT6360_CHG_TYPE_NOVBUS:
+               dev_dbg(mci->dev, "Received attach interrupt, no vbus\n");
+               goto out;
+       case MT6360_CHG_TYPE_UNDER_GOING:
+               dev_dbg(mci->dev, "Received attach interrupt, detection still ongoing\n");
+               goto out;
+       case MT6360_CHG_TYPE_SDP:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+               break;
+       case MT6360_CHG_TYPE_SDPNSTD:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+               break;
+       case MT6360_CHG_TYPE_CDP:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_CDP;
+               break;
+       case MT6360_CHG_TYPE_DCP:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+               break;
+       case MT6360_CHG_TYPE_DISABLE_BC12:
+               dev_dbg(mci->dev, "Received attach interrupt, bc12 detection not enabled\n");
+               goto out;
+       default:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+               dev_dbg(mci->dev, "Received attach interrupt, reserved address\n");
+               goto out;
+       }
+
+       dev_dbg(mci->dev, "Received attach interrupt, chg_type = %d\n", mci->psy_usb_type);
+       if (last_usb_type != mci->psy_usb_type)
+               power_supply_changed(mci->psy);
+out:
+       mutex_unlock(&mci->chgdet_lock);
+       return IRQ_HANDLED;
+}
+
+static void mt6360_handle_chrdet_ext_evt(struct mt6360_chg_info *mci)
+{
+       int ret;
+       bool pwr_rdy;
+
+       mutex_lock(&mci->chgdet_lock);
+       ret = mt6360_get_chrdet_ext_stat(mci, &pwr_rdy);
+       if (ret < 0)
+               goto out;
+       if (mci->pwr_rdy == pwr_rdy) {
+               dev_dbg(mci->dev, "Received vbus interrupt, pwr_rdy is same(%d)\n", pwr_rdy);
+               goto out;
+       }
+       mci->pwr_rdy = pwr_rdy;
+       dev_dbg(mci->dev, "Received vbus interrupt, pwr_rdy = %d\n", pwr_rdy);
+       if (!pwr_rdy) {
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+               power_supply_changed(mci->psy);
+       }
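+       /* Only run charger-type (BC1.2) detection while VBUS is present */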
+       ret = regmap_update_bits(mci->regmap,
+                                MT6360_PMU_DEVICE_TYPE,
+                                MT6360_USBCHGEN_MASK,
+                                pwr_rdy ? MT6360_USBCHGEN_MASK : 0);
+       if (ret < 0)
+               goto out;
+       mci->bc12_en = pwr_rdy;
+out:
+       mutex_unlock(&mci->chgdet_lock);
+}
+
+static void mt6360_chrdet_work(struct work_struct *work)
+{
+       struct mt6360_chg_info *mci = (struct mt6360_chg_info *)container_of(
+                                    work, struct mt6360_chg_info, chrdet_work);
+
+       mt6360_handle_chrdet_ext_evt(mci);
+}
+
+static irqreturn_t mt6360_pmu_chrdet_ext_evt_handler(int irq, void *data)
+{
+       struct mt6360_chg_info *mci = data;
+
+       mt6360_handle_chrdet_ext_evt(mci);
+       return IRQ_HANDLED;
+}
+
+static int mt6360_chg_irq_register(struct platform_device *pdev)
+{
+       const struct {
+               const char *name;
+               irq_handler_t handler;
+       } irq_descs[] = {
+               { "attach_i", mt6360_pmu_attach_i_handler },
+               { "chrdet_ext_evt", mt6360_pmu_chrdet_ext_evt_handler }
+       };
+       int i, ret;
+
+       for (i = 0; i < ARRAY_SIZE(irq_descs); i++) {
+               ret = platform_get_irq_byname(pdev, irq_descs[i].name);
+               if (ret < 0)
+                       return ret;
+
+               ret = devm_request_threaded_irq(&pdev->dev, ret, NULL,
+                                               irq_descs[i].handler,
+                                               IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                               irq_descs[i].name,
+                                               platform_get_drvdata(pdev));
+               if (ret < 0)
+                       return dev_err_probe(&pdev->dev, ret, "Failed to request %s irq\n",
+                                            irq_descs[i].name);
+       }
+
+       return 0;
+}
+
+static u32 mt6360_vinovp_trans_to_sel(u32 val)
+{
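+       /*
+        * Supported input-OVP thresholds in uV; the index is what gets
+        * written to the VINOVP field.
+        */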
+       u32 vinovp_tbl[] = { 5500000, 6500000, 11000000, 14500000 };
+       int i;
+
+       /* Select the largest supported value that does not exceed the requested one */
+       for (i = 0; i < ARRAY_SIZE(vinovp_tbl) - 1; i++) {
+               if (val < vinovp_tbl[i + 1])
+                       break;
+       }
+       return i;
+}
+
+static int mt6360_chg_init_setting(struct mt6360_chg_info *mci)
+{
+       int ret;
+       u32 sel;
+
+       sel = mt6360_vinovp_trans_to_sel(mci->vinovp);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL19,
+                                 MT6360_VINOVP_MASK, sel << MT6360_VINOVP_SHFT);
+       if (ret)
+               return dev_err_probe(mci->dev, ret, "%s: Failed to apply vinovp\n", __func__);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_DEVICE_TYPE,
+                                MT6360_USBCHGEN_MASK, 0);
+       if (ret)
+               return dev_err_probe(mci->dev, ret, "%s: Failed to disable bc12\n", __func__);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL2,
+                                MT6360_IINLMTSEL_MASK,
+                                MT6360_IINLMTSEL_AICR <<
+                                       MT6360_IINLMTSEL_SHFT);
+       if (ret)
+               return dev_err_probe(mci->dev, ret,
+                                    "%s: Failed to switch iinlmtsel to aicr\n", __func__);
+       usleep_range(5000, 6000);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL3,
+                                MT6360_ILIM_EN_MASK, 0);
+       if (ret)
+               return dev_err_probe(mci->dev, ret,
+                                    "%s: Failed to disable ilim\n", __func__);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL10,
+                                MT6360_OTG_OC_MASK, MT6360_OTG_OC_MASK);
+       if (ret)
+               return dev_err_probe(mci->dev, ret,
+                                    "%s: Failed to config otg oc to 3A\n", __func__);
+       return 0;
+}
+
+static int mt6360_charger_probe(struct platform_device *pdev)
+{
+       struct mt6360_chg_info *mci;
+       struct power_supply_config charger_cfg = {};
+       struct regulator_config config = { };
+       int ret;
+
+       mci = devm_kzalloc(&pdev->dev, sizeof(*mci), GFP_KERNEL);
+       if (!mci)
+               return -ENOMEM;
+
+       mci->dev = &pdev->dev;
+       mci->vinovp = 6500000;
+       mutex_init(&mci->chgdet_lock);
+       platform_set_drvdata(pdev, mci);
+       devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work);
+
+       ret = device_property_read_u32(&pdev->dev, "richtek,vinovp-microvolt", &mci->vinovp);
+       if (ret)
+               dev_warn(&pdev->dev, "Failed to parse vinovp in DT, keeping default 6.5V\n");
+
+       mci->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+       if (!mci->regmap)
+               return dev_err_probe(&pdev->dev, -ENODEV, "Failed to get parent regmap\n");
+
+       ret = mt6360_chg_init_setting(mci);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to apply initial settings\n");
+
+       memcpy(&mci->psy_desc, &mt6360_charger_desc, sizeof(mci->psy_desc));
+       mci->psy_desc.name = dev_name(&pdev->dev);
+       charger_cfg.drv_data = mci;
+       charger_cfg.of_node = pdev->dev.of_node;
+       mci->psy = devm_power_supply_register(&pdev->dev,
+                                             &mci->psy_desc, &charger_cfg);
+       if (IS_ERR(mci->psy))
+               return dev_err_probe(&pdev->dev, PTR_ERR(mci->psy),
+                                    "Failed to register power supply dev\n");
+
+       ret = mt6360_chg_irq_register(pdev);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to register irqs\n");
+
+       config.dev = &pdev->dev;
+       config.regmap = mci->regmap;
+       mci->otg_rdev = devm_regulator_register(&pdev->dev, &mt6360_otg_rdesc,
+                                               &config);
+       if (IS_ERR(mci->otg_rdev))
+               return PTR_ERR(mci->otg_rdev);
+
+       schedule_work(&mci->chrdet_work);
+
+       return 0;
+}
+
+static const struct of_device_id __maybe_unused mt6360_charger_of_id[] = {
+       { .compatible = "mediatek,mt6360-chg", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, mt6360_charger_of_id);
+
+static const struct platform_device_id mt6360_charger_id[] = {
+       { "mt6360-chg", 0 },
+       {},
+};
+MODULE_DEVICE_TABLE(platform, mt6360_charger_id);
+
+static struct platform_driver mt6360_charger_driver = {
+       .driver = {
+               .name = "mt6360-chg",
+               .of_match_table = of_match_ptr(mt6360_charger_of_id),
+       },
+       .probe = mt6360_charger_probe,
+       .id_table = mt6360_charger_id,
+};
+module_platform_driver(mt6360_charger_driver);
+
+MODULE_AUTHOR("Gene Chen <gene_chen@richtek.com>");
+MODULE_DESCRIPTION("MT6360 Charger Driver");
+MODULE_LICENSE("GPL");
index d99e2f1..0c2132c 100644 (file)
@@ -571,6 +571,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
        int err, len, index;
        const __be32 *list;
 
+       info->technology                     = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
        info->energy_full_design_uwh         = -EINVAL;
        info->charge_full_design_uah         = -EINVAL;
        info->voltage_min_design_uv          = -EINVAL;
@@ -618,6 +619,24 @@ int power_supply_get_battery_info(struct power_supply *psy,
         * Documentation/power/power_supply_class.rst.
         */
 
+       if (!of_property_read_string(battery_np, "device-chemistry", &value)) {
+               if (!strcmp("nickel-cadmium", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_NiCd;
+               else if (!strcmp("nickel-metal-hydride", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_NiMH;
+               else if (!strcmp("lithium-ion", value))
+                       /* Imprecise lithium-ion type */
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LION;
+               else if (!strcmp("lithium-ion-polymer", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LIPO;
+               else if (!strcmp("lithium-ion-iron-phosphate", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LiFe;
+               else if (!strcmp("lithium-ion-manganese-oxide", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LiMn;
+               else
+                       dev_warn(&psy->dev, "%s unknown battery type\n", value);
+       }
+
        of_property_read_u32(battery_np, "energy-full-design-microwatt-hours",
                             &info->energy_full_design_uwh);
        of_property_read_u32(battery_np, "charge-full-design-microamp-hours",
index c890e1c..84cc9fb 100644 (file)
@@ -929,11 +929,8 @@ static int smbb_charger_probe(struct platform_device *pdev)
                int irq;
 
                irq = platform_get_irq_byname(pdev, smbb_charger_irqs[i].name);
-               if (irq < 0) {
-                       dev_err(&pdev->dev, "failed to get irq '%s'\n",
-                               smbb_charger_irqs[i].name);
+               if (irq < 0)
                        return irq;
-               }
 
                smbb_charger_irqs[i].handler(irq, chg);
 
index 8190619..a5e09ac 100644 (file)
@@ -9,10 +9,12 @@
 #include <linux/device.h>
 #include <linux/bitops.h>
 #include <linux/errno.h>
+#include <linux/iio/consumer.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mfd/rn5t618.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/regmap.h>
@@ -64,6 +66,8 @@ struct rn5t618_power_info {
        struct power_supply *battery;
        struct power_supply *usb;
        struct power_supply *adp;
+       struct iio_channel *channel_vusb;
+       struct iio_channel *channel_vadp;
        int irq;
 };
 
@@ -77,6 +81,7 @@ static enum power_supply_usb_type rn5t618_usb_types[] = {
 static enum power_supply_property rn5t618_usb_props[] = {
        /* input current limit is not very accurate */
        POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_USB_TYPE,
        POWER_SUPPLY_PROP_ONLINE,
@@ -85,6 +90,7 @@ static enum power_supply_property rn5t618_usb_props[] = {
 static enum power_supply_property rn5t618_adp_props[] = {
        /* input current limit is not very accurate */
        POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_ONLINE,
 };
@@ -463,6 +469,15 @@ static int rn5t618_adp_get_property(struct power_supply *psy,
                        return ret;
 
                val->intval = FROM_CUR_REG(regval);
+               break;
+       case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+               if (!info->channel_vadp)
+                       return -ENODATA;
+
+               ret = iio_read_channel_processed_scale(info->channel_vadp, &val->intval, 1000);
+               if (ret < 0)
+                       return ret;
+
                break;
        default:
                return -EINVAL;
@@ -588,6 +603,15 @@ static int rn5t618_usb_get_property(struct power_supply *psy,
 
                        val->intval = FROM_CUR_REG(regval);
                }
+               break;
+       case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+               if (!info->channel_vusb)
+                       return -ENODATA;
+
+               ret = iio_read_channel_processed_scale(info->channel_vusb, &val->intval, 1000);
+               if (ret < 0)
+                       return ret;
+
                break;
        default:
                return -EINVAL;
@@ -711,6 +735,20 @@ static int rn5t618_power_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, info);
 
+       info->channel_vusb = devm_iio_channel_get(&pdev->dev, "vusb");
+       if (IS_ERR(info->channel_vusb)) {
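+               /* -ENODEV likely means the supplying ADC has not probed yet */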
+               if (PTR_ERR(info->channel_vusb) == -ENODEV)
+                       return -EPROBE_DEFER;
+               return PTR_ERR(info->channel_vusb);
+       }
+
+       info->channel_vadp = devm_iio_channel_get(&pdev->dev, "vadp");
+       if (IS_ERR(info->channel_vadp)) {
+               if (PTR_ERR(info->channel_vadp) == -ENODEV)
+                       return -EPROBE_DEFER;
+               return PTR_ERR(info->channel_vadp);
+       }
+
        ret = regmap_read(info->rn5t618->regmap, RN5T618_CONTROL, &v);
        if (ret)
                return ret;
index f84dbaa..c4a95b0 100644 (file)
@@ -31,8 +31,9 @@ enum {
        REG_CURRENT_AVG,
        REG_MAX_ERR,
        REG_CAPACITY,
-       REG_TIME_TO_EMPTY,
-       REG_TIME_TO_FULL,
+       REG_TIME_TO_EMPTY_NOW,
+       REG_TIME_TO_EMPTY_AVG,
+       REG_TIME_TO_FULL_AVG,
        REG_STATUS,
        REG_CAPACITY_LEVEL,
        REG_CYCLE_COUNT,
@@ -102,7 +103,7 @@ static const struct chip_data {
        [REG_TEMPERATURE] =
                SBS_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535),
        [REG_VOLTAGE] =
-               SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000),
+               SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 65535),
        [REG_CURRENT_NOW] =
                SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
        [REG_CURRENT_AVG] =
@@ -119,9 +120,11 @@ static const struct chip_data {
                SBS_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
        [REG_FULL_CHARGE_CAPACITY_CHARGE] =
                SBS_DATA(POWER_SUPPLY_PROP_CHARGE_FULL, 0x10, 0, 65535),
-       [REG_TIME_TO_EMPTY] =
+       [REG_TIME_TO_EMPTY_NOW] =
+               SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, 0x11, 0, 65535),
+       [REG_TIME_TO_EMPTY_AVG] =
                SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0, 65535),
-       [REG_TIME_TO_FULL] =
+       [REG_TIME_TO_FULL_AVG] =
                SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0, 65535),
        [REG_CHARGE_CURRENT] =
                SBS_DATA(POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, 0x14, 0, 65535),
@@ -165,6 +168,7 @@ static const enum power_supply_property sbs_properties[] = {
        POWER_SUPPLY_PROP_CAPACITY,
        POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN,
        POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
        POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
        POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
@@ -748,6 +752,7 @@ static void  sbs_unit_adjustment(struct i2c_client *client,
                val->intval -= TEMP_KELVIN_TO_CELSIUS;
                break;
 
+       case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
        case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
        case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
                /* sbs provides time to empty and time to full in minutes.
@@ -966,6 +971,7 @@ static int sbs_get_property(struct power_supply *psy,
        case POWER_SUPPLY_PROP_CURRENT_NOW:
        case POWER_SUPPLY_PROP_CURRENT_AVG:
        case POWER_SUPPLY_PROP_TEMP:
+       case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
        case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
        case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
        case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
index 1ae8374..ae45069 100644 (file)
@@ -1229,10 +1229,8 @@ static int sc27xx_fgu_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "no irq resource specified\n");
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_threaded_irq(data->dev, irq, NULL,
                                        sc27xx_fgu_interrupt,
index df24042..753944e 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/power_supply.h>
 #include <linux/property.h>
 #include <linux/regmap.h>
+#include <linux/regulator/driver.h>
 
 #include <dt-bindings/power/summit,smb347-charger.h>
 
@@ -55,6 +56,7 @@
 #define CFG_PIN_EN_CTRL_ACTIVE_LOW             0x60
 #define CFG_PIN_EN_APSD_IRQ                    BIT(1)
 #define CFG_PIN_EN_CHARGER_ERROR               BIT(2)
+#define CFG_PIN_EN_CTRL                                BIT(4)
 #define CFG_THERM                              0x07
 #define CFG_THERM_SOFT_HOT_COMPENSATION_MASK   0x03
 #define CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT  0
 #define CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT 2
 #define CFG_THERM_MONITOR_DISABLED             BIT(4)
 #define CFG_SYSOK                              0x08
+#define CFG_SYSOK_INOK_ACTIVE_HIGH             BIT(0)
 #define CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED  BIT(2)
 #define CFG_OTHER                              0x09
 #define CFG_OTHER_RID_MASK                     0xc0
 #define CFG_OTHER_RID_ENABLED_AUTO_OTG         0xc0
 #define CFG_OTG                                        0x0a
 #define CFG_OTG_TEMP_THRESHOLD_MASK            0x30
+#define CFG_OTG_CURRENT_LIMIT_250mA            BIT(2)
+#define CFG_OTG_CURRENT_LIMIT_750mA            BIT(3)
 #define CFG_OTG_TEMP_THRESHOLD_SHIFT           4
 #define CFG_OTG_CC_COMPENSATION_MASK           0xc0
 #define CFG_OTG_CC_COMPENSATION_SHIFT          6
@@ -91,6 +96,7 @@
 #define CMD_A                                  0x30
 #define CMD_A_CHG_ENABLED                      BIT(1)
 #define CMD_A_SUSPEND_ENABLED                  BIT(2)
+#define CMD_A_OTG_ENABLED                      BIT(4)
 #define CMD_A_ALLOW_WRITE                      BIT(7)
 #define CMD_B                                  0x31
 #define CMD_C                                  0x33
  * @regmap: pointer to driver regmap
  * @mains: power_supply instance for AC/DC power
  * @usb: power_supply instance for USB power
+ * @usb_rdev: USB VBUS regulator device
  * @id: SMB charger ID
  * @mains_online: is AC/DC input connected
  * @usb_online: is USB input connected
- * @charging_enabled: is charging enabled
  * @irq_unsupported: is interrupt unsupported by SMB hardware
+ * @usb_vbus_enabled: is USB VBUS powered by SMB charger
  * @max_charge_current: maximum current (in uA) the battery can be charged
  * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
  * @pre_charge_current: current (in uA) to use in pre-charging phase
  * @use_usb_otg: USB OTG output can be used (not implemented yet)
  * @enable_control: how charging enable/disable is controlled
  *                 (driver/pin controls)
+ * @inok_polarity: polarity of INOK signal which denotes presence of external
+ *                power supply
  *
  * @use_main, @use_usb, and @use_usb_otg are means to enable/disable
  * hardware support for these. This is useful when we want to have for
@@ -189,11 +198,12 @@ struct smb347_charger {
        struct regmap           *regmap;
        struct power_supply     *mains;
        struct power_supply     *usb;
+       struct regulator_dev    *usb_rdev;
        unsigned int            id;
        bool                    mains_online;
        bool                    usb_online;
-       bool                    charging_enabled;
        bool                    irq_unsupported;
+       bool                    usb_vbus_enabled;
 
        unsigned int            max_charge_current;
        unsigned int            max_charge_voltage;
@@ -214,6 +224,7 @@ struct smb347_charger {
        bool                    use_usb;
        bool                    use_usb_otg;
        unsigned int            enable_control;
+       unsigned int            inok_polarity;
 };
 
 enum smb_charger_chipid {
@@ -358,21 +369,18 @@ static int smb347_charging_status(struct smb347_charger *smb)
 
 static int smb347_charging_set(struct smb347_charger *smb, bool enable)
 {
-       int ret = 0;
-
        if (smb->enable_control != SMB3XX_CHG_ENABLE_SW) {
                dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
                return 0;
        }
 
-       if (smb->charging_enabled != enable) {
-               ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
-                                        enable ? CMD_A_CHG_ENABLED : 0);
-               if (!ret)
-                       smb->charging_enabled = enable;
+       if (enable && smb->usb_vbus_enabled) {
+               dev_dbg(smb->dev, "charging not enabled because USB is in host mode\n");
+               return 0;
        }
 
-       return ret;
+       return regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
+                                 enable ? CMD_A_CHG_ENABLED : 0);
 }
 
 static inline int smb347_charging_enable(struct smb347_charger *smb)
@@ -671,10 +679,22 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
  *
  * Returns %0 on success and negative errno in case of failure.
  */
-static int smb347_set_writable(struct smb347_charger *smb, bool writable)
+static int smb347_set_writable(struct smb347_charger *smb, bool writable,
+                              bool irq_toggle)
 {
-       return regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
-                                 writable ? CMD_A_ALLOW_WRITE : 0);
+       struct i2c_client *client = to_i2c_client(smb->dev);
+       int ret;
+
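+       /*
+        * Presumably this keeps the IRQ handler from racing with register
+        * access while configuration writes are enabled.
+        */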
+       if (writable && irq_toggle && !smb->irq_unsupported)
+               disable_irq(client->irq);
+
+       ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
+                                writable ? CMD_A_ALLOW_WRITE : 0);
+
+       if ((!writable || ret) && irq_toggle && !smb->irq_unsupported)
+               enable_irq(client->irq);
+
+       return ret;
 }
 
 static int smb347_hw_init(struct smb347_charger *smb)
@@ -682,7 +702,7 @@ static int smb347_hw_init(struct smb347_charger *smb)
        unsigned int val;
        int ret;
 
-       ret = smb347_set_writable(smb, true);
+       ret = smb347_set_writable(smb, true, false);
        if (ret < 0)
                return ret;
 
@@ -724,6 +744,15 @@ static int smb347_hw_init(struct smb347_charger *smb)
        if (ret < 0)
                goto fail;
 
+       /* Activate pin control, making it writable. */
+       switch (smb->enable_control) {
+       case SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW:
+       case SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH:
+               ret = regmap_set_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL);
+               if (ret < 0)
+                       goto fail;
+       }
+
        /*
         * Make the charging functionality controllable by a write to the
         * command register unless pin control is specified in the platform
@@ -758,7 +787,7 @@ static int smb347_hw_init(struct smb347_charger *smb)
        ret = smb347_start_stop_charging(smb);
 
 fail:
-       smb347_set_writable(smb, false);
+       smb347_set_writable(smb, false, false);
        return ret;
 }
 
@@ -866,7 +895,7 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
        if (smb->irq_unsupported)
                return 0;
 
-       ret = smb347_set_writable(smb, true);
+       ret = smb347_set_writable(smb, true, true);
        if (ret < 0)
                return ret;
 
@@ -891,7 +920,7 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
        ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
                                 enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
 fail:
-       smb347_set_writable(smb, false);
+       smb347_set_writable(smb, false, true);
        return ret;
 }
 
@@ -919,7 +948,7 @@ static int smb347_irq_init(struct smb347_charger *smb,
        if (!client->irq)
                return 0;
 
-       ret = smb347_set_writable(smb, true);
+       ret = smb347_set_writable(smb, true, false);
        if (ret < 0)
                return ret;
 
@@ -931,7 +960,7 @@ static int smb347_irq_init(struct smb347_charger *smb,
                                 CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
                                 CFG_STAT_DISABLED);
 
-       smb347_set_writable(smb, false);
+       smb347_set_writable(smb, false, false);
 
        if (ret < 0) {
                dev_warn(smb->dev, "failed to initialize IRQ: %d\n", ret);
@@ -1241,6 +1270,13 @@ static void smb347_dt_parse_dev_info(struct smb347_charger *smb)
        /* Select charging control */
        device_property_read_u32(dev, "summit,enable-charge-control",
                                 &smb->enable_control);
+
+       /*
+        * Polarity of INOK signal indicating presence of external power
+        * supply connected to the charger.
+        */
+       device_property_read_u32(dev, "summit,inok-polarity",
+                                &smb->inok_polarity);
 }
 
 static int smb347_get_battery_info(struct smb347_charger *smb)
@@ -1292,12 +1328,176 @@ static int smb347_get_battery_info(struct smb347_charger *smb)
        return 0;
 }
 
+static int smb347_usb_vbus_get_current_limit(struct regulator_dev *rdev)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(smb->regmap, CFG_OTG, &val);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * It's unknown what happens if this bit is unset: we lack access to
+        * the datasheet, so assume it acts as a limit-enable.
+        */
+       if (!(val & CFG_OTG_CURRENT_LIMIT_250mA))
+               return 0;
+
+       return val & CFG_OTG_CURRENT_LIMIT_750mA ? 750000 : 250000;
+}
+
+static int smb347_usb_vbus_set_new_current_limit(struct smb347_charger *smb,
+                                                int max_uA)
+{
+       const unsigned int mask = CFG_OTG_CURRENT_LIMIT_750mA |
+                                 CFG_OTG_CURRENT_LIMIT_250mA;
+       unsigned int val = CFG_OTG_CURRENT_LIMIT_250mA;
+       int ret;
+
+       if (max_uA >= 750000)
+               val |= CFG_OTG_CURRENT_LIMIT_750mA;
+
+       ret = regmap_update_bits(smb->regmap, CFG_OTG, mask, val);
+       if (ret < 0)
+               dev_err(smb->dev, "failed to change USB current limit\n");
+
+       return ret;
+}
+
+static int smb347_usb_vbus_set_current_limit(struct regulator_dev *rdev,
+                                            int min_uA, int max_uA)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       int ret;
+
+       ret = smb347_set_writable(smb, true, true);
+       if (ret < 0)
+               return ret;
+
+       ret = smb347_usb_vbus_set_new_current_limit(smb, max_uA);
+       smb347_set_writable(smb, false, true);
+
+       return ret;
+}
+
+static int smb347_usb_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       int ret, max_uA;
+
+       ret = smb347_set_writable(smb, true, true);
+       if (ret < 0)
+               return ret;
+
+       smb347_charging_disable(smb);
+
+       if (device_property_read_bool(&rdev->dev, "summit,needs-inok-toggle")) {
+               unsigned int sysok = 0;
+
+               if (smb->inok_polarity == SMB3XX_SYSOK_INOK_ACTIVE_LOW)
+                       sysok = CFG_SYSOK_INOK_ACTIVE_HIGH;
+
+               /*
+                * VBUS won't be powered if INOK is active, so we need to
+                * manually disable INOK on some platforms.
+                */
+               ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+                                        CFG_SYSOK_INOK_ACTIVE_HIGH, sysok);
+               if (ret < 0) {
+                       dev_err(smb->dev, "failed to disable INOK\n");
+                       goto done;
+               }
+       }
+
+       ret = smb347_usb_vbus_get_current_limit(rdev);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to get USB VBUS current limit\n");
+               goto done;
+       }
+
+       max_uA = ret;
+
+       ret = smb347_usb_vbus_set_new_current_limit(smb, 250000);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to preset USB VBUS current limit\n");
+               goto done;
+       }
+
+       ret = regmap_set_bits(smb->regmap, CMD_A, CMD_A_OTG_ENABLED);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to enable USB VBUS\n");
+               goto done;
+       }
+
+       smb->usb_vbus_enabled = true;
+
+       ret = smb347_usb_vbus_set_new_current_limit(smb, max_uA);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to restore USB VBUS current limit\n");
+               goto done;
+       }
+done:
+       smb347_set_writable(smb, false, true);
+
+       return ret;
+}
+
+static int smb347_usb_vbus_regulator_disable(struct regulator_dev *rdev)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       int ret;
+
+       ret = smb347_set_writable(smb, true, true);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_clear_bits(smb->regmap, CMD_A, CMD_A_OTG_ENABLED);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to disable USB VBUS\n");
+               goto done;
+       }
+
+       smb->usb_vbus_enabled = false;
+
+       if (device_property_read_bool(&rdev->dev, "summit,needs-inok-toggle")) {
+               unsigned int sysok = 0;
+
+               if (smb->inok_polarity == SMB3XX_SYSOK_INOK_ACTIVE_HIGH)
+                       sysok = CFG_SYSOK_INOK_ACTIVE_HIGH;
+
+               ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+                                        CFG_SYSOK_INOK_ACTIVE_HIGH, sysok);
+               if (ret < 0) {
+                       dev_err(smb->dev, "failed to enable INOK\n");
+                       goto done;
+               }
+       }
+
+       smb347_start_stop_charging(smb);
+done:
+       smb347_set_writable(smb, false, true);
+
+       return ret;
+}
+
 static const struct regmap_config smb347_regmap = {
        .reg_bits       = 8,
        .val_bits       = 8,
        .max_register   = SMB347_MAX_REGISTER,
        .volatile_reg   = smb347_volatile_reg,
        .readable_reg   = smb347_readable_reg,
+       .cache_type     = REGCACHE_FLAT,
+       .num_reg_defaults_raw = SMB347_MAX_REGISTER,
+};
+
+static const struct regulator_ops smb347_usb_vbus_regulator_ops = {
+       .is_enabled     = regulator_is_enabled_regmap,
+       .enable         = smb347_usb_vbus_regulator_enable,
+       .disable        = smb347_usb_vbus_regulator_disable,
+       .get_current_limit = smb347_usb_vbus_get_current_limit,
+       .set_current_limit = smb347_usb_vbus_set_current_limit,
 };
 
 static const struct power_supply_desc smb347_mains_desc = {
@@ -1316,10 +1516,24 @@ static const struct power_supply_desc smb347_usb_desc = {
        .num_properties = ARRAY_SIZE(smb347_properties),
 };
 
+static const struct regulator_desc smb347_usb_vbus_regulator_desc = {
+       .name           = "smb347-usb-vbus",
+       .of_match       = of_match_ptr("usb-vbus"),
+       .ops            = &smb347_usb_vbus_regulator_ops,
+       .type           = REGULATOR_VOLTAGE,
+       .owner          = THIS_MODULE,
+       .enable_reg     = CMD_A,
+       .enable_mask    = CMD_A_OTG_ENABLED,
+       .enable_val     = CMD_A_OTG_ENABLED,
+       .fixed_uV       = 5000000,
+       .n_voltages     = 1,
+};
+
 static int smb347_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct power_supply_config mains_usb_cfg = {};
+       struct regulator_config usb_rdev_cfg = {};
        struct device *dev = &client->dev;
        struct smb347_charger *smb;
        int ret;
@@ -1367,6 +1581,18 @@ static int smb347_probe(struct i2c_client *client,
        if (ret)
                return ret;
 
+       usb_rdev_cfg.dev = dev;
+       usb_rdev_cfg.driver_data = smb;
+       usb_rdev_cfg.regmap = smb->regmap;
+
+       smb->usb_rdev = devm_regulator_register(dev,
+                                               &smb347_usb_vbus_regulator_desc,
+                                               &usb_rdev_cfg);
+       if (IS_ERR(smb->usb_rdev)) {
+               smb347_irq_disable(smb);
+               return PTR_ERR(smb->usb_rdev);
+       }
+
        return 0;
 }
 
@@ -1374,11 +1600,17 @@ static int smb347_remove(struct i2c_client *client)
 {
        struct smb347_charger *smb = i2c_get_clientdata(client);
 
+       smb347_usb_vbus_regulator_disable(smb->usb_rdev);
        smb347_irq_disable(smb);
 
        return 0;
 }
 
+static void smb347_shutdown(struct i2c_client *client)
+{
+       smb347_remove(client);
+}
+
 static const struct i2c_device_id smb347_id[] = {
        { "smb345", SMB345 },
        { "smb347", SMB347 },
@@ -1402,6 +1634,7 @@ static struct i2c_driver smb347_driver = {
        },
        .probe = smb347_probe,
        .remove = smb347_remove,
+       .shutdown = smb347_shutdown,
        .id_table = smb347_id,
 };
 module_i2c_driver(smb347_driver);
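
As a reading aid for the CFG_OTG handling added above, here is a minimal, self-contained sketch of the 250 mA / 750 mA current-limit encoding that smb347_usb_vbus_get_current_limit() and smb347_usb_vbus_set_new_current_limit() implement. The bit positions chosen for the two CFG_OTG_CURRENT_LIMIT_* macros are assumptions for illustration only; the real definitions live elsewhere in the driver.

#include <stdio.h>

#define CFG_OTG_CURRENT_LIMIT_250mA  (1u << 2)   /* assumed bit position */
#define CFG_OTG_CURRENT_LIMIT_750mA  (1u << 3)   /* assumed bit position */

static int otg_limit_to_uA(unsigned int cfg_otg)
{
	/* Mirrors smb347_usb_vbus_get_current_limit(): if the limit-enable
	 * bit is clear, the limit is reported as 0. */
	if (!(cfg_otg & CFG_OTG_CURRENT_LIMIT_250mA))
		return 0;
	return (cfg_otg & CFG_OTG_CURRENT_LIMIT_750mA) ? 750000 : 250000;
}

static unsigned int uA_to_otg_limit(int max_uA)
{
	/* Mirrors smb347_usb_vbus_set_new_current_limit(): 250 mA is always
	 * selected, 750 mA is added when the request allows it. */
	unsigned int val = CFG_OTG_CURRENT_LIMIT_250mA;

	if (max_uA >= 750000)
		val |= CFG_OTG_CURRENT_LIMIT_750mA;
	return val;
}

int main(void)
{
	printf("%d\n", otg_limit_to_uA(uA_to_otg_limit(800000))); /* 750000 */
	return 0;
}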
index 73cf68a..7c0099e 100644 (file)
@@ -158,16 +158,16 @@ static int get_energy_counter(struct powercap_zone *power_zone,
        /* prevent CPU hotplug, make sure the RAPL domain does not go
         * away while reading the counter.
         */
-       get_online_cpus();
+       cpus_read_lock();
        rd = power_zone_to_rapl_domain(power_zone);
 
        if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) {
                *energy_raw = energy_now;
-               put_online_cpus();
+               cpus_read_unlock();
 
                return 0;
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        return -EIO;
 }
@@ -216,11 +216,11 @@ static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
        if (rd->state & DOMAIN_STATE_BIOS_LOCKED)
                return -EACCES;
 
-       get_online_cpus();
+       cpus_read_lock();
        rapl_write_data_raw(rd, PL1_ENABLE, mode);
        if (rapl_defaults->set_floor_freq)
                rapl_defaults->set_floor_freq(rd, mode);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return 0;
 }
@@ -234,13 +234,13 @@ static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
                *mode = false;
                return 0;
        }
-       get_online_cpus();
+       cpus_read_lock();
        if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) {
-               put_online_cpus();
+               cpus_read_unlock();
                return -EIO;
        }
        *mode = val;
-       put_online_cpus();
+       cpus_read_unlock();
 
        return 0;
 }
@@ -317,7 +317,7 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid,
        int ret = 0;
        int id;
 
-       get_online_cpus();
+       cpus_read_lock();
        rd = power_zone_to_rapl_domain(power_zone);
        id = contraint_to_pl(rd, cid);
        if (id < 0) {
@@ -350,7 +350,7 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid,
        if (!ret)
                package_power_limit_irq_save(rp);
 set_exit:
-       put_online_cpus();
+       cpus_read_unlock();
        return ret;
 }
 
@@ -363,7 +363,7 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
        int ret = 0;
        int id;
 
-       get_online_cpus();
+       cpus_read_lock();
        rd = power_zone_to_rapl_domain(power_zone);
        id = contraint_to_pl(rd, cid);
        if (id < 0) {
@@ -382,7 +382,7 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
                prim = POWER_LIMIT4;
                break;
        default:
-               put_online_cpus();
+               cpus_read_unlock();
                return -EINVAL;
        }
        if (rapl_read_data_raw(rd, prim, true, &val))
@@ -391,7 +391,7 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
                *data = val;
 
 get_exit:
-       put_online_cpus();
+       cpus_read_unlock();
 
        return ret;
 }
@@ -403,7 +403,7 @@ static int set_time_window(struct powercap_zone *power_zone, int cid,
        int ret = 0;
        int id;
 
-       get_online_cpus();
+       cpus_read_lock();
        rd = power_zone_to_rapl_domain(power_zone);
        id = contraint_to_pl(rd, cid);
        if (id < 0) {
@@ -423,7 +423,7 @@ static int set_time_window(struct powercap_zone *power_zone, int cid,
        }
 
 set_time_exit:
-       put_online_cpus();
+       cpus_read_unlock();
        return ret;
 }
 
@@ -435,7 +435,7 @@ static int get_time_window(struct powercap_zone *power_zone, int cid,
        int ret = 0;
        int id;
 
-       get_online_cpus();
+       cpus_read_lock();
        rd = power_zone_to_rapl_domain(power_zone);
        id = contraint_to_pl(rd, cid);
        if (id < 0) {
@@ -458,14 +458,14 @@ static int get_time_window(struct powercap_zone *power_zone, int cid,
                val = 0;
                break;
        default:
-               put_online_cpus();
+               cpus_read_unlock();
                return -EINVAL;
        }
        if (!ret)
                *data = val;
 
 get_time_exit:
-       put_online_cpus();
+       cpus_read_unlock();
 
        return ret;
 }
@@ -491,7 +491,7 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
        int prim;
        int ret = 0;
 
-       get_online_cpus();
+       cpus_read_lock();
        rd = power_zone_to_rapl_domain(power_zone);
        switch (rd->rpl[id].prim_id) {
        case PL1_ENABLE:
@@ -504,7 +504,7 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
                prim = MAX_POWER;
                break;
        default:
-               put_online_cpus();
+               cpus_read_unlock();
                return -EINVAL;
        }
        if (rapl_read_data_raw(rd, prim, true, &val))
@@ -516,7 +516,7 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
        if (rd->rpl[id].prim_id == PL4_ENABLE)
                *data = *data * 2;
 
-       put_online_cpus();
+       cpus_read_unlock();
 
        return ret;
 }
@@ -1358,7 +1358,7 @@ static void power_limit_state_save(void)
        struct rapl_domain *rd;
        int nr_pl, ret, i;
 
-       get_online_cpus();
+       cpus_read_lock();
        list_for_each_entry(rp, &rapl_packages, plist) {
                if (!rp->power_zone)
                        continue;
@@ -1390,7 +1390,7 @@ static void power_limit_state_save(void)
                        }
                }
        }
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 static void power_limit_state_restore(void)
@@ -1399,7 +1399,7 @@ static void power_limit_state_restore(void)
        struct rapl_domain *rd;
        int nr_pl, i;
 
-       get_online_cpus();
+       cpus_read_lock();
        list_for_each_entry(rp, &rapl_packages, plist) {
                if (!rp->power_zone)
                        continue;
@@ -1425,7 +1425,7 @@ static void power_limit_state_restore(void)
                        }
                }
        }
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 static int rapl_pm_callback(struct notifier_block *nb,
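
The hunks above are a mechanical rename: cpus_read_lock()/cpus_read_unlock(), declared in <linux/cpu.h>, are the current names of the CPU-hotplug read lock that get_online_cpus()/put_online_cpus() used to wrap. A minimal sketch of the pattern, with a hypothetical helper name and callback:

#include <linux/cpu.h>
#include <linux/types.h>

/* Hypothetical helper: hold the hotplug read lock across a read so the
 * RAPL package/domain cannot go away mid-way; read_fn stands in for any
 * of the accessors converted above. */
static int read_under_hotplug_lock(int (*read_fn)(u64 *val), u64 *out)
{
	int ret;

	cpus_read_lock();	/* was: get_online_cpus() */
	ret = read_fn(out);
	cpus_read_unlock();	/* was: put_online_cpus() */

	return ret;
}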
index cc3b228..1be45f3 100644 (file)
@@ -138,6 +138,8 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
 /* List of verified CPUs. */
 static const struct x86_cpu_id pl4_support_ids[] = {
        { X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE, X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_L, X86_FEATURE_ANY },
        {}
 };
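
For context, tables like pl4_support_ids above are consulted with x86_match_cpu() at probe time. A minimal sketch, assuming the table shown in this hunk; the helper name is illustrative:

#include <linux/types.h>
#include <asm/cpu_device_id.h>

/* Hypothetical helper: true when the running CPU appears in pl4_support_ids. */
static bool pl4_supported(void)
{
	return x86_match_cpu(pl4_support_ids) != NULL;
}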
 
index 8c20e52..e085c25 100644 (file)
@@ -90,7 +90,8 @@ config PTP_1588_CLOCK_INES
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
        depends on X86_32 || COMPILE_TEST
-       depends on HAS_IOMEM && NET
+       depends on HAS_IOMEM && PCI
+       depends on NET
        imply PTP_1588_CLOCK
        help
          This driver adds support for using the PCH EG20T as a PTP
index b3d96b7..41b92dc 100644 (file)
@@ -154,7 +154,7 @@ static int unregister_vclock(struct device *dev, void *data)
        struct ptp_clock *ptp = dev_get_drvdata(dev);
        struct ptp_clock_info *info = ptp->info;
        struct ptp_vclock *vclock;
-       u8 *num = data;
+       u32 *num = data;
 
        vclock = info_to_vclock(info);
        dev_info(dev->parent, "delete virtual clock ptp%d\n",
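
The one-line type change above matters because the counter travels through a void pointer; here is a small stand-alone illustration of why dereferencing it with a narrower type than it was declared with is a bug. Names are illustrative, not taken from the driver:

#include <stdio.h>
#include <stdint.h>

/* The callback receives the counter through a void pointer; dereferencing
 * it with a narrower type only touches one byte of the 32-bit value
 * (which byte depends on endianness). */
static void count_as_u8(void *data)  { (*(uint8_t *)data)++;  }
static void count_as_u32(void *data) { (*(uint32_t *)data)++; }

int main(void)
{
	uint32_t num = 0;

	count_as_u8(&num);   /* modifies a single byte of 'num' */
	count_as_u32(&num);  /* modifies the whole counter */
	printf("num = %u\n", num);
	return 0;
}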
index 24ce9a1..4fd13b0 100644 (file)
@@ -1044,7 +1044,7 @@ config REGULATOR_RT6160
        help
          This adds support for voltage regulator in Richtek RT6160.
          This device automatically change voltage output mode from
-         Buck or Boost. The mode transistion depend on the input source voltage.
+         Buck or Boost. The mode transition depends on the input source voltage.
          The wide output range is from 2025mV to 5200mV and can be used on most
          common application scenario.
 
@@ -1053,10 +1053,21 @@ config REGULATOR_RT6245
        depends on I2C
        select REGMAP_I2C
        help
-         This adds supprot for Richtek RT6245 voltage regulator.
+         This adds support for Richtek RT6245 voltage regulator.
          It can support up to 14A output current and adjustable output voltage
          from 0.4375V to 1.3875V, per step 12.5mV.
 
+config REGULATOR_RTQ2134
+       tristate "Richtek RTQ2134 SubPMIC Regulator"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         This driver adds support for RTQ2134 SubPMIC regulators.
+         The RTQ2134 is a multi-phase, programmable power management IC that
+         integrates four highly efficient, synchronous step-down converter
+         cores. It features a wide output voltage range and the capability to
+         configure the corresponding power stages.
+
 config REGULATOR_RTMV20
        tristate "Richtek RTMV20 Laser Diode Regulator"
        depends on I2C
@@ -1066,6 +1077,15 @@ config REGULATOR_RTMV20
          the Richtek RTMV20. It can support the load current up to 6A and
          integrate strobe/vsync/fsin signal to synchronize the IR camera.
 
+config REGULATOR_RTQ6752
+       tristate "Richtek RTQ6752 TFT LCD voltage regulator"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         This driver adds support for the Richtek RTQ6752. The RTQ6752 includes
+         two synchronous boost converters for PAVDD, and one synchronous NAVDD
+         buck-boost. This device is suitable for automotive TFT-LCD panels.
+
 config REGULATOR_S2MPA01
        tristate "Samsung S2MPA01 voltage regulator"
        depends on MFD_SEC_CORE || COMPILE_TEST
index 8c2f822..9e382b5 100644 (file)
@@ -128,6 +128,8 @@ obj-$(CONFIG_REGULATOR_RT5033)      += rt5033-regulator.o
 obj-$(CONFIG_REGULATOR_RT6160) += rt6160-regulator.o
 obj-$(CONFIG_REGULATOR_RT6245) += rt6245-regulator.o
 obj-$(CONFIG_REGULATOR_RTMV20) += rtmv20-regulator.o
+obj-$(CONFIG_REGULATOR_RTQ2134) += rtq2134-regulator.o
+obj-$(CONFIG_REGULATOR_RTQ6752)        += rtq6752-regulator.o
 obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
 obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
 obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
index b1eb469..d60fcce 100644 (file)
@@ -55,7 +55,8 @@
 #define BD718XX_HWOPNAME(swopname) swopname##_hwcontrol
 
 #define BD718XX_OPS(name, _list_voltage, _map_voltage, _set_voltage_sel, \
-                  _get_voltage_sel, _set_voltage_time_sel, _set_ramp_delay) \
+                  _get_voltage_sel, _set_voltage_time_sel, _set_ramp_delay, \
+                  _set_uvp, _set_ovp)                          \
 static const struct regulator_ops name = {                     \
        .enable = regulator_enable_regmap,                      \
        .disable = regulator_disable_regmap,                    \
@@ -66,6 +67,8 @@ static const struct regulator_ops name = {                    \
        .get_voltage_sel = (_get_voltage_sel),                  \
        .set_voltage_time_sel = (_set_voltage_time_sel),        \
        .set_ramp_delay = (_set_ramp_delay),                    \
+       .set_under_voltage_protection = (_set_uvp),             \
+       .set_over_voltage_protection = (_set_ovp),              \
 };                                                             \
                                                                \
 static const struct regulator_ops BD718XX_HWOPNAME(name) = {   \
@@ -76,6 +79,8 @@ static const struct regulator_ops BD718XX_HWOPNAME(name) = {  \
        .get_voltage_sel = (_get_voltage_sel),                  \
        .set_voltage_time_sel = (_set_voltage_time_sel),        \
        .set_ramp_delay = (_set_ramp_delay),                    \
+       .set_under_voltage_protection = (_set_uvp),             \
+       .set_over_voltage_protection = (_set_ovp),              \
 }                                                              \
 
 /*
@@ -154,17 +159,9 @@ static void voltage_change_done(struct regulator_dev *rdev, unsigned int sel,
                 * exceed it due to the scheduling.
                 */
                msleep(1);
-               /*
-                * Note for next hacker. The PWRGOOD should not be masked on
-                * BD71847 so we will just unconditionally enable detection
-                * when voltage is set.
-                * If someone want's to disable PWRGOOD he must implement
-                * caching and restoring the old value here. I am not
-                * aware of such use-cases so for the sake of the simplicity
-                * we just always enable PWRGOOD here.
-                */
-               ret = regmap_update_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
-                                        *mask, 0);
+
+               ret = regmap_clear_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                                        *mask);
                if (ret)
                        dev_err(&rdev->dev,
                                "Failed to re-enable voltage monitoring (%d)\n",
@@ -208,12 +205,27 @@ static int voltage_change_prepare(struct regulator_dev *rdev, unsigned int sel,
                 * time configurable.
                 */
                if (new > now) {
+                       int tmp;
+                       int prot_bit;
                        int ldo_offset = rdev->desc->id - BD718XX_LDO1;
 
-                       *mask = BD718XX_LDO1_VRMON80 << ldo_offset;
-                       ret = regmap_update_bits(rdev->regmap,
-                                                BD718XX_REG_MVRFLTMASK2,
-                                                *mask, *mask);
+                       prot_bit = BD718XX_LDO1_VRMON80 << ldo_offset;
+                       ret = regmap_read(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                                         &tmp);
+                       if (ret) {
+                               dev_err(&rdev->dev,
+                                       "Failed to read voltage monitoring state\n");
+                               return ret;
+                       }
+
+                       if (!(tmp & prot_bit)) {
+                               /* We disable protection if it was enabled... */
+                               ret = regmap_set_bits(rdev->regmap,
+                                                     BD718XX_REG_MVRFLTMASK2,
+                                                     prot_bit);
+                               /* ...and we also want to re-enable it */
+                               *mask = prot_bit;
+                       }
                        if (ret) {
                                dev_err(&rdev->dev,
                                        "Failed to stop voltage monitoring\n");
@@ -266,99 +278,6 @@ static int bd71837_set_voltage_sel_pickable_restricted(
        return regulator_set_voltage_sel_pickable_regmap(rdev, sel);
 }
 
-/*
- * OPS common for BD71847 and BD71850
- */
-BD718XX_OPS(bd718xx_pickable_range_ldo_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           bd718xx_set_voltage_sel_pickable_restricted,
-           regulator_get_voltage_sel_pickable_regmap, NULL, NULL);
-
-/* BD71847 and BD71850 LDO 5 is by default OFF at RUN state */
-static const struct regulator_ops bd718xx_ldo5_ops_hwstate = {
-       .is_enabled = never_enabled_by_hwstate,
-       .list_voltage = regulator_list_voltage_pickable_linear_range,
-       .set_voltage_sel = bd718xx_set_voltage_sel_pickable_restricted,
-       .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
-};
-
-BD718XX_OPS(bd718xx_pickable_range_buck_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           regulator_set_voltage_sel_pickable_regmap,
-           regulator_get_voltage_sel_pickable_regmap,
-           regulator_set_voltage_time_sel, NULL);
-
-BD718XX_OPS(bd718xx_ldo_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, bd718xx_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd718xx_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
-           NULL, bd718xx_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd718xx_buck_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, regulator_set_voltage_sel_regmap,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-
-BD718XX_OPS(bd718xx_buck_regulator_nolinear_ops, regulator_list_voltage_table,
-           regulator_map_voltage_ascend, regulator_set_voltage_sel_regmap,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-
-/*
- * OPS for BD71837
- */
-BD718XX_OPS(bd71837_pickable_range_ldo_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           bd71837_set_voltage_sel_pickable_restricted,
-           regulator_get_voltage_sel_pickable_regmap, NULL, NULL);
-
-BD718XX_OPS(bd71837_pickable_range_buck_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           bd71837_set_voltage_sel_pickable_restricted,
-           regulator_get_voltage_sel_pickable_regmap,
-           regulator_set_voltage_time_sel, NULL);
-
-BD718XX_OPS(bd71837_ldo_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd71837_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
-           NULL, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-
-BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
-           regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-/*
- * BD71837 bucks 3 and 4 support defining their enable/disable state also
- * when buck enable state is under HW state machine control. In that case the
- * bit [2] in CTRL register is used to indicate if regulator should be ON.
- */
-static const struct regulator_ops bd71837_buck34_ops_hwctrl = {
-       .is_enabled = bd71837_get_buck34_enable_hwctrl,
-       .list_voltage = regulator_list_voltage_linear_range,
-       .set_voltage_sel = regulator_set_voltage_sel_regmap,
-       .get_voltage_sel = regulator_get_voltage_sel_regmap,
-       .set_voltage_time_sel = regulator_set_voltage_time_sel,
-       .set_ramp_delay = regulator_set_ramp_delay_regmap,
-};
-
-/*
- * OPS for all of the ICs - BD718(37/47/50)
- */
-BD718XX_OPS(bd718xx_dvs_buck_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, regulator_set_voltage_sel_regmap,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           /* bd718xx_buck1234_set_ramp_delay */ regulator_set_ramp_delay_regmap);
-
 /*
  * BD71837 BUCK1/2/3/4
  * BD71847 BUCK1/2
@@ -536,6 +455,238 @@ struct bd718xx_regulator_data {
        int additional_init_amnt;
 };
 
+static int bd718x7_xvp_sanity_check(struct regulator_dev *rdev, int lim_uV,
+                                   int severity)
+{
+       /*
+        * BD71837/47/50 ... (ICs supported by this driver) do not provide
+        * warnings, only protection
+        */
+       if (severity != REGULATOR_SEVERITY_PROT) {
+               dev_err(&rdev->dev,
+                       "Unsupported Under Voltage protection level\n");
+               return -EINVAL;
+       }
+
+       /*
+        * And protection limit is not changeable. It can only be enabled
+        * or disabled
+        */
+       if (lim_uV)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int bd718x7_set_ldo_uvp(struct regulator_dev *rdev, int lim_uV,
+                              int severity, bool enable)
+{
+       int ldo_offset = rdev->desc->id - BD718XX_LDO1;
+       int prot_bit, ret;
+
+       ret = bd718x7_xvp_sanity_check(rdev, lim_uV, severity);
+       if (ret)
+               return ret;
+
+       prot_bit = BD718XX_LDO1_VRMON80 << ldo_offset;
+
+       if (enable)
+               return regmap_clear_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                                        prot_bit);
+
+       return regmap_set_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                              prot_bit);
+}
+
+static int bd718x7_get_buck_prot_reg(int id, int *reg)
+{
+
+       if (id > BD718XX_BUCK8) {
+               WARN_ON(id > BD718XX_BUCK8);
+               return -EINVAL;
+       }
+
+       if (id > BD718XX_BUCK4)
+               *reg = BD718XX_REG_MVRFLTMASK0;
+       else
+               *reg = BD718XX_REG_MVRFLTMASK1;
+
+       return 0;
+}
+
+static int bd718x7_get_buck_ovp_info(int id, int *reg, int *bit)
+{
+       int ret;
+
+       ret = bd718x7_get_buck_prot_reg(id, reg);
+       if (ret)
+               return ret;
+
+       *bit = BIT((id % 4) * 2 + 1);
+
+       return 0;
+}
+
+static int bd718x7_get_buck_uvp_info(int id, int *reg, int *bit)
+{
+       int ret;
+
+       ret = bd718x7_get_buck_prot_reg(id, reg);
+       if (ret)
+               return ret;
+
+       *bit = BIT((id % 4) * 2);
+
+       return 0;
+}
+
+static int bd718x7_set_buck_uvp(struct regulator_dev *rdev, int lim_uV,
+                               int severity, bool enable)
+{
+       int bit, reg, ret;
+
+       ret = bd718x7_xvp_sanity_check(rdev, lim_uV, severity);
+       if (ret)
+               return ret;
+
+       ret = bd718x7_get_buck_uvp_info(rdev->desc->id, &reg, &bit);
+       if (ret)
+               return ret;
+
+       if (enable)
+               return regmap_clear_bits(rdev->regmap, reg, bit);
+
+       return regmap_set_bits(rdev->regmap, reg, bit);
+
+}
+
+static int bd718x7_set_buck_ovp(struct regulator_dev *rdev, int lim_uV,
+                               int severity,
+                               bool enable)
+{
+       int bit, reg, ret;
+
+       ret = bd718x7_xvp_sanity_check(rdev, lim_uV, severity);
+       if (ret)
+               return ret;
+
+       ret = bd718x7_get_buck_ovp_info(rdev->desc->id, &reg, &bit);
+       if (ret)
+               return ret;
+
+       if (enable)
+               return regmap_clear_bits(rdev->regmap, reg, bit);
+
+       return regmap_set_bits(rdev->regmap, reg, bit);
+}
+
+/*
+ * OPS common for BD71847 and BD71850
+ */
+BD718XX_OPS(bd718xx_pickable_range_ldo_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           bd718xx_set_voltage_sel_pickable_restricted,
+           regulator_get_voltage_sel_pickable_regmap, NULL, NULL,
+           bd718x7_set_ldo_uvp, NULL);
+
+/* BD71847 and BD71850 LDO 5 is by default OFF at RUN state */
+static const struct regulator_ops bd718xx_ldo5_ops_hwstate = {
+       .is_enabled = never_enabled_by_hwstate,
+       .list_voltage = regulator_list_voltage_pickable_linear_range,
+       .set_voltage_sel = bd718xx_set_voltage_sel_pickable_restricted,
+       .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
+       .set_under_voltage_protection = bd718x7_set_ldo_uvp,
+};
+
+BD718XX_OPS(bd718xx_pickable_range_buck_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           regulator_set_voltage_sel_pickable_regmap,
+           regulator_get_voltage_sel_pickable_regmap,
+           regulator_set_voltage_time_sel, NULL, bd718x7_set_buck_uvp,
+           bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd718xx_ldo_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, bd718xx_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd718xx_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
+           NULL, bd718xx_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd718xx_buck_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, regulator_set_voltage_sel_regmap,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd718xx_buck_regulator_nolinear_ops, regulator_list_voltage_table,
+           regulator_map_voltage_ascend, regulator_set_voltage_sel_regmap,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+
+/*
+ * OPS for BD71837
+ */
+BD718XX_OPS(bd71837_pickable_range_ldo_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           bd71837_set_voltage_sel_pickable_restricted,
+           regulator_get_voltage_sel_pickable_regmap, NULL, NULL,
+           bd718x7_set_ldo_uvp, NULL);
+
+BD718XX_OPS(bd71837_pickable_range_buck_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           bd71837_set_voltage_sel_pickable_restricted,
+           regulator_get_voltage_sel_pickable_regmap,
+           regulator_set_voltage_time_sel, NULL, bd718x7_set_buck_uvp,
+           bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd71837_ldo_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd71837_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
+           NULL, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
+           regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+/*
+ * BD71837 bucks 3 and 4 support defining their enable/disable state also
+ * when buck enable state is under HW state machine control. In that case the
+ * bit [2] in CTRL register is used to indicate if regulator should be ON.
+ */
+static const struct regulator_ops bd71837_buck34_ops_hwctrl = {
+       .is_enabled = bd71837_get_buck34_enable_hwctrl,
+       .list_voltage = regulator_list_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .set_ramp_delay = regulator_set_ramp_delay_regmap,
+       .set_under_voltage_protection = bd718x7_set_buck_uvp,
+       .set_over_voltage_protection = bd718x7_set_buck_ovp,
+};
+
+/*
+ * OPS for all of the ICs - BD718(37/47/50)
+ */
+BD718XX_OPS(bd718xx_dvs_buck_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, regulator_set_voltage_sel_regmap,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           regulator_set_ramp_delay_regmap, bd718x7_set_buck_uvp,
+           bd718x7_set_buck_ovp);
+
+
+
 /*
  * There is a HW quirk in BD71837. The shutdown sequence timings for
  * bucks/LDOs which are controlled via register interface are changed.
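
A stand-alone sketch of the MVRFLTMASK bit layout implied by bd718x7_get_buck_uvp_info()/bd718x7_get_buck_ovp_info() above: within each mask register, every buck gets an even bit for under-voltage and the following odd bit for over-voltage detection. The loop only reproduces the arithmetic; 'id' is a plain 0-based index for illustration, not the driver's enum.

#include <stdio.h>

/* uvp_bit/ovp_bit mirror BIT((id % 4) * 2) and BIT((id % 4) * 2 + 1)
 * from the driver functions above. */
static void buck_prot_bits(int id, int *uvp_bit, int *ovp_bit)
{
	*uvp_bit = (id % 4) * 2;
	*ovp_bit = (id % 4) * 2 + 1;
}

int main(void)
{
	int id, uvp, ovp;

	for (id = 0; id < 8; id++) {
		buck_prot_bits(id, &uvp, &ovp);
		printf("buck index %d: UVP bit %d, OVP bit %d\n", id, uvp, ovp);
	}
	return 0;
}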
index cf7d534..82f52a2 100644 (file)
@@ -412,6 +412,134 @@ static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev,
        return regmap_field_write(regl->suspend_sleep, val);
 }
 
+static unsigned int da9063_get_overdrive_mask(const struct regulator_desc *desc)
+{
+       switch (desc->id) {
+       case DA9063_ID_BCORES_MERGED:
+       case DA9063_ID_BCORE1:
+               return DA9063_BCORE1_OD;
+       case DA9063_ID_BCORE2:
+               return DA9063_BCORE2_OD;
+       case DA9063_ID_BPRO:
+               return DA9063_BPRO_OD;
+       default:
+               return 0;
+       }
+}
+
+static int da9063_buck_set_limit_set_overdrive(struct regulator_dev *rdev,
+                                              int min_uA, int max_uA,
+                                              unsigned int overdrive_mask)
+{
+       /*
+        * When enabling overdrive, do it before changing the current limit to
+        * ensure sufficient supply throughout the switch.
+        */
+       struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+       int ret;
+       unsigned int orig_overdrive;
+
+       ret = regmap_read(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                         &orig_overdrive);
+       if (ret < 0)
+               return ret;
+       orig_overdrive &= overdrive_mask;
+
+       if (orig_overdrive == 0) {
+               ret = regmap_set_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                               overdrive_mask);
+               if (ret < 0)
+                       return ret;
+       }
+
+       ret = regulator_set_current_limit_regmap(rdev, min_uA / 2, max_uA / 2);
+       if (ret < 0 && orig_overdrive == 0)
+               /*
+                * regulator_set_current_limit_regmap may have rejected the
+                * change because of unusable min_uA and/or max_uA inputs.
+                * Attempt to restore original overdrive state, ignore failure-
+                * on-failure.
+                */
+               regmap_clear_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                                 overdrive_mask);
+
+       return ret;
+}
+
+static int da9063_buck_set_limit_clear_overdrive(struct regulator_dev *rdev,
+                                                int min_uA, int max_uA,
+                                                unsigned int overdrive_mask)
+{
+       /*
+        * When disabling overdrive, do it after changing the current limit to
+        * ensure sufficient supply throughout the switch.
+        */
+       struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+       int ret, orig_limit;
+
+       ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &orig_limit);
+       if (ret < 0)
+               return ret;
+
+       ret = regulator_set_current_limit_regmap(rdev, min_uA, max_uA);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_clear_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                               overdrive_mask);
+       if (ret < 0)
+               /*
+                * Attempt to restore original current limit, ignore failure-
+                * on-failure.
+                */
+               regmap_write(rdev->regmap, rdev->desc->csel_reg, orig_limit);
+
+       return ret;
+}
+
+static int da9063_buck_set_current_limit(struct regulator_dev *rdev,
+                                        int min_uA, int max_uA)
+{
+       unsigned int overdrive_mask, n_currents;
+
+       overdrive_mask = da9063_get_overdrive_mask(rdev->desc);
+       if (overdrive_mask) {
+               n_currents = rdev->desc->n_current_limits;
+               if (n_currents == 0)
+                       return -EINVAL;
+
+               if (max_uA > rdev->desc->curr_table[n_currents - 1])
+                       return da9063_buck_set_limit_set_overdrive(rdev, min_uA,
+                                                                  max_uA,
+                                                                  overdrive_mask);
+
+               return da9063_buck_set_limit_clear_overdrive(rdev, min_uA,
+                                                            max_uA,
+                                                            overdrive_mask);
+       }
+       return regulator_set_current_limit_regmap(rdev, min_uA, max_uA);
+}
+
+static int da9063_buck_get_current_limit(struct regulator_dev *rdev)
+{
+       struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+       int val, ret, limit;
+       unsigned int mask;
+
+       limit = regulator_get_current_limit_regmap(rdev);
+       if (limit < 0)
+               return limit;
+       mask = da9063_get_overdrive_mask(rdev->desc);
+       if (mask) {
+               ret = regmap_read(regl->hw->regmap, DA9063_REG_CONFIG_H, &val);
+               if (ret < 0)
+                       return ret;
+               if (val & mask)
+                       limit *= 2;
+       }
+       return limit;
+}
+
 static const struct regulator_ops da9063_buck_ops = {
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -419,8 +547,8 @@ static const struct regulator_ops da9063_buck_ops = {
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
        .list_voltage           = regulator_list_voltage_linear,
-       .set_current_limit      = regulator_set_current_limit_regmap,
-       .get_current_limit      = regulator_get_current_limit_regmap,
+       .set_current_limit      = da9063_buck_set_current_limit,
+       .get_current_limit      = da9063_buck_get_current_limit,
        .set_mode               = da9063_buck_set_mode,
        .get_mode               = da9063_buck_get_mode,
        .get_status             = da9063_buck_get_status,
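
A minimal sketch of the overdrive arithmetic the new da9063_buck_set_current_limit()/da9063_buck_get_current_limit() implement: with overdrive enabled the buck delivers twice the programmed limit, so requested values are halved before the register write and read-back values are doubled when reported. Purely illustrative, not driver code:

#include <stdio.h>

static int effective_limit_uA(int programmed_uA, int overdrive_enabled)
{
	return overdrive_enabled ? programmed_uA * 2 : programmed_uA;
}

static int programmed_limit_uA(int requested_uA, int overdrive_enabled)
{
	return overdrive_enabled ? requested_uA / 2 : requested_uA;
}

int main(void)
{
	/* Request 3 A on a buck whose current table tops out below that:
	 * the driver enables overdrive and programs 1.5 A, which is then
	 * reported back as 3 A. */
	int prog = programmed_limit_uA(3000000, 1);

	printf("programmed %d uA -> reported %d uA\n",
	       prog, effective_limit_uA(prog, 1));
	return 0;
}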
index 8b70bfe..a45c1e1 100644 (file)
@@ -117,11 +117,11 @@ ux500_regulator_debug_init(struct platform_device *pdev,
        rdebug.dir = debugfs_create_dir("ux500-regulator", NULL);
 
        /* create "status" file */
-       debugfs_create_file("status", S_IRUGO, rdebug.dir, &pdev->dev,
+       debugfs_create_file("status", 0444, rdebug.dir, &pdev->dev,
                            &ux500_regulator_status_fops);
 
        /* create "power-state-count" file */
-       debugfs_create_file("power-state-count", S_IRUGO, rdebug.dir,
+       debugfs_create_file("power-state-count", 0444, rdebug.dir,
                            &pdev->dev, &ux500_regulator_power_state_cnt_fops);
 
        rdebug.regulator_array = regulator_info;
index a8de0aa..9113233 100644 (file)
@@ -205,35 +205,6 @@ struct regulator_dev *devm_regulator_register(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(devm_regulator_register);
 
-static int devm_rdev_match(struct device *dev, void *res, void *data)
-{
-       struct regulator_dev **r = res;
-       if (!r || !*r) {
-               WARN_ON(!r || !*r);
-               return 0;
-       }
-       return *r == data;
-}
-
-/**
- * devm_regulator_unregister - Resource managed regulator_unregister()
- * @dev:  device to supply
- * @rdev: regulator to free
- *
- * Unregister a regulator registered with devm_regulator_register().
- * Normally this function will not need to be called and the resource
- * management code will ensure that the resource is freed.
- */
-void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev)
-{
-       int rc;
-
-       rc = devres_release(dev, devm_rdev_release, devm_rdev_match, rdev);
-       if (rc != 0)
-               WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_regulator_unregister);
-
 struct regulator_supply_alias_match {
        struct device *dev;
        const char *id;
@@ -296,19 +267,8 @@ int devm_regulator_register_supply_alias(struct device *dev, const char *id,
 }
 EXPORT_SYMBOL_GPL(devm_regulator_register_supply_alias);
 
-/**
- * devm_regulator_unregister_supply_alias - Resource managed
- * regulator_unregister_supply_alias()
- *
- * @dev: device to supply
- * @id:  supply name or regulator ID
- *
- * Unregister an alias registered with
- * devm_regulator_register_supply_alias(). Normally this function
- * will not need to be called and the resource management code
- * will ensure that the resource is freed.
- */
-void devm_regulator_unregister_supply_alias(struct device *dev, const char *id)
+static void devm_regulator_unregister_supply_alias(struct device *dev,
+                                                  const char *id)
 {
        struct regulator_supply_alias_match match;
        int rc;
@@ -321,7 +281,6 @@ void devm_regulator_unregister_supply_alias(struct device *dev, const char *id)
        if (rc != 0)
                WARN_ON(rc);
 }
-EXPORT_SYMBOL_GPL(devm_regulator_unregister_supply_alias);
 
 /**
  * devm_regulator_bulk_register_supply_alias - Managed register
@@ -373,30 +332,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(devm_regulator_bulk_register_supply_alias);
 
-/**
- * devm_regulator_bulk_unregister_supply_alias - Managed unregister
- * multiple aliases
- *
- * @dev:    device to supply
- * @id:     list of supply names or regulator IDs
- * @num_id: number of aliases to unregister
- *
- * Unregister aliases registered with
- * devm_regulator_bulk_register_supply_alias(). Normally this function
- * will not need to be called and the resource management code
- * will ensure that the resource is freed.
- */
-void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
-                                                const char *const *id,
-                                                int num_id)
-{
-       int i;
-
-       for (i = 0; i < num_id; ++i)
-               devm_regulator_unregister_supply_alias(dev, id[i]);
-}
-EXPORT_SYMBOL_GPL(devm_regulator_bulk_unregister_supply_alias);
-
 struct regulator_notifier_match {
        struct regulator *regulator;
        struct notifier_block *nb;
index 3928461..599ad20 100644 (file)
@@ -287,8 +287,9 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc,
                                               &cfg);
        if (IS_ERR(drvdata->dev)) {
-               ret = PTR_ERR(drvdata->dev);
-               dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
+               ret = dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev),
+                                   "Failed to register regulator: %ld\n",
+                                   PTR_ERR(drvdata->dev));
                return ret;
        }
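
The hunk above switches to the dev_err_probe() idiom: it logs the failure (quietly, with a recorded deferral reason, when the error is -EPROBE_DEFER) and returns the error code, so logging and returning collapse into one statement. A minimal sketch; example_register() is a made-up wrapper, not part of the driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/driver.h>

/* Hypothetical wrapper showing the register-then-report pattern. */
static int example_register(struct device *dev,
			    const struct regulator_desc *desc,
			    const struct regulator_config *cfg)
{
	struct regulator_dev *rdev;

	rdev = devm_regulator_register(dev, desc, cfg);
	if (IS_ERR(rdev))
		return dev_err_probe(dev, PTR_ERR(rdev),
				     "Failed to register regulator\n");

	return 0;
}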
 
index 845bc3b..662d87a 100644 (file)
@@ -4,7 +4,7 @@
 //
 // Copyright (c) 2013 Linaro Ltd.
 // Copyright (c) 2011 HiSilicon Ltd.
-// Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
+// Copyright (c) 2020-2021 Huawei Technologies Co., Ltd.
 //
 // Guodong Xu <guodong.xu@linaro.org>
 
@@ -27,34 +27,34 @@ struct hi6421_spmi_reg_info {
        u32                     eco_uA;
 };
 
-static const unsigned int ldo3_voltages[] = {
+static const unsigned int range_1v5_to_2v0[] = {
        1500000, 1550000, 1600000, 1650000,
        1700000, 1725000, 1750000, 1775000,
        1800000, 1825000, 1850000, 1875000,
        1900000, 1925000, 1950000, 2000000
 };
 
-static const unsigned int ldo4_voltages[] = {
+static const unsigned int range_1v725_to_1v9[] = {
        1725000, 1750000, 1775000, 1800000,
        1825000, 1850000, 1875000, 1900000
 };
 
-static const unsigned int ldo9_voltages[] = {
+static const unsigned int range_1v75_to_3v3[] = {
        1750000, 1800000, 1825000, 2800000,
        2850000, 2950000, 3000000, 3300000
 };
 
-static const unsigned int ldo15_voltages[] = {
+static const unsigned int range_1v8_to_3v0[] = {
        1800000, 1850000, 2400000, 2600000,
        2700000, 2850000, 2950000, 3000000
 };
 
-static const unsigned int ldo17_voltages[] = {
+static const unsigned int range_2v5_to_3v3[] = {
        2500000, 2600000, 2700000, 2800000,
        3000000, 3100000, 3200000, 3300000
 };
 
-static const unsigned int ldo34_voltages[] = {
+static const unsigned int range_2v6_to_3v3[] = {
        2600000, 2700000, 2800000, 2900000,
        3000000, 3100000, 3200000, 3300000
 };
@@ -73,14 +73,14 @@ static const unsigned int ldo34_voltages[] = {
  */
 #define HI6421V600_LDO(_id, vtable, ereg, emask, vreg,                        \
                       odelay, etime, ecomask, ecoamp)                         \
-       [HI6421V600_##_id] = {                                                 \
+       [hi6421v600_##_id] = {                                                 \
                .desc = {                                                      \
                        .name           = #_id,                                \
                        .of_match        = of_match_ptr(#_id),                 \
                        .regulators_node = of_match_ptr("regulators"),         \
                        .ops            = &hi6421_spmi_ldo_rops,               \
                        .type           = REGULATOR_VOLTAGE,                   \
-                       .id             = HI6421V600_##_id,                    \
+                       .id             = hi6421v600_##_id,                    \
                        .owner          = THIS_MODULE,                         \
                        .volt_table     = vtable,                              \
                        .n_voltages     = ARRAY_SIZE(vtable),                  \
@@ -185,46 +185,46 @@ static const struct regulator_ops hi6421_spmi_ldo_rops = {
 
 /* HI6421v600 regulators with known registers */
 enum hi6421_spmi_regulator_id {
-       HI6421V600_LDO3,
-       HI6421V600_LDO4,
-       HI6421V600_LDO9,
-       HI6421V600_LDO15,
-       HI6421V600_LDO16,
-       HI6421V600_LDO17,
-       HI6421V600_LDO33,
-       HI6421V600_LDO34,
+       hi6421v600_ldo3,
+       hi6421v600_ldo4,
+       hi6421v600_ldo9,
+       hi6421v600_ldo15,
+       hi6421v600_ldo16,
+       hi6421v600_ldo17,
+       hi6421v600_ldo33,
+       hi6421v600_ldo34,
 };
 
 static struct hi6421_spmi_reg_info regulator_info[] = {
-       HI6421V600_LDO(LDO3, ldo3_voltages,
+       HI6421V600_LDO(ldo3, range_1v5_to_2v0,
                       0x16, 0x01, 0x51,
                       20000, 120,
                       0, 0),
-       HI6421V600_LDO(LDO4, ldo4_voltages,
+       HI6421V600_LDO(ldo4, range_1v725_to_1v9,
                       0x17, 0x01, 0x52,
                       20000, 120,
                       0x10, 10000),
-       HI6421V600_LDO(LDO9, ldo9_voltages,
+       HI6421V600_LDO(ldo9, range_1v75_to_3v3,
                       0x1c, 0x01, 0x57,
                       20000, 360,
                       0x10, 10000),
-       HI6421V600_LDO(LDO15, ldo15_voltages,
+       HI6421V600_LDO(ldo15, range_1v8_to_3v0,
                       0x21, 0x01, 0x5c,
                       20000, 360,
                       0x10, 10000),
-       HI6421V600_LDO(LDO16, ldo15_voltages,
+       HI6421V600_LDO(ldo16, range_1v8_to_3v0,
                       0x22, 0x01, 0x5d,
                       20000, 360,
                       0x10, 10000),
-       HI6421V600_LDO(LDO17, ldo17_voltages,
+       HI6421V600_LDO(ldo17, range_2v5_to_3v3,
                       0x23, 0x01, 0x5e,
                       20000, 120,
                       0x10, 10000),
-       HI6421V600_LDO(LDO33, ldo17_voltages,
+       HI6421V600_LDO(ldo33, range_2v5_to_3v3,
                       0x32, 0x01, 0x6d,
                       20000, 120,
                       0, 0),
-       HI6421V600_LDO(LDO34, ldo34_voltages,
+       HI6421V600_LDO(ldo34, range_2v6_to_3v3,
                       0x33, 0x01, 0x6e,
                       20000, 120,
                       0, 0),
index fabe2e5..5227644 100644 (file)
@@ -184,7 +184,7 @@ static irqreturn_t regulator_notifier_isr(int irq, void *data)
         * If retry_count exceeds the given safety limit we call IC specific die
         * handler which can try disabling regulator(s).
         *
-        * If no die handler is given we will just bug() as a last resort.
+        * If no die handler is given we will just power-off as a last resort.
         *
         * We could try disabling all associated rdevs - but we might shoot
         * ourselves in the head and leave the problematic regulator enabled. So
index 0d35be4..eb80278 100644 (file)
@@ -28,18 +28,15 @@ struct mt6358_regulator_info {
        u32 qi;
        const u32 *index_table;
        unsigned int n_table;
-       u32 vsel_shift;
        u32 da_vsel_reg;
        u32 da_vsel_mask;
-       u32 da_vsel_shift;
        u32 modeset_reg;
        u32 modeset_mask;
-       u32 modeset_shift;
 };
 
 #define MT6358_BUCK(match, vreg, min, max, step,               \
        volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask,   \
-       _da_vsel_shift, _modeset_reg, _modeset_shift)           \
+       _modeset_reg, _modeset_shift)           \
 [MT6358_ID_##vreg] = { \
        .desc = {       \
                .name = #vreg,  \
@@ -61,15 +58,13 @@ struct mt6358_regulator_info {
        .qi = BIT(0),   \
        .da_vsel_reg = _da_vsel_reg,    \
        .da_vsel_mask = _da_vsel_mask,  \
-       .da_vsel_shift = _da_vsel_shift,        \
        .modeset_reg = _modeset_reg,    \
        .modeset_mask = BIT(_modeset_shift),    \
-       .modeset_shift = _modeset_shift \
 }
 
 #define MT6358_LDO(match, vreg, ldo_volt_table,        \
        ldo_index_table, enreg, enbit, vosel,   \
-       vosel_mask, vosel_shift)        \
+       vosel_mask)     \
 [MT6358_ID_##vreg] = { \
        .desc = {       \
                .name = #vreg,  \
@@ -89,12 +84,11 @@ struct mt6358_regulator_info {
        .qi = BIT(15),  \
        .index_table = ldo_index_table, \
        .n_table = ARRAY_SIZE(ldo_index_table), \
-       .vsel_shift = vosel_shift,      \
 }
 
 #define MT6358_LDO1(match, vreg, min, max, step,       \
        volt_ranges, _da_vsel_reg, _da_vsel_mask,       \
-       _da_vsel_shift, vosel, vosel_mask)      \
+       vosel, vosel_mask)      \
 [MT6358_ID_##vreg] = { \
        .desc = {       \
                .name = #vreg,  \
@@ -113,7 +107,6 @@ struct mt6358_regulator_info {
        },      \
        .da_vsel_reg = _da_vsel_reg,    \
        .da_vsel_mask = _da_vsel_mask,  \
-       .da_vsel_shift = _da_vsel_shift,        \
        .status_reg = MT6358_LDO_##vreg##_DBG1, \
        .qi = BIT(0),   \
 }
@@ -260,9 +253,9 @@ static int mt6358_set_voltage_sel(struct regulator_dev *rdev,
        pvol = info->index_table;
 
        idx = pvol[selector];
+       idx <<= ffs(info->desc.vsel_mask) - 1;
        ret = regmap_update_bits(rdev->regmap, info->desc.vsel_reg,
-                                info->desc.vsel_mask,
-                                idx << info->vsel_shift);
+                                info->desc.vsel_mask, idx);
 
        return ret;
 }
@@ -282,7 +275,8 @@ static int mt6358_get_voltage_sel(struct regulator_dev *rdev)
                return ret;
        }
 
-       selector = (selector & info->desc.vsel_mask) >> info->vsel_shift;
+       selector = (selector & info->desc.vsel_mask) >>
+                       (ffs(info->desc.vsel_mask) - 1);
        pvol = info->index_table;
        for (idx = 0; idx < info->desc.n_voltages; idx++) {
                if (pvol[idx] == selector)
@@ -305,7 +299,7 @@ static int mt6358_get_buck_voltage_sel(struct regulator_dev *rdev)
                return ret;
        }
 
-       ret = (regval >> info->da_vsel_shift) & info->da_vsel_mask;
+       ret = (regval & info->da_vsel_mask) >> (ffs(info->da_vsel_mask) - 1);
 
        return ret;
 }
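
The mt6358 hunks above drop the stored *_shift fields and recover the field offset from the mask itself via ffs(mask) - 1. A stand-alone sketch of that pattern; field_get()/field_prep() are illustrative names, and the mask must be non-zero:

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int field_get(unsigned int reg, unsigned int mask)
{
	return (reg & mask) >> (ffs(mask) - 1);
}

static unsigned int field_prep(unsigned int val, unsigned int mask)
{
	return (val << (ffs(mask) - 1)) & mask;
}

int main(void)
{
	unsigned int mask = 0xf00;	/* e.g. a vosel mask from the tables above */

	printf("prep(0xa)=0x%x get=0x%x\n",
	       field_prep(0xa, mask), field_get(field_prep(0xa, mask), mask));
	return 0;
}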
@@ -342,11 +336,10 @@ static int mt6358_regulator_set_mode(struct regulator_dev *rdev,
                return -EINVAL;
        }
 
-       dev_dbg(&rdev->dev, "mt6358 buck set_mode %#x, %#x, %#x, %#x\n",
-               info->modeset_reg, info->modeset_mask,
-               info->modeset_shift, val);
+       dev_dbg(&rdev->dev, "mt6358 buck set_mode %#x, %#x, %#x\n",
+               info->modeset_reg, info->modeset_mask, val);
 
-       val <<= info->modeset_shift;
+       val <<= ffs(info->modeset_mask) - 1;
 
        return regmap_update_bits(rdev->regmap, info->modeset_reg,
                                  info->modeset_mask, val);
@@ -364,7 +357,7 @@ static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev)
                return ret;
        }
 
-       switch ((regval & info->modeset_mask) >> info->modeset_shift) {
+       switch ((regval & info->modeset_mask) >> (ffs(info->modeset_mask) - 1)) {
        case MT6358_BUCK_MODE_AUTO:
                return REGULATOR_MODE_NORMAL;
        case MT6358_BUCK_MODE_FORCE_PWM:
@@ -412,30 +405,30 @@ static const struct regulator_ops mt6358_volt_fixed_ops = {
 static struct mt6358_regulator_info mt6358_regulators[] = {
        MT6358_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500,
                    buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f,
-                   0, MT6358_VDRAM1_ANA_CON0, 8),
+                   MT6358_VDRAM1_ANA_CON0, 8),
        MT6358_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f,
-                   0, MT6358_VCORE_VGPU_ANA_CON0, 1),
+                   MT6358_VCORE_VGPU_ANA_CON0, 1),
        MT6358_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
-                   buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, 0,
+                   buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f,
                    MT6358_VPA_ANA_CON0, 3),
        MT6358_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f,
-                   0, MT6358_VPROC_ANA_CON0, 1),
+                   MT6358_VPROC_ANA_CON0, 1),
        MT6358_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f,
-                   0, MT6358_VPROC_ANA_CON0, 2),
+                   MT6358_VPROC_ANA_CON0, 2),
        MT6358_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250,
-                   buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, 0,
+                   buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f,
                    MT6358_VCORE_VGPU_ANA_CON0, 2),
        MT6358_BUCK("buck_vs2", VS2, 500000, 2087500, 12500,
-                   buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, 0,
+                   buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f,
                    MT6358_VS2_ANA_CON0, 8),
        MT6358_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f,
-                   0, MT6358_VMODEM_ANA_CON0, 8),
+                   MT6358_VMODEM_ANA_CON0, 8),
        MT6358_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500,
-                   buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, 0,
+                   buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f,
                    MT6358_VS1_ANA_CON0, 8),
        MT6358_REG_FIXED("ldo_vrf12", VRF12,
                         MT6358_LDO_VRF12_CON0, 0, 1200000),
@@ -457,49 +450,49 @@ static struct mt6358_regulator_info mt6358_regulators[] = {
        MT6358_REG_FIXED("ldo_vaud28", VAUD28,
                         MT6358_LDO_VAUD28_CON0, 0, 2800000),
        MT6358_LDO("ldo_vdram2", VDRAM2, vdram2_voltages, vdram2_idx,
-                  MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0xf, 0),
+                  MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0xf),
        MT6358_LDO("ldo_vsim1", VSIM1, vsim_voltages, vsim_idx,
-                  MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vibr", VIBR, vibr_voltages, vibr_idx,
-                  MT6358_LDO_VIBR_CON0, 0, MT6358_VIBR_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VIBR_CON0, 0, MT6358_VIBR_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vusb", VUSB, vusb_voltages, vusb_idx,
-                  MT6358_LDO_VUSB_CON0_0, 0, MT6358_VUSB_ANA_CON0, 0x700, 8),
+                  MT6358_LDO_VUSB_CON0_0, 0, MT6358_VUSB_ANA_CON0, 0x700),
        MT6358_LDO("ldo_vcamd", VCAMD, vcamd_voltages, vcamd_idx,
-                  MT6358_LDO_VCAMD_CON0, 0, MT6358_VCAMD_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VCAMD_CON0, 0, MT6358_VCAMD_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vefuse", VEFUSE, vefuse_voltages, vefuse_idx,
-                  MT6358_LDO_VEFUSE_CON0, 0, MT6358_VEFUSE_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VEFUSE_CON0, 0, MT6358_VEFUSE_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vmch", VMCH, vmch_vemc_voltages, vmch_vemc_idx,
-                  MT6358_LDO_VMCH_CON0, 0, MT6358_VMCH_ANA_CON0, 0x700, 8),
+                  MT6358_LDO_VMCH_CON0, 0, MT6358_VMCH_ANA_CON0, 0x700),
        MT6358_LDO("ldo_vcama1", VCAMA1, vcama_voltages, vcama_idx,
-                  MT6358_LDO_VCAMA1_CON0, 0, MT6358_VCAMA1_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VCAMA1_CON0, 0, MT6358_VCAMA1_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vemc", VEMC, vmch_vemc_voltages, vmch_vemc_idx,
-                  MT6358_LDO_VEMC_CON0, 0, MT6358_VEMC_ANA_CON0, 0x700, 8),
+                  MT6358_LDO_VEMC_CON0, 0, MT6358_VEMC_ANA_CON0, 0x700),
        MT6358_LDO("ldo_vcn33_bt", VCN33_BT, vcn33_bt_wifi_voltages,
                   vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_0,
-                  0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+                  0, MT6358_VCN33_ANA_CON0, 0x300),
        MT6358_LDO("ldo_vcn33_wifi", VCN33_WIFI, vcn33_bt_wifi_voltages,
                   vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_1,
-                  0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+                  0, MT6358_VCN33_ANA_CON0, 0x300),
        MT6358_LDO("ldo_vcama2", VCAMA2, vcama_voltages, vcama_idx,
-                  MT6358_LDO_VCAMA2_CON0, 0, MT6358_VCAMA2_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VCAMA2_CON0, 0, MT6358_VCAMA2_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vmc", VMC, vmc_voltages, vmc_idx,
-                  MT6358_LDO_VMC_CON0, 0, MT6358_VMC_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VMC_CON0, 0, MT6358_VMC_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vldo28", VLDO28, vldo28_voltages, vldo28_idx,
                   MT6358_LDO_VLDO28_CON0_0, 0,
-                  MT6358_VLDO28_ANA_CON0, 0x300, 8),
+                  MT6358_VLDO28_ANA_CON0, 0x300),
        MT6358_LDO("ldo_vsim2", VSIM2, vsim_voltages, vsim_idx,
-                  MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00),
        MT6358_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON0, 0x7f),
        MT6358_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON2, 0x7f),
        MT6358_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON3, 0x7f),
        MT6358_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON1, 0x7f),
 };
 
index 7ce0bd3..de3b046 100644
@@ -27,7 +27,6 @@
  * @qi: Mask for query enable signal status of regulators.
  * @modeset_reg: for operating AUTO/PWM mode register.
  * @modeset_mask: MASK for operating modeset register.
- * @modeset_shift: SHIFT for operating modeset register.
  */
 struct mt6359_regulator_info {
        struct regulator_desc desc;
@@ -35,10 +34,8 @@ struct mt6359_regulator_info {
        u32 qi;
        u32 modeset_reg;
        u32 modeset_mask;
-       u32 modeset_shift;
        u32 lp_mode_reg;
        u32 lp_mode_mask;
-       u32 lp_mode_shift;
 };
 
 #define MT6359_BUCK(match, _name, min, max, step,              \
@@ -68,10 +65,8 @@ struct mt6359_regulator_info {
        .qi = BIT(0),                                           \
        .lp_mode_reg = _lp_mode_reg,                            \
        .lp_mode_mask = BIT(_lp_mode_shift),                    \
-       .lp_mode_shift = _lp_mode_shift,                        \
        .modeset_reg = _modeset_reg,                            \
        .modeset_mask = BIT(_modeset_shift),                    \
-       .modeset_shift = _modeset_shift                         \
 }
 
 #define MT6359_LDO_LINEAR(match, _name, min, max, step,                \
@@ -282,8 +277,10 @@ static unsigned int mt6359_regulator_get_mode(struct regulator_dev *rdev)
                return ret;
        }
 
-       if ((regval & info->modeset_mask) >> info->modeset_shift ==
-               MT6359_BUCK_MODE_FORCE_PWM)
+       regval &= info->modeset_mask;
+       regval >>= ffs(info->modeset_mask) - 1;
+
+       if (regval == MT6359_BUCK_MODE_FORCE_PWM)
                return REGULATOR_MODE_FAST;
 
        ret = regmap_read(rdev->regmap, info->lp_mode_reg, &regval);
@@ -310,7 +307,7 @@ static int mt6359_regulator_set_mode(struct regulator_dev *rdev,
        switch (mode) {
        case REGULATOR_MODE_FAST:
                val = MT6359_BUCK_MODE_FORCE_PWM;
-               val <<= info->modeset_shift;
+               val <<= ffs(info->modeset_mask) - 1;
                ret = regmap_update_bits(rdev->regmap,
                                         info->modeset_reg,
                                         info->modeset_mask,
@@ -319,14 +316,14 @@ static int mt6359_regulator_set_mode(struct regulator_dev *rdev,
        case REGULATOR_MODE_NORMAL:
                if (curr_mode == REGULATOR_MODE_FAST) {
                        val = MT6359_BUCK_MODE_AUTO;
-                       val <<= info->modeset_shift;
+                       val <<= ffs(info->modeset_mask) - 1;
                        ret = regmap_update_bits(rdev->regmap,
                                                 info->modeset_reg,
                                                 info->modeset_mask,
                                                 val);
                } else if (curr_mode == REGULATOR_MODE_IDLE) {
                        val = MT6359_BUCK_MODE_NORMAL;
-                       val <<= info->lp_mode_shift;
+                       val <<= ffs(info->lp_mode_mask) - 1;
                        ret = regmap_update_bits(rdev->regmap,
                                                 info->lp_mode_reg,
                                                 info->lp_mode_mask,
@@ -336,7 +333,7 @@ static int mt6359_regulator_set_mode(struct regulator_dev *rdev,
                break;
        case REGULATOR_MODE_IDLE:
                val = MT6359_BUCK_MODE_LP >> 1;
-               val <<= info->lp_mode_shift;
+               val <<= ffs(info->lp_mode_mask) - 1;
                ret = regmap_update_bits(rdev->regmap,
                                         info->lp_mode_reg,
                                         info->lp_mode_mask,
index 0a30df5..b9bf7ad 100644
@@ -32,7 +32,6 @@ struct mt6397_regulator_info {
        u32 vselctrl_mask;
        u32 modeset_reg;
        u32 modeset_mask;
-       u32 modeset_shift;
 };
 
 #define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg,   \
@@ -61,7 +60,6 @@ struct mt6397_regulator_info {
        .vselctrl_mask = BIT(1),                                        \
        .modeset_reg = _modeset_reg,                                    \
        .modeset_mask = BIT(_modeset_shift),                            \
-       .modeset_shift = _modeset_shift                                 \
 }
 
 #define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel,   \
@@ -175,11 +173,11 @@ static int mt6397_regulator_set_mode(struct regulator_dev *rdev,
                goto err_mode;
        }
 
-       dev_dbg(&rdev->dev, "mt6397 buck set_mode %#x, %#x, %#x, %#x\n",
-               info->modeset_reg, info->modeset_mask,
-               info->modeset_shift, val);
+       dev_dbg(&rdev->dev, "mt6397 buck set_mode %#x, %#x, %#x\n",
+               info->modeset_reg, info->modeset_mask, val);
+
+       val <<= ffs(info->modeset_mask) - 1;
 
-       val <<= info->modeset_shift;
        ret = regmap_update_bits(rdev->regmap, info->modeset_reg,
                                 info->modeset_mask, val);
 err_mode:
@@ -204,7 +202,10 @@ static unsigned int mt6397_regulator_get_mode(struct regulator_dev *rdev)
                return ret;
        }
 
-       switch ((regval & info->modeset_mask) >> info->modeset_shift) {
+       regval &= info->modeset_mask;
+       regval >>= ffs(info->modeset_mask) - 1;
+
+       switch (regval) {
        case MT6397_BUCK_MODE_AUTO:
                return REGULATOR_MODE_NORMAL;
        case MT6397_BUCK_MODE_FORCE_PWM:
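
The mt6358, mt6359 and mt6397 hunks above all drop the stored *_shift fields and instead derive the shift position from the mask at run time as ffs(mask) - 1. A minimal standalone sketch of that pack/unpack idiom, assuming an invented two-bit field at bits 5:4 (MODE_MASK and the helper names below are illustrative, not taken from any of these drivers):

#include <stdio.h>
#include <strings.h>            /* ffs() */

#define MODE_MASK       0x30u   /* hypothetical field occupying bits 5:4 */

/* Pack a field value into its register position using only the mask. */
static unsigned int field_to_reg(unsigned int mask, unsigned int val)
{
        return (val << (ffs(mask) - 1)) & mask;
}

/* Extract a field value from a register value using only the mask. */
static unsigned int reg_to_field(unsigned int mask, unsigned int reg)
{
        return (reg & mask) >> (ffs(mask) - 1);
}

int main(void)
{
        unsigned int reg = field_to_reg(MODE_MASK, 0x2);        /* 0x20 */

        printf("reg=%#x field=%#x\n", reg, reg_to_field(MODE_MASK, reg));
        return 0;
}
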
index 0e73116..da4cf5a 100644
 #include <linux/mfd/rt5033-private.h>
 #include <linux/regulator/of_regulator.h>
 
+static const struct linear_range rt5033_buck_ranges[] = {
+       REGULATOR_LINEAR_RANGE(1000000, 0, 20, 100000),
+       REGULATOR_LINEAR_RANGE(3000000, 21, 31, 0),
+};
+
+static const struct linear_range rt5033_ldo_ranges[] = {
+       REGULATOR_LINEAR_RANGE(1200000, 0, 18, 100000),
+       REGULATOR_LINEAR_RANGE(3000000, 19, 31, 0),
+};
+
 static const struct regulator_ops rt5033_safe_ldo_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
@@ -24,8 +34,7 @@ static const struct regulator_ops rt5033_buck_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
-       .list_voltage           = regulator_list_voltage_linear,
-       .map_voltage            = regulator_map_voltage_linear,
+       .list_voltage           = regulator_list_voltage_linear_range,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
 };
@@ -40,8 +49,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
                .type           = REGULATOR_VOLTAGE,
                .owner          = THIS_MODULE,
                .n_voltages     = RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM,
-               .min_uV         = RT5033_REGULATOR_BUCK_VOLTAGE_MIN,
-               .uV_step        = RT5033_REGULATOR_BUCK_VOLTAGE_STEP,
+               .linear_ranges  = rt5033_buck_ranges,
+               .n_linear_ranges = ARRAY_SIZE(rt5033_buck_ranges),
                .enable_reg     = RT5033_REG_CTRL,
                .enable_mask    = RT5033_CTRL_EN_BUCK_MASK,
                .vsel_reg       = RT5033_REG_BUCK_CTRL,
@@ -56,8 +65,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
                .type           = REGULATOR_VOLTAGE,
                .owner          = THIS_MODULE,
                .n_voltages     = RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM,
-               .min_uV         = RT5033_REGULATOR_LDO_VOLTAGE_MIN,
-               .uV_step        = RT5033_REGULATOR_LDO_VOLTAGE_STEP,
+               .linear_ranges  = rt5033_ldo_ranges,
+               .n_linear_ranges = ARRAY_SIZE(rt5033_ldo_ranges),
                .enable_reg     = RT5033_REG_CTRL,
                .enable_mask    = RT5033_CTRL_EN_LDO_MASK,
                .vsel_reg       = RT5033_REG_LDO_CTRL,
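
The rt5033 hunk above replaces the single min_uV/uV_step description with two linear ranges so that every selector past the last real step reads back as the 3.0 V ceiling. The toy program below resolves a selector against such a two-segment table; struct linear_range_ex and sel_to_uv() are illustrative stand-ins, not the regulator core's helpers:

#include <stdio.h>

struct linear_range_ex {                /* illustrative mirror of a linear range entry */
        unsigned int min_uv;
        unsigned int min_sel;
        unsigned int max_sel;
        unsigned int step_uv;
};

static const struct linear_range_ex buck_ranges[] = {
        { 1000000,  0, 20, 100000 },    /* 1.0 V .. 3.0 V in 100 mV steps */
        { 3000000, 21, 31,      0 },    /* selectors 21..31 all map to 3.0 V */
};

static int sel_to_uv(const struct linear_range_ex *r, int n, unsigned int sel)
{
        int i;

        for (i = 0; i < n; i++)
                if (sel >= r[i].min_sel && sel <= r[i].max_sel)
                        return r[i].min_uv + (sel - r[i].min_sel) * r[i].step_uv;
        return -1;                      /* selector out of range */
}

int main(void)
{
        /* Both selectors resolve to 3000000 uV thanks to the flat top range. */
        printf("%d %d\n", sel_to_uv(buck_ranges, 2, 20), sel_to_uv(buck_ranges, 2, 25));
        return 0;
}
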
index d3299a7..cb22a20 100644
@@ -144,7 +144,7 @@ static int rt6245_init_device_properties(struct device *dev)
 static int rt6245_reg_write(void *context, unsigned int reg, unsigned int val)
 {
        struct i2c_client *i2c = context;
-       const u8 func_base[] = { 0x6F, 0x73, 0x78, 0x61, 0x7C, 0 };
+       static const u8 func_base[] = { 0x6F, 0x73, 0x78, 0x61, 0x7C, 0 };
        unsigned int code, bit_count;
 
        code = func_base[reg];
diff --git a/drivers/regulator/rtq2134-regulator.c b/drivers/regulator/rtq2134-regulator.c
new file mode 100644
index 0000000..f21e3f8
--- /dev/null
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum {
+       RTQ2134_IDX_BUCK1 = 0,
+       RTQ2134_IDX_BUCK2,
+       RTQ2134_IDX_BUCK3,
+       RTQ2134_IDX_MAX
+};
+
+#define RTQ2134_AUTO_MODE              0
+#define RTQ2134_FCCM_MODE              1
+
+#define RTQ2134_BUCK_DVS0_CTRL         0
+#define RTQ2134_BUCK_VSEL_CTRL         2
+
+#define RTQ2134_REG_IO_CHIPNAME                0x01
+#define RTQ2134_REG_FLT_RECORDTEMP     0x13
+#define RTQ2134_REG_FLT_RECORDBUCK(_id)        (0x14 + (_id))
+#define RTQ2134_REG_FLT_BUCKCTRL(_id)  (0x37 + (_id))
+#define RTQ2134_REG_BUCK1_CFG0         0x42
+#define RTQ2134_REG_BUCK1_DVS0CFG1     0x48
+#define RTQ2134_REG_BUCK1_DVS0CFG0     0x49
+#define RTQ2134_REG_BUCK1_DVS1CFG1     0x4A
+#define RTQ2134_REG_BUCK1_DVS1CFG0     0x4B
+#define RTQ2134_REG_BUCK1_DVSCFG       0x52
+#define RTQ2134_REG_BUCK1_RSPCFG       0x54
+#define RTQ2134_REG_BUCK2_CFG0         0x5F
+#define RTQ2134_REG_BUCK2_DVS0CFG1     0x62
+#define RTQ2134_REG_BUCK2_DVS0CFG0     0x63
+#define RTQ2134_REG_BUCK2_DVS1CFG1     0x64
+#define RTQ2134_REG_BUCK2_DVS1CFG0     0x65
+#define RTQ2134_REG_BUCK2_DVSCFG       0x6C
+#define RTQ2134_REG_BUCK2_RSPCFG       0x6E
+#define RTQ2134_REG_BUCK3_CFG0         0x79
+#define RTQ2134_REG_BUCK3_DVS0CFG1     0x7C
+#define RTQ2134_REG_BUCK3_DVS0CFG0     0x7D
+#define RTQ2134_REG_BUCK3_DVS1CFG1     0x7E
+#define RTQ2134_REG_BUCK3_DVS1CFG0     0x7F
+#define RTQ2134_REG_BUCK3_DVSCFG       0x86
+#define RTQ2134_REG_BUCK3_RSPCFG       0x88
+#define RTQ2134_REG_BUCK3_SLEWCTRL     0x89
+
+#define RTQ2134_VOUT_MAXNUM            256
+#define RTQ2134_VOUT_MASK              0xFF
+#define RTQ2134_VOUTEN_MASK            BIT(0)
+#define RTQ2134_ACTDISCHG_MASK         BIT(0)
+#define RTQ2134_RSPUP_MASK             GENMASK(6, 4)
+#define RTQ2134_FCCM_MASK              BIT(5)
+#define RTQ2134_UVHICCUP_MASK          BIT(3)
+#define RTQ2134_BUCKDVS_CTRL_MASK      GENMASK(1, 0)
+#define RTQ2134_CHIPOT_MASK            BIT(2)
+#define RTQ2134_BUCKOV_MASK            BIT(5)
+#define RTQ2134_BUCKUV_MASK            BIT(4)
+
+struct rtq2134_regulator_desc {
+       struct regulator_desc desc;
+       /* Extension for proprietary register and mask */
+       unsigned int mode_reg;
+       unsigned int mode_mask;
+       unsigned int suspend_enable_reg;
+       unsigned int suspend_enable_mask;
+       unsigned int suspend_vsel_reg;
+       unsigned int suspend_vsel_mask;
+       unsigned int suspend_mode_reg;
+       unsigned int suspend_mode_mask;
+       unsigned int dvs_ctrl_reg;
+};
+
+static int rtq2134_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int val;
+
+       if (mode == REGULATOR_MODE_NORMAL)
+               val = RTQ2134_AUTO_MODE;
+       else if (mode == REGULATOR_MODE_FAST)
+               val = RTQ2134_FCCM_MODE;
+       else
+               return -EINVAL;
+
+       val <<= ffs(desc->mode_mask) - 1;
+       return regmap_update_bits(rdev->regmap, desc->mode_reg, desc->mode_mask,
+                                 val);
+}
+
+static unsigned int rtq2134_buck_get_mode(struct regulator_dev *rdev)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int mode;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, desc->mode_reg, &mode);
+       if (ret)
+               return ret;
+
+       if (mode & desc->mode_mask)
+               return REGULATOR_MODE_FAST;
+       return REGULATOR_MODE_NORMAL;
+}
+
+static int rtq2134_buck_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       int sel;
+
+       sel = regulator_map_voltage_linear_range(rdev, uV, uV);
+       if (sel < 0)
+               return sel;
+
+       sel <<= ffs(desc->suspend_vsel_mask) - 1;
+
+       return regmap_update_bits(rdev->regmap, desc->suspend_vsel_reg,
+                                 desc->suspend_vsel_mask, sel);
+}
+
+static int rtq2134_buck_set_suspend_enable(struct regulator_dev *rdev)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int val = desc->suspend_enable_mask;
+
+       return regmap_update_bits(rdev->regmap, desc->suspend_enable_reg,
+                                 desc->suspend_enable_mask, val);
+}
+
+static int rtq2134_buck_set_suspend_disable(struct regulator_dev *rdev)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+
+       return regmap_update_bits(rdev->regmap, desc->suspend_enable_reg,
+                                 desc->suspend_enable_mask, 0);
+}
+
+static int rtq2134_buck_set_suspend_mode(struct regulator_dev *rdev,
+                                        unsigned int mode)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int val;
+
+       if (mode == REGULATOR_MODE_NORMAL)
+               val = RTQ2134_AUTO_MODE;
+       else if (mode == REGULATOR_MODE_FAST)
+               val = RTQ2134_FCCM_MODE;
+       else
+               return -EINVAL;
+
+       val <<= ffs(desc->suspend_mode_mask) - 1;
+       return regmap_update_bits(rdev->regmap, desc->suspend_mode_reg,
+                                 desc->suspend_mode_mask, val);
+}
+
+static int rtq2134_buck_get_error_flags(struct regulator_dev *rdev,
+                                       unsigned int *flags)
+{
+       int rid = rdev_get_id(rdev);
+       unsigned int chip_error, buck_error, events = 0;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, RTQ2134_REG_FLT_RECORDTEMP,
+                         &chip_error);
+       if (ret) {
+               dev_err(&rdev->dev, "Failed to get chip error flag\n");
+               return ret;
+       }
+
+       ret = regmap_read(rdev->regmap, RTQ2134_REG_FLT_RECORDBUCK(rid),
+                         &buck_error);
+       if (ret) {
+               dev_err(&rdev->dev, "Failed to get buck error flag\n");
+               return ret;
+       }
+
+       if (chip_error & RTQ2134_CHIPOT_MASK)
+               events |= REGULATOR_ERROR_OVER_TEMP;
+
+       if (buck_error & RTQ2134_BUCKUV_MASK)
+               events |= REGULATOR_ERROR_UNDER_VOLTAGE;
+
+       if (buck_error & RTQ2134_BUCKOV_MASK)
+               events |= REGULATOR_ERROR_REGULATION_OUT;
+
+       *flags = events;
+       return 0;
+}
+
+static const struct regulator_ops rtq2134_buck_ops = {
+       .list_voltage = regulator_list_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .set_active_discharge = regulator_set_active_discharge_regmap,
+       .set_ramp_delay = regulator_set_ramp_delay_regmap,
+       .set_mode = rtq2134_buck_set_mode,
+       .get_mode = rtq2134_buck_get_mode,
+       .set_suspend_voltage = rtq2134_buck_set_suspend_voltage,
+       .set_suspend_enable = rtq2134_buck_set_suspend_enable,
+       .set_suspend_disable = rtq2134_buck_set_suspend_disable,
+       .set_suspend_mode = rtq2134_buck_set_suspend_mode,
+       .get_error_flags = rtq2134_buck_get_error_flags,
+};
+
+static const struct linear_range rtq2134_buck_vout_ranges[] = {
+       REGULATOR_LINEAR_RANGE(300000, 0, 200, 5000),
+       REGULATOR_LINEAR_RANGE(1310000, 201, 255, 10000)
+};
+
+static unsigned int rtq2134_buck_of_map_mode(unsigned int mode)
+{
+       switch (mode) {
+       case RTQ2134_AUTO_MODE:
+               return REGULATOR_MODE_NORMAL;
+       case RTQ2134_FCCM_MODE:
+               return REGULATOR_MODE_FAST;
+       }
+
+       return REGULATOR_MODE_INVALID;
+}
+
+static int rtq2134_buck_of_parse_cb(struct device_node *np,
+                                   const struct regulator_desc *desc,
+                                   struct regulator_config *cfg)
+{
+       struct rtq2134_regulator_desc *rdesc =
+               (struct rtq2134_regulator_desc *)desc;
+       int rid = desc->id;
+       bool uv_shutdown, vsel_dvs;
+       unsigned int val;
+       int ret;
+
+       vsel_dvs = of_property_read_bool(np, "richtek,use-vsel-dvs");
+       if (vsel_dvs)
+               val = RTQ2134_BUCK_VSEL_CTRL;
+       else
+               val = RTQ2134_BUCK_DVS0_CTRL;
+
+       ret = regmap_update_bits(cfg->regmap, rdesc->dvs_ctrl_reg,
+                                RTQ2134_BUCKDVS_CTRL_MASK, val);
+       if (ret)
+               return ret;
+
+       uv_shutdown = of_property_read_bool(np, "richtek,uv-shutdown");
+       if (uv_shutdown)
+               val = 0;
+       else
+               val = RTQ2134_UVHICCUP_MASK;
+
+       return regmap_update_bits(cfg->regmap, RTQ2134_REG_FLT_BUCKCTRL(rid),
+                                 RTQ2134_UVHICCUP_MASK, val);
+}
+
+static const unsigned int rtq2134_buck_ramp_delay_table[] = {
+       0, 16000, 0, 8000, 4000, 2000, 1000, 500
+};
+
+#define RTQ2134_BUCK_DESC(_id) { \
+       .desc = { \
+               .name = "rtq2134_buck" #_id, \
+               .of_match = of_match_ptr("buck" #_id), \
+               .regulators_node = of_match_ptr("regulators"), \
+               .id = RTQ2134_IDX_BUCK##_id, \
+               .type = REGULATOR_VOLTAGE, \
+               .owner = THIS_MODULE, \
+               .ops = &rtq2134_buck_ops, \
+               .n_voltages = RTQ2134_VOUT_MAXNUM, \
+               .linear_ranges = rtq2134_buck_vout_ranges, \
+               .n_linear_ranges = ARRAY_SIZE(rtq2134_buck_vout_ranges), \
+               .vsel_reg = RTQ2134_REG_BUCK##_id##_DVS0CFG1, \
+               .vsel_mask = RTQ2134_VOUT_MASK, \
+               .enable_reg = RTQ2134_REG_BUCK##_id##_DVS0CFG0, \
+               .enable_mask = RTQ2134_VOUTEN_MASK, \
+               .active_discharge_reg = RTQ2134_REG_BUCK##_id##_CFG0, \
+               .active_discharge_mask = RTQ2134_ACTDISCHG_MASK, \
+               .ramp_reg = RTQ2134_REG_BUCK##_id##_RSPCFG, \
+               .ramp_mask = RTQ2134_RSPUP_MASK, \
+               .ramp_delay_table = rtq2134_buck_ramp_delay_table, \
+               .n_ramp_values = ARRAY_SIZE(rtq2134_buck_ramp_delay_table), \
+               .of_map_mode = rtq2134_buck_of_map_mode, \
+               .of_parse_cb = rtq2134_buck_of_parse_cb, \
+       }, \
+       .mode_reg = RTQ2134_REG_BUCK##_id##_DVS0CFG0, \
+       .mode_mask = RTQ2134_FCCM_MASK, \
+       .suspend_mode_reg = RTQ2134_REG_BUCK##_id##_DVS1CFG0, \
+       .suspend_mode_mask = RTQ2134_FCCM_MASK, \
+       .suspend_enable_reg = RTQ2134_REG_BUCK##_id##_DVS1CFG0, \
+       .suspend_enable_mask = RTQ2134_VOUTEN_MASK, \
+       .suspend_vsel_reg = RTQ2134_REG_BUCK##_id##_DVS1CFG1, \
+       .suspend_vsel_mask = RTQ2134_VOUT_MASK, \
+       .dvs_ctrl_reg = RTQ2134_REG_BUCK##_id##_DVSCFG, \
+}
+
+static const struct rtq2134_regulator_desc rtq2134_regulator_descs[] = {
+       RTQ2134_BUCK_DESC(1),
+       RTQ2134_BUCK_DESC(2),
+       RTQ2134_BUCK_DESC(3)
+};
+
+static bool rtq2134_is_accessible_reg(struct device *dev, unsigned int reg)
+{
+       if (reg >= RTQ2134_REG_IO_CHIPNAME && reg <= RTQ2134_REG_BUCK3_SLEWCTRL)
+               return true;
+       return false;
+}
+
+static const struct regmap_config rtq2134_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = RTQ2134_REG_BUCK3_SLEWCTRL,
+
+       .readable_reg = rtq2134_is_accessible_reg,
+       .writeable_reg = rtq2134_is_accessible_reg,
+};
+
+static int rtq2134_probe(struct i2c_client *i2c)
+{
+       struct regmap *regmap;
+       struct regulator_dev *rdev;
+       struct regulator_config regulator_cfg = {};
+       int i;
+
+       regmap = devm_regmap_init_i2c(i2c, &rtq2134_regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(&i2c->dev, "Failed to allocate regmap\n");
+               return PTR_ERR(regmap);
+       }
+
+       regulator_cfg.dev = &i2c->dev;
+       regulator_cfg.regmap = regmap;
+       for (i = 0; i < ARRAY_SIZE(rtq2134_regulator_descs); i++) {
+               rdev = devm_regulator_register(&i2c->dev,
+                                              &rtq2134_regulator_descs[i].desc,
+                                              &regulator_cfg);
+               if (IS_ERR(rdev)) {
+                       dev_err(&i2c->dev, "Failed to init %d regulator\n", i);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static const struct of_device_id __maybe_unused rtq2134_device_tables[] = {
+       { .compatible = "richtek,rtq2134", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, rtq2134_device_tables);
+
+static struct i2c_driver rtq2134_driver = {
+       .driver = {
+               .name = "rtq2134",
+               .of_match_table = rtq2134_device_tables,
+       },
+       .probe_new = rtq2134_probe,
+};
+module_i2c_driver(rtq2134_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RTQ2134 Regulator Driver");
+MODULE_LICENSE("GPL v2");
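
rtq2134_buck_set_suspend_voltage() above maps a requested microvolt value back to a selector before writing it to the DVS1 configuration register. Below is a rough standalone model of that reverse lookup over the two buck output ranges declared in this driver; uv_to_sel() is an invented helper, not the core's regulator_map_voltage_linear_range():

#include <stdio.h>

struct range { unsigned int min_uv, min_sel, max_sel, step_uv; };

static const struct range vout_ranges[] = {
        {  300000,   0, 200,  5000 },   /* 0.3 V .. 1.3 V in 5 mV steps    */
        { 1310000, 201, 255, 10000 },   /* 1.31 V .. 1.85 V in 10 mV steps */
};

/* Return the lowest selector whose voltage is >= the requested target. */
static int uv_to_sel(unsigned int uv)
{
        unsigned int i;

        for (i = 0; i < 2; i++) {
                const struct range *r = &vout_ranges[i];
                unsigned int max_uv = r->min_uv +
                                      (r->max_sel - r->min_sel) * r->step_uv;

                if (uv > max_uv)
                        continue;
                if (uv <= r->min_uv)
                        return r->min_sel;
                return r->min_sel + (uv - r->min_uv + r->step_uv - 1) / r->step_uv;
        }
        return -1;                      /* above the top range: not representable */
}

int main(void)
{
        printf("%d %d\n", uv_to_sel(900000), uv_to_sel(1320000));       /* 120 202 */
        return 0;
}
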
diff --git a/drivers/regulator/rtq6752-regulator.c b/drivers/regulator/rtq6752-regulator.c
new file mode 100644
index 0000000..609d3fc
--- /dev/null
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum {
+       RTQ6752_IDX_PAVDD = 0,
+       RTQ6752_IDX_NAVDD = 1,
+       RTQ6752_IDX_MAX
+};
+
+#define RTQ6752_REG_PAVDD      0x00
+#define RTQ6752_REG_NAVDD      0x01
+#define RTQ6752_REG_PAVDDONDLY 0x07
+#define RTQ6752_REG_PAVDDSSTIME        0x08
+#define RTQ6752_REG_NAVDDONDLY 0x0D
+#define RTQ6752_REG_NAVDDSSTIME        0x0E
+#define RTQ6752_REG_OPTION1    0x12
+#define RTQ6752_REG_CHSWITCH   0x16
+#define RTQ6752_REG_FAULT      0x1D
+
+#define RTQ6752_VOUT_MASK      GENMASK(5, 0)
+#define RTQ6752_NAVDDEN_MASK   BIT(3)
+#define RTQ6752_PAVDDEN_MASK   BIT(0)
+#define RTQ6752_PAVDDAD_MASK   BIT(4)
+#define RTQ6752_NAVDDAD_MASK   BIT(3)
+#define RTQ6752_PAVDDF_MASK    BIT(3)
+#define RTQ6752_NAVDDF_MASK    BIT(0)
+#define RTQ6752_ENABLE_MASK    (BIT(RTQ6752_IDX_MAX) - 1)
+
+#define RTQ6752_VOUT_MINUV     5000000
+#define RTQ6752_VOUT_STEPUV    50000
+#define RTQ6752_VOUT_NUM       47
+#define RTQ6752_I2CRDY_TIMEUS  1000
+#define RTQ6752_MINSS_TIMEUS   5000
+
+struct rtq6752_priv {
+       struct regmap *regmap;
+       struct gpio_desc *enable_gpio;
+       struct mutex lock;
+       unsigned char enable_flag;
+};
+
+static int rtq6752_set_vdd_enable(struct regulator_dev *rdev)
+{
+       struct rtq6752_priv *priv = rdev_get_drvdata(rdev);
+       int rid = rdev_get_id(rdev), ret;
+
+       mutex_lock(&priv->lock);
+       if (priv->enable_gpio) {
+               gpiod_set_value(priv->enable_gpio, 1);
+
+               usleep_range(RTQ6752_I2CRDY_TIMEUS,
+                            RTQ6752_I2CRDY_TIMEUS + 100);
+       }
+
+       if (!priv->enable_flag) {
+               regcache_cache_only(priv->regmap, false);
+               ret = regcache_sync(priv->regmap);
+               if (ret) {
+                       mutex_unlock(&priv->lock);
+                       return ret;
+               }
+       }
+
+       priv->enable_flag |= BIT(rid);
+       mutex_unlock(&priv->lock);
+
+       return regulator_enable_regmap(rdev);
+}
+
+static int rtq6752_set_vdd_disable(struct regulator_dev *rdev)
+{
+       struct rtq6752_priv *priv = rdev_get_drvdata(rdev);
+       int rid = rdev_get_id(rdev), ret;
+
+       ret = regulator_disable_regmap(rdev);
+       if (ret)
+               return ret;
+
+       mutex_lock(&priv->lock);
+       priv->enable_flag &= ~BIT(rid);
+
+       if (!priv->enable_flag) {
+               regcache_cache_only(priv->regmap, true);
+               regcache_mark_dirty(priv->regmap);
+       }
+
+       if (priv->enable_gpio)
+               gpiod_set_value(priv->enable_gpio, 0);
+
+       mutex_unlock(&priv->lock);
+
+       return 0;
+}
+
+static int rtq6752_get_error_flags(struct regulator_dev *rdev,
+                                  unsigned int *flags)
+{
+       unsigned int val, events = 0;
+       const unsigned int fault_mask[] = {
+               RTQ6752_PAVDDF_MASK, RTQ6752_NAVDDF_MASK };
+       int rid = rdev_get_id(rdev), ret;
+
+       ret = regmap_read(rdev->regmap, RTQ6752_REG_FAULT, &val);
+       if (ret)
+               return ret;
+
+       if (val & fault_mask[rid])
+               events = REGULATOR_ERROR_REGULATION_OUT;
+
+       *flags = events;
+       return 0;
+}
+
+static const struct regulator_ops rtq6752_regulator_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .enable = rtq6752_set_vdd_enable,
+       .disable = rtq6752_set_vdd_disable,
+       .is_enabled = regulator_is_enabled_regmap,
+       .set_active_discharge = regulator_set_active_discharge_regmap,
+       .get_error_flags = rtq6752_get_error_flags,
+};
+
+static const struct regulator_desc rtq6752_regulator_descs[] = {
+       {
+               .name = "rtq6752-pavdd",
+               .of_match = of_match_ptr("pavdd"),
+               .regulators_node = of_match_ptr("regulators"),
+               .id = RTQ6752_IDX_PAVDD,
+               .n_voltages = RTQ6752_VOUT_NUM,
+               .ops = &rtq6752_regulator_ops,
+               .owner = THIS_MODULE,
+               .min_uV = RTQ6752_VOUT_MINUV,
+               .uV_step = RTQ6752_VOUT_STEPUV,
+               .enable_time = RTQ6752_MINSS_TIMEUS,
+               .vsel_reg = RTQ6752_REG_PAVDD,
+               .vsel_mask = RTQ6752_VOUT_MASK,
+               .enable_reg = RTQ6752_REG_CHSWITCH,
+               .enable_mask = RTQ6752_PAVDDEN_MASK,
+               .active_discharge_reg = RTQ6752_REG_OPTION1,
+               .active_discharge_mask = RTQ6752_PAVDDAD_MASK,
+               .active_discharge_off = RTQ6752_PAVDDAD_MASK,
+       },
+       {
+               .name = "rtq6752-navdd",
+               .of_match = of_match_ptr("navdd"),
+               .regulators_node = of_match_ptr("regulators"),
+               .id = RTQ6752_IDX_NAVDD,
+               .n_voltages = RTQ6752_VOUT_NUM,
+               .ops = &rtq6752_regulator_ops,
+               .owner = THIS_MODULE,
+               .min_uV = RTQ6752_VOUT_MINUV,
+               .uV_step = RTQ6752_VOUT_STEPUV,
+               .enable_time = RTQ6752_MINSS_TIMEUS,
+               .vsel_reg = RTQ6752_REG_NAVDD,
+               .vsel_mask = RTQ6752_VOUT_MASK,
+               .enable_reg = RTQ6752_REG_CHSWITCH,
+               .enable_mask = RTQ6752_NAVDDEN_MASK,
+               .active_discharge_reg = RTQ6752_REG_OPTION1,
+               .active_discharge_mask = RTQ6752_NAVDDAD_MASK,
+               .active_discharge_off = RTQ6752_NAVDDAD_MASK,
+       }
+};
+
+static int rtq6752_init_device_properties(struct rtq6752_priv *priv)
+{
+       u8 raw_vals[] = { 0, 0 };
+       int ret;
+
+       /* Configure PAVDD on and softstart delay time to the minimum */
+       ret = regmap_raw_write(priv->regmap, RTQ6752_REG_PAVDDONDLY, raw_vals,
+                              ARRAY_SIZE(raw_vals));
+       if (ret)
+               return ret;
+
+       /* Configure NAVDD on and softstart delay time to the minimum */
+       return regmap_raw_write(priv->regmap, RTQ6752_REG_NAVDDONDLY, raw_vals,
+                               ARRAY_SIZE(raw_vals));
+}
+
+static bool rtq6752_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+       if (reg == RTQ6752_REG_FAULT)
+               return true;
+       return false;
+}
+
+static const struct reg_default rtq6752_reg_defaults[] = {
+       { RTQ6752_REG_PAVDD, 0x14 },
+       { RTQ6752_REG_NAVDD, 0x14 },
+       { RTQ6752_REG_PAVDDONDLY, 0x01 },
+       { RTQ6752_REG_PAVDDSSTIME, 0x01 },
+       { RTQ6752_REG_NAVDDONDLY, 0x01 },
+       { RTQ6752_REG_NAVDDSSTIME, 0x01 },
+       { RTQ6752_REG_OPTION1, 0x07 },
+       { RTQ6752_REG_CHSWITCH, 0x29 },
+};
+
+static const struct regmap_config rtq6752_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .cache_type = REGCACHE_RBTREE,
+       .max_register = RTQ6752_REG_FAULT,
+       .reg_defaults = rtq6752_reg_defaults,
+       .num_reg_defaults = ARRAY_SIZE(rtq6752_reg_defaults),
+       .volatile_reg = rtq6752_is_volatile_reg,
+};
+
+static int rtq6752_probe(struct i2c_client *i2c)
+{
+       struct rtq6752_priv *priv;
+       struct regulator_config reg_cfg = {};
+       struct regulator_dev *rdev;
+       int i, ret;
+
+       priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       mutex_init(&priv->lock);
+
+       priv->enable_gpio = devm_gpiod_get_optional(&i2c->dev, "enable",
+                                                   GPIOD_OUT_HIGH);
+       if (IS_ERR(priv->enable_gpio)) {
+               dev_err(&i2c->dev, "Failed to get 'enable' gpio\n");
+               return PTR_ERR(priv->enable_gpio);
+       }
+
+       usleep_range(RTQ6752_I2CRDY_TIMEUS, RTQ6752_I2CRDY_TIMEUS + 100);
+       /* Default EN pin to high, PAVDD and NAVDD will be on */
+       priv->enable_flag = RTQ6752_ENABLE_MASK;
+
+       priv->regmap = devm_regmap_init_i2c(i2c, &rtq6752_regmap_config);
+       if (IS_ERR(priv->regmap)) {
+               dev_err(&i2c->dev, "Failed to init regmap\n");
+               return PTR_ERR(priv->regmap);
+       }
+
+       ret = rtq6752_init_device_properties(priv);
+       if (ret) {
+               dev_err(&i2c->dev, "Failed to init device properties\n");
+               return ret;
+       }
+
+       reg_cfg.dev = &i2c->dev;
+       reg_cfg.regmap = priv->regmap;
+       reg_cfg.driver_data = priv;
+
+       for (i = 0; i < ARRAY_SIZE(rtq6752_regulator_descs); i++) {
+               rdev = devm_regulator_register(&i2c->dev,
+                                              rtq6752_regulator_descs + i,
+                                              &reg_cfg);
+               if (IS_ERR(rdev)) {
+                       dev_err(&i2c->dev, "Failed to init %d regulator\n", i);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static const struct of_device_id __maybe_unused rtq6752_device_table[] = {
+       { .compatible = "richtek,rtq6752", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, rtq6752_device_table);
+
+static struct i2c_driver rtq6752_driver = {
+       .driver = {
+               .name = "rtq6752",
+               .of_match_table = rtq6752_device_table,
+       },
+       .probe_new = rtq6752_probe,
+};
+module_i2c_driver(rtq6752_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RTQ6752 Regulator Driver");
+MODULE_LICENSE("GPL v2");
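
rtq6752_set_vdd_enable() and rtq6752_set_vdd_disable() above gate the whole chip through an enable GPIO, so the driver flips the regmap into cache-only mode while the supply is off and replays the cached values with regcache_sync() on the next enable. The toy userspace model below shows only the write-back idea, with plain arrays standing in for the regmap cache and the hardware registers; it is not the regmap implementation:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NREGS 8

static unsigned char hw[NREGS];         /* pretend hardware registers */
static unsigned char cache[NREGS];      /* pretend register cache */
static bool cache_only;

static void reg_write(unsigned int reg, unsigned char val)
{
        cache[reg] = val;
        if (!cache_only)
                hw[reg] = val;          /* chip powered: write through */
}

static void cache_sync(void)
{
        memcpy(hw, cache, sizeof(hw));  /* replay cached values to hardware */
}

int main(void)
{
        cache_only = true;              /* chip off: writes land in the cache only */
        reg_write(2, 0x5a);
        printf("off: hw[2]=%#x cache[2]=%#x\n", hw[2], cache[2]);

        cache_only = false;             /* chip back on: sync before using it */
        cache_sync();
        printf("on:  hw[2]=%#x\n", hw[2]);
        return 0;
}
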
index e021ae0..8360b39 100644
 #include <linux/gpio/consumer.h>
 #include <linux/mfd/sy7636a.h>
 
-#define SY7636A_POLL_ENABLED_TIME 500
+struct sy7636a_data {
+       struct regmap *regmap;
+       struct gpio_desc *pgood_gpio;
+};
 
 static int sy7636a_get_vcom_voltage_op(struct regulator_dev *rdev)
 {
@@ -35,10 +38,10 @@ static int sy7636a_get_vcom_voltage_op(struct regulator_dev *rdev)
 
 static int sy7636a_get_status(struct regulator_dev *rdev)
 {
-       struct sy7636a *sy7636a = rdev_get_drvdata(rdev);
+       struct sy7636a_data *data = dev_get_drvdata(rdev->dev.parent);
        int ret = 0;
 
-       ret = gpiod_get_value_cansleep(sy7636a->pgood_gpio);
+       ret = gpiod_get_value_cansleep(data->pgood_gpio);
        if (ret < 0)
                dev_err(&rdev->dev, "Failed to read pgood gpio: %d\n", ret);
 
@@ -61,46 +64,50 @@ static const struct regulator_desc desc = {
        .owner = THIS_MODULE,
        .enable_reg = SY7636A_REG_OPERATION_MODE_CRL,
        .enable_mask = SY7636A_OPERATION_MODE_CRL_ONOFF,
-       .poll_enabled_time = SY7636A_POLL_ENABLED_TIME,
        .regulators_node = of_match_ptr("regulators"),
        .of_match = of_match_ptr("vcom"),
 };
 
 static int sy7636a_regulator_probe(struct platform_device *pdev)
 {
-       struct sy7636a *sy7636a = dev_get_drvdata(pdev->dev.parent);
+       struct regmap *regmap = dev_get_drvdata(pdev->dev.parent);
        struct regulator_config config = { };
        struct regulator_dev *rdev;
        struct gpio_desc *gdp;
+       struct sy7636a_data *data;
        int ret;
 
-       if (!sy7636a)
+       if (!regmap)
                return -EPROBE_DEFER;
 
-       platform_set_drvdata(pdev, sy7636a);
-
-       gdp = devm_gpiod_get(sy7636a->dev, "epd-pwr-good", GPIOD_IN);
+       gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
        if (IS_ERR(gdp)) {
-               dev_err(sy7636a->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+               dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
                return PTR_ERR(gdp);
        }
 
-       sy7636a->pgood_gpio = gdp;
+       data = devm_kzalloc(&pdev->dev, sizeof(struct sy7636a_data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->regmap = regmap;
+       data->pgood_gpio = gdp;
+
+       platform_set_drvdata(pdev, data);
 
-       ret = regmap_write(sy7636a->regmap, SY7636A_REG_POWER_ON_DELAY_TIME, 0x0);
+       ret = regmap_write(regmap, SY7636A_REG_POWER_ON_DELAY_TIME, 0x0);
        if (ret) {
-               dev_err(sy7636a->dev, "Failed to initialize regulator: %d\n", ret);
+               dev_err(pdev->dev.parent, "Failed to initialize regulator: %d\n", ret);
                return ret;
        }
 
        config.dev = &pdev->dev;
-       config.dev->of_node = sy7636a->dev->of_node;
-       config.driver_data = sy7636a;
-       config.regmap = sy7636a->regmap;
+       config.dev->of_node = pdev->dev.parent->of_node;
+       config.regmap = regmap;
 
        rdev = devm_regulator_register(&pdev->dev, &desc, &config);
        if (IS_ERR(rdev)) {
-               dev_err(sy7636a->dev, "Failed to register %s regulator\n",
+               dev_err(pdev->dev.parent, "Failed to register %s regulator\n",
                        pdev->name);
                return PTR_ERR(rdev);
        }
index 62d243f..5e915cf 100644
@@ -25,6 +25,7 @@ struct sy8824_config {
        unsigned int vsel_min;
        unsigned int vsel_step;
        unsigned int vsel_count;
+       const struct regmap_config *config;
 };
 
 struct sy8824_device_info {
@@ -110,6 +111,15 @@ static int sy8824_regulator_register(struct sy8824_device_info *di,
 static const struct regmap_config sy8824_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .num_reg_defaults_raw = 1,
+       .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config sy20276_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .num_reg_defaults_raw = 2,
+       .cache_type = REGCACHE_FLAT,
 };
 
 static int sy8824_i2c_probe(struct i2c_client *client)
@@ -134,7 +144,7 @@ static int sy8824_i2c_probe(struct i2c_client *client)
        di->dev = dev;
        di->cfg = of_device_get_match_data(dev);
 
-       regmap = devm_regmap_init_i2c(client, &sy8824_regmap_config);
+       regmap = devm_regmap_init_i2c(client, di->cfg->config);
        if (IS_ERR(regmap)) {
                dev_err(dev, "Failed to allocate regmap!\n");
                return PTR_ERR(regmap);
@@ -160,6 +170,7 @@ static const struct sy8824_config sy8824c_cfg = {
        .vsel_min = 762500,
        .vsel_step = 12500,
        .vsel_count = 64,
+       .config = &sy8824_regmap_config,
 };
 
 static const struct sy8824_config sy8824e_cfg = {
@@ -169,6 +180,7 @@ static const struct sy8824_config sy8824e_cfg = {
        .vsel_min = 700000,
        .vsel_step = 12500,
        .vsel_count = 64,
+       .config = &sy8824_regmap_config,
 };
 
 static const struct sy8824_config sy20276_cfg = {
@@ -178,6 +190,7 @@ static const struct sy8824_config sy20276_cfg = {
        .vsel_min = 600000,
        .vsel_step = 10000,
        .vsel_count = 128,
+       .config = &sy20276_regmap_config,
 };
 
 static const struct sy8824_config sy20278_cfg = {
@@ -187,6 +200,7 @@ static const struct sy8824_config sy20278_cfg = {
        .vsel_min = 762500,
        .vsel_step = 12500,
        .vsel_count = 64,
+       .config = &sy20276_regmap_config,
 };
 
 static const struct of_device_id sy8824_dt_ids[] = {
index 52e8c17..7d5d9f8 100644
 #define   SY8827N_MODE         (1 << 6)
 #define SY8827N_VSEL1          1
 #define SY8827N_CTRL           2
+#define SY8827N_ID1            3
+#define SY8827N_ID2            4
+#define SY8827N_PGOOD          5
+#define SY8827N_MAX            (SY8827N_PGOOD + 1)
 
 #define SY8827N_NVOLTAGES      64
 #define SY8827N_VSELMIN                600000
@@ -102,9 +106,19 @@ static int sy8827n_regulator_register(struct sy8827n_device_info *di,
        return PTR_ERR_OR_ZERO(rdev);
 }
 
+static bool sy8827n_volatile_reg(struct device *dev, unsigned int reg)
+{
+       if (reg == SY8827N_PGOOD)
+               return true;
+       return false;
+}
+
 static const struct regmap_config sy8827n_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .volatile_reg = sy8827n_volatile_reg,
+       .num_reg_defaults_raw = SY8827N_MAX,
+       .cache_type = REGCACHE_FLAT,
 };
 
 static int sy8827n_i2c_probe(struct i2c_client *client)
index 1d5b0a1..06cbe60 100644
@@ -1211,12 +1211,10 @@ static int tps65910_probe(struct platform_device *pdev)
 
                rdev = devm_regulator_register(&pdev->dev, &pmic->desc[i],
                                               &config);
-               if (IS_ERR(rdev)) {
-                       dev_err(tps65910->dev,
-                               "failed to register %s regulator\n",
-                               pdev->name);
-                       return PTR_ERR(rdev);
-               }
+               if (IS_ERR(rdev))
+                       return dev_err_probe(tps65910->dev, PTR_ERR(rdev),
+                                            "failed to register %s regulator\n",
+                                            pdev->name);
 
                /* Save regulator for cleanup */
                pmic->rdev[i] = rdev;
index cbadb1c..d2a3797 100644
@@ -37,7 +37,6 @@ struct vctrl_voltage_table {
 struct vctrl_data {
        struct regulator_dev *rdev;
        struct regulator_desc desc;
-       struct regulator *ctrl_reg;
        bool enabled;
        unsigned int min_slew_down_rate;
        unsigned int ovp_threshold;
@@ -82,7 +81,12 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
 static int vctrl_get_voltage(struct regulator_dev *rdev)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
+       int ctrl_uV;
+
+       if (!rdev->supply)
+               return -EPROBE_DEFER;
+
+       ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
 
        return vctrl_calc_output_voltage(vctrl, ctrl_uV);
 }
@@ -92,14 +96,19 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
                             unsigned int *selector)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       struct regulator *ctrl_reg = vctrl->ctrl_reg;
-       int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
-       int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
+       int orig_ctrl_uV;
+       int uV;
        int ret;
 
+       if (!rdev->supply)
+               return -EPROBE_DEFER;
+
+       orig_ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
+       uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
+
        if (req_min_uV >= uV || !vctrl->ovp_threshold)
                /* voltage rising or no OVP */
-               return regulator_set_voltage_rdev(ctrl_reg->rdev,
+               return regulator_set_voltage_rdev(rdev->supply->rdev,
                        vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
                        vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
                        PM_SUSPEND_ON);
@@ -117,7 +126,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
                next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
                next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
 
-               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+               ret = regulator_set_voltage_rdev(rdev->supply->rdev,
                                            next_ctrl_uV,
                                            next_ctrl_uV,
                                            PM_SUSPEND_ON);
@@ -134,7 +143,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
 
 err:
        /* Try to go back to original voltage */
-       regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
+       regulator_set_voltage_rdev(rdev->supply->rdev, orig_ctrl_uV, orig_ctrl_uV,
                                   PM_SUSPEND_ON);
 
        return ret;
@@ -151,16 +160,18 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
                                 unsigned int selector)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       struct regulator *ctrl_reg = vctrl->ctrl_reg;
        unsigned int orig_sel = vctrl->sel;
        int ret;
 
+       if (!rdev->supply)
+               return -EPROBE_DEFER;
+
        if (selector >= rdev->desc->n_voltages)
                return -EINVAL;
 
        if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
                /* voltage rising or no OVP */
-               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+               ret = regulator_set_voltage_rdev(rdev->supply->rdev,
                                            vctrl->vtable[selector].ctrl,
                                            vctrl->vtable[selector].ctrl,
                                            PM_SUSPEND_ON);
@@ -179,7 +190,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
                else
                        next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
 
-               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+               ret = regulator_set_voltage_rdev(rdev->supply->rdev,
                                            vctrl->vtable[next_sel].ctrl,
                                            vctrl->vtable[next_sel].ctrl,
                                            PM_SUSPEND_ON);
@@ -202,7 +213,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
 err:
        if (vctrl->sel != orig_sel) {
                /* Try to go back to original voltage */
-               if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
+               if (!regulator_set_voltage_rdev(rdev->supply->rdev,
                                           vctrl->vtable[orig_sel].ctrl,
                                           vctrl->vtable[orig_sel].ctrl,
                                           PM_SUSPEND_ON))
@@ -234,10 +245,6 @@ static int vctrl_parse_dt(struct platform_device *pdev,
        u32 pval;
        u32 vrange_ctrl[2];
 
-       vctrl->ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
-       if (IS_ERR(vctrl->ctrl_reg))
-               return PTR_ERR(vctrl->ctrl_reg);
-
        ret = of_property_read_u32(np, "ovp-threshold-percent", &pval);
        if (!ret) {
                vctrl->ovp_threshold = pval;
@@ -315,11 +322,11 @@ static int vctrl_cmp_ctrl_uV(const void *a, const void *b)
        return at->ctrl - bt->ctrl;
 }
 
-static int vctrl_init_vtable(struct platform_device *pdev)
+static int vctrl_init_vtable(struct platform_device *pdev,
+                            struct regulator *ctrl_reg)
 {
        struct vctrl_data *vctrl = platform_get_drvdata(pdev);
        struct regulator_desc *rdesc = &vctrl->desc;
-       struct regulator *ctrl_reg = vctrl->ctrl_reg;
        struct vctrl_voltage_range *vrange_ctrl = &vctrl->vrange.ctrl;
        int n_voltages;
        int ctrl_uV;
@@ -395,23 +402,19 @@ static int vctrl_init_vtable(struct platform_device *pdev)
 static int vctrl_enable(struct regulator_dev *rdev)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       int ret = regulator_enable(vctrl->ctrl_reg);
 
-       if (!ret)
-               vctrl->enabled = true;
+       vctrl->enabled = true;
 
-       return ret;
+       return 0;
 }
 
 static int vctrl_disable(struct regulator_dev *rdev)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       int ret = regulator_disable(vctrl->ctrl_reg);
 
-       if (!ret)
-               vctrl->enabled = false;
+       vctrl->enabled = false;
 
-       return ret;
+       return 0;
 }
 
 static int vctrl_is_enabled(struct regulator_dev *rdev)
@@ -447,6 +450,7 @@ static int vctrl_probe(struct platform_device *pdev)
        struct regulator_desc *rdesc;
        struct regulator_config cfg = { };
        struct vctrl_voltage_range *vrange_ctrl;
+       struct regulator *ctrl_reg;
        int ctrl_uV;
        int ret;
 
@@ -461,15 +465,20 @@ static int vctrl_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
+       if (IS_ERR(ctrl_reg))
+               return PTR_ERR(ctrl_reg);
+
        vrange_ctrl = &vctrl->vrange.ctrl;
 
        rdesc = &vctrl->desc;
        rdesc->name = "vctrl";
        rdesc->type = REGULATOR_VOLTAGE;
        rdesc->owner = THIS_MODULE;
+       rdesc->supply_name = "ctrl";
 
-       if ((regulator_get_linear_step(vctrl->ctrl_reg) == 1) ||
-           (regulator_count_voltages(vctrl->ctrl_reg) == -EINVAL)) {
+       if ((regulator_get_linear_step(ctrl_reg) == 1) ||
+           (regulator_count_voltages(ctrl_reg) == -EINVAL)) {
                rdesc->continuous_voltage_range = true;
                rdesc->ops = &vctrl_ops_cont;
        } else {
@@ -486,11 +495,12 @@ static int vctrl_probe(struct platform_device *pdev)
        cfg.init_data = init_data;
 
        if (!rdesc->continuous_voltage_range) {
-               ret = vctrl_init_vtable(pdev);
+               ret = vctrl_init_vtable(pdev, ctrl_reg);
                if (ret)
                        return ret;
 
-               ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
+               /* Use locked consumer API when not in regulator framework */
+               ctrl_uV = regulator_get_voltage(ctrl_reg);
                if (ctrl_uV < 0) {
                        dev_err(&pdev->dev, "failed to get control voltage\n");
                        return ctrl_uV;
@@ -513,6 +523,9 @@ static int vctrl_probe(struct platform_device *pdev)
                }
        }
 
+       /* Drop ctrl-supply here in favor of regulator core managed supply */
+       devm_regulator_put(ctrl_reg);
+
        vctrl->rdev = devm_regulator_register(&pdev->dev, rdesc, &cfg);
        if (IS_ERR(vctrl->rdev)) {
                ret = PTR_ERR(vctrl->rdev);
index 328f70f..5656cac 100644
@@ -116,7 +116,7 @@ config RESET_LPC18XX
 
 config RESET_MCHP_SPARX5
        bool "Microchip Sparx5 reset driver"
-       depends on HAS_IOMEM || COMPILE_TEST
+       depends on ARCH_SPARX5 || COMPILE_TEST
        default y if SPARX5_SWITCH
        select MFD_SYSCON
        help
index daa425e..59dc0ff 100644
@@ -53,7 +53,8 @@ static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
                               unsigned long id)
 {
        struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-       int val, err;
+       int err;
+       u32 val;
 
        err = zynqmp_pm_reset_get_status(priv->data->reset_id + id, &val);
        if (err)
index 6bb7752..db59872 100644
@@ -552,7 +552,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        dbio = dreq->bio;
        recid = first_rec;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                for (off = 0; off < bv.bv_len; off += blksize) {
                        memset(dbio, 0, sizeof (struct dasd_diag_bio));
                        dbio->type = rw_cmd;
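
The dasd hunks in this area replace the open-coded page_address(bv.bv_page) + bv.bv_offset with bvec_virt(&bv), which yields the same kernel-virtual address of the segment's data. A toy userspace model of that accessor, with a byte array standing in for the mapped page (the toy_* names are invented for illustration):

#include <stdio.h>

struct toy_bio_vec {
        unsigned char *bv_page;         /* stands in for page_address(bv_page) */
        unsigned int bv_offset;
        unsigned int bv_len;
};

/* Start address of the segment's data, as in the converted hunks. */
static void *toy_bvec_virt(const struct toy_bio_vec *bv)
{
        return bv->bv_page + bv->bv_offset;
}

int main(void)
{
        unsigned char page[4096] = { [512] = 0xab };
        struct toy_bio_vec bv = { .bv_page = page, .bv_offset = 512, .bv_len = 512 };

        printf("%#x\n", *(unsigned char *)toy_bvec_virt(&bv));         /* 0xab */
        return 0;
}
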
index 0de1a46..460e0f1 100644
@@ -1004,15 +1004,23 @@ static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
 static void dasd_eckd_store_conf_data(struct dasd_device *device,
                                      struct dasd_conf_data *conf_data, int chp)
 {
+       struct dasd_eckd_private *private = device->private;
        struct channel_path_desc_fmt0 *chp_desc;
        struct subchannel_id sch_id;
+       void *cdp;
 
-       ccw_device_get_schid(device->cdev, &sch_id);
        /*
         * path handling and read_conf allocate data
         * free it before replacing the pointer
+        * also replace the old private->conf_data pointer
+        * with the new one if this points to the same data
         */
-       kfree(device->path[chp].conf_data);
+       cdp = device->path[chp].conf_data;
+       if (private->conf_data == cdp) {
+               private->conf_data = (void *)conf_data;
+               dasd_eckd_identify_conf_parts(private);
+       }
+       ccw_device_get_schid(device->cdev, &sch_id);
        device->path[chp].conf_data = conf_data;
        device->path[chp].cssid = sch_id.cssid;
        device->path[chp].ssid = sch_id.ssid;
@@ -1020,6 +1028,7 @@ static void dasd_eckd_store_conf_data(struct dasd_device *device,
        if (chp_desc)
                device->path[chp].chpid = chp_desc->chpid;
        kfree(chp_desc);
+       kfree(cdp);
 }
 
 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
@@ -3267,7 +3276,7 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
        end_blk = (curr_trk + 1) * recs_per_trk;
 
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                for (off = 0; off < bv.bv_len; off += blksize) {
                        if (first_blk + blk_count >= end_blk) {
                                cqr->proc_bytes = blk_count * blksize;
@@ -3999,7 +4008,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
                              last_rec - recid + 1, cmd, basedev, blksize);
        }
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
                                                      GFP_DMA | __GFP_NOWARN);
@@ -4166,7 +4175,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
        idaw_dst = NULL;
        idaw_len = 0;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                seg_len = bv.bv_len;
                while (seg_len) {
                        if (new_track) {
@@ -4509,7 +4518,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
                new_track = 1;
                recid = first_rec;
                rq_for_each_segment(bv, req, iter) {
-                       dst = page_address(bv.bv_page) + bv.bv_offset;
+                       dst = bvec_virt(&bv);
                        seg_len = bv.bv_len;
                        while (seg_len) {
                                if (new_track) {
@@ -4542,7 +4551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
                }
        } else {
                rq_for_each_segment(bv, req, iter) {
-                       dst = page_address(bv.bv_page) + bv.bv_offset;
+                       dst = bvec_virt(&bv);
                        last_tidaw = itcw_add_tidaw(itcw, 0x00,
                                                    dst, bv.bv_len);
                        if (IS_ERR(last_tidaw)) {
@@ -4778,7 +4787,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
                        idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
        }
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                seg_len = bv.bv_len;
                if (cmd == DASD_ECKD_CCW_READ_TRACK)
                        memset(dst, 0, seg_len);
@@ -4839,7 +4848,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
                ccw++;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Skip locate record. */
                        if (private->uses_cdl && recid <= 2*blk_per_trk)
index 3ad319a..e084f4d 100644 (file)
@@ -501,7 +501,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
        }
        recid = first_rec;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
                                                      GFP_DMA | __GFP_NOWARN);
@@ -583,7 +583,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        if (private->rdc_data.mode.bits.data_chain != 0)
                ccw++;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Skip locate record. */
                        if (private->rdc_data.mode.bits.data_chain == 0)
index 493e846..fa966e0 100644 (file)
@@ -24,6 +24,8 @@
 
 #include "dasd_int.h"
 
+static struct lock_class_key dasd_bio_compl_lkclass;
+
 /*
  * Allocate and register gendisk structure for device.
  */
@@ -38,13 +40,15 @@ int dasd_gendisk_alloc(struct dasd_block *block)
        if (base->devindex >= DASD_PER_MAJOR)
                return -EBUSY;
 
-       gdp = alloc_disk(1 << DASD_PARTN_BITS);
+       gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE,
+                               &dasd_bio_compl_lkclass);
        if (!gdp)
                return -ENOMEM;
 
        /* Initialize gendisk structure. */
        gdp->major = DASD_MAJOR;
        gdp->first_minor = base->devindex << DASD_PARTN_BITS;
+       gdp->minors = 1 << DASD_PARTN_BITS;
        gdp->fops = &dasd_device_operations;
 
        /*
@@ -73,7 +77,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
            test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
                set_disk_ro(gdp, 1);
        dasd_add_link_to_gendisk(gdp, base);
-       gdp->queue = block->request_queue;
        block->gdp = gdp;
        set_capacity(block->gdp, 0);
        device_add_disk(&base->cdev->dev, block->gdp, NULL);
index 9f64244..468cbeb 100644 (file)
@@ -575,10 +575,8 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
        else
                argp = (void __user *)arg;
 
-       if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
-               PRINT_DEBUG("empty data ptr");
+       if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg)
                return -EINVAL;
-       }
 
        base = dasd_device_from_gendisk(bdev->bd_disk);
        if (!base)
index 29180bd..5be3d1c 100644 (file)
@@ -892,8 +892,7 @@ dcssblk_submit_bio(struct bio *bio)
 
        index = (bio->bi_iter.bi_sector >> 3);
        bio_for_each_segment(bvec, bio, iter) {
-               page_addr = (unsigned long)
-                       page_address(bvec.bv_page) + bvec.bv_offset;
+               page_addr = (unsigned long)bvec_virt(&bvec);
                source_addr = dev_info->start + (index<<12) + bytes_done;
                if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
                        // More paranoia.
index 792b4bf..b4b84e3 100644 (file)
 #include <linux/platform_device.h>
 #include <asm/types.h>
 #include <asm/irq.h>
+#include <asm/debug.h>
 
 #include "sclp.h"
 
 #define SCLP_HEADER            "sclp: "
 
+struct sclp_trace_entry {
+       char id[4];
+       u32 a;
+       u64 b;
+};
+
+#define SCLP_TRACE_ENTRY_SIZE          sizeof(struct sclp_trace_entry)
+#define SCLP_TRACE_MAX_SIZE            128
+#define SCLP_TRACE_EVENT_MAX_SIZE      64
+
+/* Debug trace area intended for all entries in abbreviated form. */
+DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
+                        &debug_hex_ascii_view);
+
+/* Error trace area intended for full entries relating to failed requests. */
+DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
+                        SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);
+
 /* Lock to protect internal data consistency. */
 static DEFINE_SPINLOCK(sclp_lock);
 
@@ -54,6 +73,114 @@ int sclp_console_drop = 1;
 /* Number of times the console dropped buffer pages */
 unsigned long sclp_console_full;
 
+/* The currently active SCLP command word. */
+static sclp_cmdw_t active_cmd;
+
+static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
+{
+       struct sclp_trace_entry e;
+
+       memset(&e, 0, sizeof(e));
+       strncpy(e.id, id, sizeof(e.id));
+       e.a = a;
+       e.b = b;
+       debug_event(&sclp_debug, prio, &e, sizeof(e));
+       if (err)
+               debug_event(&sclp_debug_err, 0, &e, sizeof(e));
+}
+
+static inline int no_zeroes_len(void *data, int len)
+{
+       char *d = data;
+
+       /* Minimize trace area usage by not tracing trailing zeroes. */
+       while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
+               len--;
+
+       return len;
+}
+
+static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
+{
+       debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
+       if (errlen)
+               debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
+}
+
+static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
+{
+       struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
+       int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;
+
+       /* Full SCCB tracing if debug level is set to max. */
+       if (sclp_debug.level == DEBUG_MAX_LEVEL)
+               return len;
+
+       /* Minimal tracing for console writes. */
+       if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
+           (evbuf->type == EVTYP_MSG  || evbuf->type == EVTYP_VT220MSG))
+               limit = SCLP_TRACE_ENTRY_SIZE;
+
+       return min(len, limit);
+}
+
+static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
+                                  sclp_cmdw_t cmd, struct sccb_header *sccb,
+                                  bool err)
+{
+       sclp_trace(prio, id, a, b, err);
+       if (sccb) {
+               sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
+                              err ? sccb->length : 0);
+       }
+}
+
+static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
+                                   struct evbuf_header *evbuf, bool err)
+{
+       sclp_trace(prio, id, a, b, err);
+       sclp_trace_bin(prio + 1, evbuf,
+                      min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
+                      err ? evbuf->length : 0);
+}
+
+static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
+                                 bool err)
+{
+       struct sccb_header *sccb = req->sccb;
+       union {
+               struct {
+                       u16 status;
+                       u16 response;
+                       u16 timeout;
+                       u16 start_count;
+               };
+               u64 b;
+       } summary;
+
+       summary.status = req->status;
+       summary.response = sccb ? sccb->response_code : 0;
+       summary.timeout = (u16)req->queue_timeout;
+       summary.start_count = (u16)req->start_count;
+
+       sclp_trace(prio, id, (u32)(addr_t)sccb, summary.b, err);
+}
+
+static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
+                                      struct sclp_register *reg)
+{
+       struct {
+               u64 receive;
+               u64 send;
+       } d;
+
+       d.receive = reg->receive_mask;
+       d.send = reg->send_mask;
+
+       sclp_trace(prio, id, a, b, false);
+       sclp_trace_bin(prio, &d, sizeof(d), 0);
+}
+
 static int __init sclp_setup_console_pages(char *str)
 {
        int pages, rc;
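
The helpers added above establish a compact trace convention: a 4-character event ID plus
two scalar payloads (a and b), written to the "sclp" s390 debug area and, for failing
events, duplicated into "sclp_err" (both areas should be readable through the s390 debug
feature, typically under /sys/kernel/debug/s390dbf/). A hypothetical call site, purely for
illustration and not part of this patch, would look like:

	/* Hypothetical example: record event "XMPL" with payloads a and b;
	 * err=true also copies the entry into the sclp_debug_err area. */
	sclp_trace(2, "XMPL", 0x1234, _RET_IP_, true);
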
@@ -162,6 +289,9 @@ static void sclp_request_timeout(bool force_restart)
 {
        unsigned long flags;
 
+       /* TMO: A timeout occurred (a=force_restart) */
+       sclp_trace(2, "TMO", force_restart, 0, true);
+
        spin_lock_irqsave(&sclp_lock, flags);
        if (force_restart) {
                if (sclp_running_state == sclp_running_state_running) {
@@ -237,6 +367,12 @@ static void sclp_req_queue_timeout(struct timer_list *unused)
 
        do {
                req = __sclp_req_queue_remove_expired_req();
+
+               if (req) {
+                       /* RQTM: Request timed out (a=sccb, b=summary) */
+                       sclp_trace_req(2, "RQTM", req, true);
+               }
+
                if (req && req->callback)
                        req->callback(req, req->callback_data);
        } while (req);
@@ -248,6 +384,25 @@ static void sclp_req_queue_timeout(struct timer_list *unused)
        spin_unlock_irqrestore(&sclp_lock, flags);
 }
 
+static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
+{
+       static u64 srvc_count;
+       int rc;
+
+       /* SRV1: Service call about to be issued (a=command, b=sccb address) */
+       sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);
+
+       rc = sclp_service_call(command, sccb);
+
+       /* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
+       sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);
+
+       if (rc == 0)
+               active_cmd = command;
+
+       return rc;
+}
+
 /* Try to start a request. Return zero if the request was successfully
  * started or if it will be started at a later time. Return non-zero otherwise.
  * Called while sclp_lock is locked. */
@@ -259,7 +414,7 @@ __sclp_start_request(struct sclp_req *req)
        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
-       rc = sclp_service_call(req->command, req->sccb);
+       rc = sclp_service_call_trace(req->command, req->sccb);
        req->start_count++;
 
        if (rc == 0) {
@@ -309,6 +464,10 @@ sclp_process_queue(void)
                }
                /* Post-processing for aborted request */
                list_del(&req->list);
+
+               /* RQAB: Request aborted (a=sccb, b=summary) */
+               sclp_trace_req(2, "RQAB", req, true);
+
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
@@ -341,6 +500,10 @@ sclp_add_request(struct sclp_req *req)
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
+
+       /* RQAD: Request was added (a=sccb, b=caller) */
+       sclp_trace(2, "RQAD", (u32)(addr_t)req->sccb, _RET_IP_, false);
+
        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
@@ -394,6 +557,11 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
                        else
                                reg = NULL;
                }
+
+               /* EVNT: Event callback (b=receiver) */
+               sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
+                                evbuf, !reg);
+
                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
@@ -455,6 +623,30 @@ __sclp_find_req(u32 sccb)
        return NULL;
 }
 
+static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
+{
+       struct sccb_header *sccb = (struct sccb_header *)(addr_t)sccb_int;
+       struct evbuf_header *evbuf;
+       u16 response;
+
+       if (!sccb)
+               return true;
+
+       /* Check SCCB response. */
+       response = sccb->response_code & 0xff;
+       if (response != 0x10 && response != 0x20)
+               return false;
+
+       /* Check event-processed flag on outgoing events. */
+       if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
+               evbuf = (struct evbuf_header *)(sccb + 1);
+               if (!(evbuf->flags & 0x80))
+                       return false;
+       }
+
+       return true;
+}
+
 /* Handler for external interruption. Perform request post-processing.
  * Prepare read event data request if necessary. Start processing of next
  * request on queue. */
@@ -469,6 +661,12 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
        spin_lock(&sclp_lock);
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;
+
+       /* INT: Interrupt received (a=intparm, b=cmd) */
+       sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
+                       (struct sccb_header *)(addr_t)finished_sccb,
+                       !ok_response(finished_sccb, active_cmd));
+
        if (finished_sccb) {
                del_timer(&sclp_request_timer);
                sclp_running_state = sclp_running_state_reset_pending;
@@ -477,13 +675,21 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;
+
+                       /* RQOK: Request success (a=sccb, b=summary) */
+                       sclp_trace_req(2, "RQOK", req, false);
+
                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
+               } else {
+                       /* UNEX: Unexpected SCCB completion (a=sccb address) */
+                       sclp_trace(0, "UNEX", finished_sccb, 0, true);
                }
                sclp_running_state = sclp_running_state_idle;
+               active_cmd = 0;
        }
        if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
@@ -507,9 +713,13 @@ sclp_sync_wait(void)
        unsigned long long old_tick;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
+       static u64 sync_count;
        u64 timeout;
        int irq_context;
 
+       /* SYN1: Synchronous wait start (a=runstate, b=sync count) */
+       sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);
+
        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
@@ -547,6 +757,9 @@ sclp_sync_wait(void)
                _local_bh_enable();
        local_tick_enable(old_tick);
        local_irq_restore(flags);
+
+       /* SYN2: Synchronous wait end (a=runstate, b=sync_count) */
+       sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
 }
 EXPORT_SYMBOL(sclp_sync_wait);
 
@@ -576,8 +789,13 @@ sclp_dispatch_state_change(void)
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
-               if (reg && reg->state_change_fn)
+               if (reg && reg->state_change_fn) {
+                       /* STCG: State-change callback (b=callback) */
+                       sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
+                                  false);
+
                        reg->state_change_fn(reg);
+               }
        } while (reg);
 }
 
@@ -651,6 +869,9 @@ sclp_register(struct sclp_register *reg)
        sccb_mask_t send_mask;
        int rc;
 
+       /* REG: Event listener registered (b=caller) */
+       sclp_trace_register(2, "REG", 0, _RET_IP_, reg);
+
        rc = sclp_init();
        if (rc)
                return rc;
@@ -683,6 +904,9 @@ sclp_unregister(struct sclp_register *reg)
 {
        unsigned long flags;
 
+       /* UREG: Event listener unregistered (b=caller) */
+       sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);
+
        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
@@ -932,7 +1156,7 @@ sclp_check_interface(void)
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
-               rc = sclp_service_call(sclp_init_req.command, sccb);
+               rc = sclp_service_call_trace(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
index 8dd8ad8..5e43410 100644 (file)
@@ -310,8 +310,6 @@ extern int sclp_console_drop;
 extern unsigned long sclp_console_full;
 extern bool sclp_mask_compat_mode;
 
-extern char *sclp_early_sccb;
-
 void sclp_early_wait_irq(void);
 int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
 unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb);
index ab0518c..998933e 100644 (file)
@@ -457,7 +457,7 @@ static int __init sclp_detect_standby_memory(void)
        struct read_storage_sccb *sccb;
        int i, id, assigned, rc;
 
-       if (OLDMEM_BASE) /* No standby memory in kdump mode */
+       if (oldmem_data.start) /* No standby memory in kdump mode */
                return 0;
        if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
                return 0;
index 039b207..c365110 100644 (file)
@@ -50,12 +50,12 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
 
        s390_update_cpu_mhz();
        pr_info("CPU capability may have changed\n");
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(cpu) {
                dev = get_cpu_device(cpu);
                kobject_uevent(&dev->kobj, KOBJ_CHANGE);
        }
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 static void __ref sclp_cpu_change_notify(struct work_struct *work)
index b7329af..676634d 100644 (file)
@@ -17,7 +17,7 @@
 
 static struct read_info_sccb __bootdata(sclp_info_sccb);
 static int __bootdata(sclp_info_sccb_valid);
-char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET;
+char *__bootdata(sclp_early_sccb);
 int sclp_init_state = sclp_init_state_uninitialized;
 /*
  * Used to keep track of the size of the event masks. Qemu until version 2.11
@@ -211,6 +211,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
        return rc;
 }
 
+void sclp_early_set_buffer(void *sccb)
+{
+       sclp_early_sccb = sccb;
+}
+
 /*
  * Output one or more lines of text on the SCLP console (VT220 and /
  * or line-mode).
@@ -235,11 +240,20 @@ void sclp_early_printk(const char *str)
        __sclp_early_printk(str, strlen(str));
 }
 
+/*
+ * We can't pass sclp_info_sccb to sclp_early_cmd() here directly,
+ * because it might not fulfil the requirements for a SCLP communication buffer:
+ *   - lie below 2G in memory
+ *   - be page-aligned
+ * Therefore, we use the buffer sclp_early_sccb (which fulfils all those
+ * requirements) temporarily for communication and copy a received response
+ * back into the buffer sclp_info_sccb upon successful completion.
+ */
 int __init sclp_early_read_info(void)
 {
        int i;
        int length = test_facility(140) ? EXT_SCCB_READ_SCP : PAGE_SIZE;
-       struct read_info_sccb *sccb = &sclp_info_sccb;
+       struct read_info_sccb *sccb = (struct read_info_sccb *)sclp_early_sccb;
        sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
                                  SCLP_CMDW_READ_SCP_INFO};
 
@@ -251,6 +265,7 @@ int __init sclp_early_read_info(void)
                if (sclp_early_cmd(commands[i], sccb))
                        break;
                if (sccb->header.response_code == 0x10) {
+                       memcpy(&sclp_info_sccb, sccb, length);
                        sclp_info_sccb_valid = 1;
                        return 0;
                }
index b5b0848..3ba2d93 100644 (file)
@@ -269,7 +269,7 @@ static int __init zcore_init(void)
 
        if (!is_ipl_type_dump())
                return -ENODATA;
-       if (OLDMEM_BASE)
+       if (oldmem_data.start)
                return -ENODATA;
 
        zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
index a974943..0ce48a3 100644 (file)
@@ -430,9 +430,26 @@ static ssize_t pimpampom_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(pimpampom);
 
+static ssize_t dev_busid_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       struct subchannel *sch = to_subchannel(dev);
+       struct pmcw *pmcw = &sch->schib.pmcw;
+
+       if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
+            pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
+               return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
+                                 pmcw->dev);
+       else
+               return sysfs_emit(buf, "none\n");
+}
+static DEVICE_ATTR_RO(dev_busid);
+
 static struct attribute *io_subchannel_type_attrs[] = {
        &dev_attr_chpids.attr,
        &dev_attr_pimpampom.attr,
+       &dev_attr_dev_busid.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(io_subchannel_type);
@@ -886,6 +903,18 @@ static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
 }
 static DEVICE_ATTR_RO(real_cssid);
 
+static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
+                           const char *buf, size_t count)
+{
+       CIO_TRACE_EVENT(4, "usr-rescan");
+
+       css_schedule_eval_all();
+       css_complete_work();
+
+       return count;
+}
+static DEVICE_ATTR_WO(rescan);
+
 static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
                              char *buf)
 {
@@ -932,6 +961,7 @@ static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
 
 static struct attribute *cssdev_attrs[] = {
        &dev_attr_real_cssid.attr,
+       &dev_attr_rescan.attr,
        NULL,
 };
 
index f69ffbb..99c2212 100644 (file)
@@ -126,21 +126,9 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue,
 
 struct qdio_irq;
 
-struct siga_flag {
-       u8 input:1;
-       u8 output:1;
-       u8 sync:1;
-       u8 sync_after_ai:1;
-       u8 sync_out_after_pci:1;
-       u8:3;
-} __attribute__ ((packed));
-
 struct qdio_dev_perf_stat {
        unsigned int adapter_int;
        unsigned int qdio_int;
-       unsigned int pci_request_int;
-
-       unsigned int tasklet_outbound;
 
        unsigned int siga_read;
        unsigned int siga_write;
@@ -150,7 +138,6 @@ struct qdio_dev_perf_stat {
        unsigned int stop_polling;
        unsigned int inbound_queue_full;
        unsigned int outbound_call;
-       unsigned int outbound_handler;
        unsigned int outbound_queue_full;
        unsigned int fast_requeue;
        unsigned int target_full;
@@ -180,12 +167,6 @@ struct qdio_input_q {
 };
 
 struct qdio_output_q {
-       /* PCIs are enabled for the queue */
-       int pci_out_enabled;
-       /* timer to check for more outbound work */
-       struct timer_list timer;
-       /* tasklet to check for completions */
-       struct tasklet_struct tasklet;
 };
 
 /*
@@ -250,8 +231,7 @@ struct qdio_irq {
        unsigned long sch_token;        /* QEBSM facility */
 
        enum qdio_irq_states state;
-
-       struct siga_flag siga_flag;     /* siga sync information from qdioac */
+       u8 qdioac1;
 
        int nr_input_qs;
        int nr_output_qs;
@@ -263,7 +243,6 @@ struct qdio_irq {
        struct qdio_ssqd_desc ssqd_desc;
        void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
 
-       unsigned int scan_threshold;    /* used SBALs before tasklet schedule */
        int perf_stat_enabled;
 
        struct qdr *qdr;
@@ -325,13 +304,9 @@ static inline void qdio_deliver_irq(struct qdio_irq *irq)
 #define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
 #define is_qebsm(q)                    (q->irq_ptr->sch_token != 0)
 
-#define need_siga_in(q)                        (q->irq_ptr->siga_flag.input)
-#define need_siga_out(q)               (q->irq_ptr->siga_flag.output)
-#define need_siga_sync(q)              (unlikely(q->irq_ptr->siga_flag.sync))
-#define need_siga_sync_after_ai(q)     \
-       (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
-#define need_siga_sync_out_after_pci(q)        \
-       (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
+#define qdio_need_siga_in(irq)         ((irq)->qdioac1 & AC1_SIGA_INPUT_NEEDED)
+#define qdio_need_siga_out(irq)                ((irq)->qdioac1 & AC1_SIGA_OUTPUT_NEEDED)
+#define qdio_need_siga_sync(irq)       (unlikely((irq)->qdioac1 & AC1_SIGA_SYNC_NEEDED))
 
 #define for_each_input_queue(irq_ptr, q, i)            \
        for (i = 0; i < irq_ptr->nr_input_qs &&         \
@@ -345,11 +320,6 @@ static inline void qdio_deliver_irq(struct qdio_irq *irq)
 #define sub_buf(bufnr, dec)    QDIO_BUFNR((bufnr) - (dec))
 #define prev_buf(bufnr)                sub_buf(bufnr, 1)
 
-#define queue_irqs_enabled(q)                  \
-       (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
-#define queue_irqs_disabled(q)                 \
-       (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
-
 extern u64 last_ai_time;
 
 /* prototypes for thin interrupt */
@@ -360,8 +330,6 @@ void qdio_thinint_exit(void);
 int test_nonshared_ind(struct qdio_irq *);
 
 /* prototypes for setup */
-void qdio_outbound_tasklet(struct tasklet_struct *t);
-void qdio_outbound_timer(struct timer_list *t);
 void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb);
 int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
index 00384f5..4bb7965 100644 (file)
@@ -197,8 +197,6 @@ DEFINE_SHOW_ATTRIBUTE(ssqd);
 static char *qperf_names[] = {
        "Assumed adapter interrupts",
        "QDIO interrupts",
-       "Requested PCIs",
-       "Outbound tasklet runs",
        "SIGA read",
        "SIGA write",
        "SIGA sync",
@@ -206,7 +204,6 @@ static char *qperf_names[] = {
        "Inbound stop_polling",
        "Inbound queue full",
        "Outbound calls",
-       "Outbound handler",
        "Outbound queue full",
        "Outbound fast_requeue",
        "Outbound target_full",
index 3052fab..45e810c 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
@@ -304,12 +303,22 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
        return (cc) ? -EIO : 0;
 }
 
+static inline int qdio_sync_input_queue(struct qdio_q *q)
+{
+       return qdio_siga_sync(q, 0, q->mask);
+}
+
+static inline int qdio_sync_output_queue(struct qdio_q *q)
+{
+       return qdio_siga_sync(q, q->mask, 0);
+}
+
 static inline int qdio_siga_sync_q(struct qdio_q *q)
 {
        if (q->is_input_q)
-               return qdio_siga_sync(q, 0, q->mask);
+               return qdio_sync_input_queue(q);
        else
-               return qdio_siga_sync(q, q->mask, 0);
+               return qdio_sync_output_queue(q);
 }
 
 static int qdio_siga_output(struct qdio_q *q, unsigned int count,
@@ -373,22 +382,10 @@ static inline int qdio_siga_input(struct qdio_q *q)
        return (cc) ? -EIO : 0;
 }
 
-#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
-#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
-
-static inline void qdio_sync_queues(struct qdio_q *q)
-{
-       /* PCI capable outbound queues will also be scanned so sync them too */
-       if (pci_out_supported(q->irq_ptr))
-               qdio_siga_sync_all(q);
-       else
-               qdio_siga_sync_q(q);
-}
-
 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
                        unsigned char *state)
 {
-       if (need_siga_sync(q))
+       if (qdio_need_siga_sync(q->irq_ptr))
                qdio_siga_sync_q(q);
        return get_buf_state(q, bufnr, state, 0);
 }
@@ -455,10 +452,9 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
        if (!count)
                return 0;
 
-       /*
-        * No siga sync here, as a PCI or we after a thin interrupt
-        * already sync'ed the queues.
-        */
+       if (qdio_need_siga_sync(q->irq_ptr))
+               qdio_sync_input_queue(q);
+
        count = get_buf_states(q, start, &state, count, 1);
        if (!count)
                return 0;
@@ -510,8 +506,8 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
        if (!atomic_read(&q->nr_buf_used))
                return 1;
 
-       if (need_siga_sync(q))
-               qdio_siga_sync_q(q);
+       if (qdio_need_siga_sync(q->irq_ptr))
+               qdio_sync_input_queue(q);
        get_buf_state(q, start, &state, 0);
 
        if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
@@ -521,15 +517,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
        return 1;
 }
 
-static inline int qdio_tasklet_schedule(struct qdio_q *q)
-{
-       if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
-               tasklet_schedule(&q->u.out.tasklet);
-               return 0;
-       }
-       return -EPERM;
-}
-
 static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
                                        unsigned int *error)
 {
@@ -538,17 +525,13 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
 
        q->timestamp = get_tod_clock_fast();
 
-       if (need_siga_sync(q))
-               if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
-                   !pci_out_supported(q->irq_ptr)) ||
-                   (queue_type(q) == QDIO_IQDIO_QFMT &&
-                   multicast_outbound(q)))
-                       qdio_siga_sync_q(q);
-
        count = atomic_read(&q->nr_buf_used);
        if (!count)
                return 0;
 
+       if (qdio_need_siga_sync(q->irq_ptr))
+               qdio_sync_output_queue(q);
+
        count = get_buf_states(q, start, &state, count, 0);
        if (!count)
                return 0;
@@ -595,19 +578,13 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
        }
 }
 
-/* all buffers processed? */
-static inline int qdio_outbound_q_done(struct qdio_q *q)
-{
-       return atomic_read(&q->nr_buf_used) == 0;
-}
-
 static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
                                unsigned long aob)
 {
        int retries = 0, cc;
        unsigned int busy_bit;
 
-       if (!need_siga_out(q))
+       if (!qdio_need_siga_out(q->irq_ptr))
                return 0;
 
        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
@@ -644,75 +621,6 @@ retry:
        return cc;
 }
 
-void qdio_outbound_tasklet(struct tasklet_struct *t)
-{
-       struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet);
-       struct qdio_q *q = container_of(out_q, struct qdio_q, u.out);
-       unsigned int start = q->first_to_check;
-       unsigned int error = 0;
-       int count;
-
-       qperf_inc(q, tasklet_outbound);
-       WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
-
-       count = get_outbound_buffer_frontier(q, start, &error);
-       if (count) {
-               q->first_to_check = add_buf(start, count);
-
-               if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) {
-                       qperf_inc(q, outbound_handler);
-                       DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
-                                     start, count);
-
-                       q->handler(q->irq_ptr->cdev, error, q->nr, start,
-                                  count, q->irq_ptr->int_parm);
-               }
-       }
-
-       if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
-           !qdio_outbound_q_done(q))
-               goto sched;
-
-       if (q->u.out.pci_out_enabled)
-               return;
-
-       /*
-        * Now we know that queue type is either qeth without pci enabled
-        * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
-        * is noticed and outbound_handler is called after some time.
-        */
-       if (qdio_outbound_q_done(q))
-               del_timer_sync(&q->u.out.timer);
-       else
-               if (!timer_pending(&q->u.out.timer) &&
-                   likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
-                       mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
-       return;
-
-sched:
-       qdio_tasklet_schedule(q);
-}
-
-void qdio_outbound_timer(struct timer_list *t)
-{
-       struct qdio_q *q = from_timer(q, t, u.out.timer);
-
-       qdio_tasklet_schedule(q);
-}
-
-static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
-{
-       struct qdio_q *out;
-       int i;
-
-       if (!pci_out_supported(irq) || !irq->scan_threshold)
-               return;
-
-       for_each_output_queue(irq, out, i)
-               if (!qdio_outbound_q_done(out))
-                       qdio_tasklet_schedule(out);
-}
-
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
                                  enum qdio_irq_states state)
 {
@@ -734,25 +642,11 @@ static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
 /* PCI interrupt handler */
 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 {
-       int i;
-       struct qdio_q *q;
-
        if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;
 
        qdio_deliver_irq(irq_ptr);
        irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
-
-       if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
-               return;
-
-       for_each_output_queue(irq_ptr, q, i) {
-               if (qdio_outbound_q_done(q))
-                       continue;
-               if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
-                       qdio_siga_sync_q(q);
-               qdio_tasklet_schedule(q);
-       }
 }
 
 static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
@@ -879,15 +773,34 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
 }
 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
 
-static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
+static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
 {
-       struct qdio_q *q;
-       int i;
+       struct ccw_device *cdev = irq->cdev;
+       long timeout;
+       int rc;
 
-       for_each_output_queue(irq_ptr, q, i) {
-               del_timer_sync(&q->u.out.timer);
-               tasklet_kill(&q->u.out.tasklet);
+       spin_lock_irq(get_ccwdev_lock(cdev));
+       qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
+       if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+               rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+       else
+               /* default behaviour is halt */
+               rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+       spin_unlock_irq(get_ccwdev_lock(cdev));
+       if (rc) {
+               DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
+               DBF_ERROR("rc:%4d", rc);
+               return rc;
        }
+
+       timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+                                                  irq->state == QDIO_IRQ_STATE_INACTIVE ||
+                                                  irq->state == QDIO_IRQ_STATE_ERR,
+                                                  10 * HZ);
+       if (timeout <= 0)
+               rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+
+       return rc;
 }
 
 /**
@@ -919,35 +832,13 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
        }
 
        /*
-        * Indicate that the device is going down. Scheduling the queue
-        * tasklets is forbidden from here on.
+        * Indicate that the device is going down.
         */
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 
-       qdio_shutdown_queues(irq_ptr);
        qdio_shutdown_debug_entries(irq_ptr);
 
-       /* cleanup subchannel */
-       spin_lock_irq(get_ccwdev_lock(cdev));
-       qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
-       if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
-               rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
-       else
-               /* default behaviour is halt */
-               rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
-       spin_unlock_irq(get_ccwdev_lock(cdev));
-       if (rc) {
-               DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
-               DBF_ERROR("rc:%4d", rc);
-               goto no_cleanup;
-       }
-
-       wait_event_interruptible_timeout(cdev->private->wait_q,
-               irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
-               irq_ptr->state == QDIO_IRQ_STATE_ERR,
-               10 * HZ);
-
-no_cleanup:
+       rc = qdio_cancel_ccw(irq_ptr, how);
        qdio_shutdown_thinint(irq_ptr);
        qdio_shutdown_irq(irq_ptr);
 
@@ -1061,8 +952,6 @@ static void qdio_trace_init_data(struct qdio_irq *irq,
        DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
        DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
        DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
-       DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
-       DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
        DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
                      data->no_output_qs);
        DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
@@ -1083,6 +972,7 @@ int qdio_establish(struct ccw_device *cdev,
 {
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct subchannel_id schid;
+       long timeout;
        int rc;
 
        ccw_device_get_schid(cdev, &schid);
@@ -1111,17 +1001,14 @@ int qdio_establish(struct ccw_device *cdev,
        qdio_setup_irq(irq_ptr, init_data);
 
        rc = qdio_establish_thinint(irq_ptr);
-       if (rc) {
-               qdio_shutdown_irq(irq_ptr);
-               mutex_unlock(&irq_ptr->setup_mutex);
-               return rc;
-       }
+       if (rc)
+               goto err_thinint;
 
        /* establish q */
        irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->equeue.count;
-       irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
+       irq_ptr->ccw.cda = (u32) virt_to_phys(irq_ptr->qdr);
 
        spin_lock_irq(get_ccwdev_lock(cdev));
        ccw_device_set_options_mask(cdev, 0);
@@ -1131,20 +1018,20 @@ int qdio_establish(struct ccw_device *cdev,
        if (rc) {
                DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
-               qdio_shutdown_thinint(irq_ptr);
-               qdio_shutdown_irq(irq_ptr);
-               mutex_unlock(&irq_ptr->setup_mutex);
-               return rc;
+               goto err_ccw_start;
        }
 
-       wait_event_interruptible_timeout(cdev->private->wait_q,
-               irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
-               irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+       timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+                                                  irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+                                                  irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+       if (timeout <= 0) {
+               rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+               goto err_ccw_timeout;
+       }
 
        if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
-               mutex_unlock(&irq_ptr->setup_mutex);
-               qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
-               return -EIO;
+               rc = -EIO;
+               goto err_ccw_error;
        }
 
        qdio_setup_ssqd_info(irq_ptr);
@@ -1156,6 +1043,17 @@ int qdio_establish(struct ccw_device *cdev,
        qdio_print_subchannel_info(irq_ptr);
        qdio_setup_debug_entries(irq_ptr);
        return 0;
+
+err_ccw_timeout:
+       qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
+err_ccw_error:
+err_ccw_start:
+       qdio_shutdown_thinint(irq_ptr);
+err_thinint:
+       qdio_shutdown_irq(irq_ptr);
+       qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+       mutex_unlock(&irq_ptr->setup_mutex);
+       return rc;
 }
 EXPORT_SYMBOL_GPL(qdio_establish);
 
@@ -1219,12 +1117,10 @@ EXPORT_SYMBOL_GPL(qdio_activate);
 /**
  * handle_inbound - reset processed input buffers
  * @q: queue containing the buffers
- * @callflags: flags
  * @bufnr: first buffer to process
  * @count: how many buffers are emptied
  */
-static int handle_inbound(struct qdio_q *q, unsigned int callflags,
-                         int bufnr, int count)
+static int handle_inbound(struct qdio_q *q, int bufnr, int count)
 {
        int overlap;
 
@@ -1241,7 +1137,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
        count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
        atomic_add(count, &q->nr_buf_used);
 
-       if (need_siga_in(q))
+       if (qdio_need_siga_in(q->irq_ptr))
                return qdio_siga_input(q);
 
        return 0;
@@ -1250,16 +1146,13 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 /**
  * handle_outbound - process filled outbound buffers
  * @q: queue containing the buffers
- * @callflags: flags
  * @bufnr: first buffer to process
  * @count: how many buffers are filled
  * @aob: asynchronous operation block
  */
-static int handle_outbound(struct qdio_q *q, unsigned int callflags,
-                          unsigned int bufnr, unsigned int count,
+static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count,
                           struct qaob *aob)
 {
-       const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
        unsigned char state = 0;
        int used, rc = 0;
 
@@ -1271,19 +1164,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
        if (used == QDIO_MAX_BUFFERS_PER_Q)
                qperf_inc(q, outbound_queue_full);
 
-       if (callflags & QDIO_FLAG_PCI_OUT) {
-               q->u.out.pci_out_enabled = 1;
-               qperf_inc(q, pci_request_int);
-       } else
-               q->u.out.pci_out_enabled = 0;
-
        if (queue_type(q) == QDIO_IQDIO_QFMT) {
                unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
 
                WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
                rc = qdio_kick_outbound_q(q, count, phys_aob);
-       } else if (need_siga_sync(q)) {
-               rc = qdio_siga_sync_q(q);
+       } else if (qdio_need_siga_sync(q->irq_ptr)) {
+               rc = qdio_sync_output_queue(q);
        } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
                   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
                   state == SLSB_CU_OUTPUT_PRIMED) {
@@ -1293,18 +1180,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
                rc = qdio_kick_outbound_q(q, count, 0);
        }
 
-       /* Let drivers implement their own completion scanning: */
-       if (!scan_threshold)
-               return rc;
-
-       /* in case of SIGA errors we must process the error immediately */
-       if (used >= scan_threshold || rc)
-               qdio_tasklet_schedule(q);
-       else
-               /* free the SBALs in case of no further traffic */
-               if (!timer_pending(&q->u.out.timer) &&
-                   likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
-                       mod_timer(&q->u.out.timer, jiffies + HZ);
        return rc;
 }
 
@@ -1336,11 +1211,9 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
        if (!count)
                return 0;
        if (callflags & QDIO_FLAG_SYNC_INPUT)
-               return handle_inbound(irq_ptr->input_qs[q_nr],
-                                     callflags, bufnr, count);
+               return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
        else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
-               return handle_outbound(irq_ptr->output_qs[q_nr],
-                                      callflags, bufnr, count, aob);
+               return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
        return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(do_QDIO);
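
With the outbound tasklet, timer and scan_threshold machinery removed, do_QDIO() no longer
triggers any completion scanning of its own; QDIO_FLAG_PCI_OUT has no effect here and the
upper-layer drivers are expected to harvest outbound completions themselves, e.g. via
qdio_inspect_queue(). A usage sketch with hypothetical values (not taken from the patch):

	/* Hypothetical driver call: hand one filled buffer at index 5 to
	 * output queue 0; the driver later collects the completion, e.g.
	 * with qdio_inspect_queue(cdev, 0, false, &bufnr, &error). */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, 5, 1, NULL);
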
@@ -1420,52 +1293,10 @@ int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
                return -ENODEV;
        q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
 
-       if (need_siga_sync(q))
-               qdio_siga_sync_q(q);
-
        return __qdio_inspect_queue(q, bufnr, error);
 }
 EXPORT_SYMBOL_GPL(qdio_inspect_queue);
 
-/**
- * qdio_get_next_buffers - process input buffers
- * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
- * @bufnr: first filled buffer number
- * @error: buffers are in error state
- *
- * Return codes
- *   < 0 - error
- *   = 0 - no new buffers found
- *   > 0 - number of processed buffers
- */
-int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
-                         int *error)
-{
-       struct qdio_q *q;
-       struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
-       if (!irq_ptr)
-               return -ENODEV;
-       q = irq_ptr->input_qs[nr];
-
-       /*
-        * Cannot rely on automatic sync after interrupt since queues may
-        * also be examined without interrupt.
-        */
-       if (need_siga_sync(q))
-               qdio_sync_queues(q);
-
-       qdio_check_outbound_pci_queues(irq_ptr);
-
-       /* Note: upper-layer MUST stop processing immediately here ... */
-       if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
-               return -EIO;
-
-       return __qdio_inspect_queue(q, bufnr, error);
-}
-EXPORT_SYMBOL(qdio_get_next_buffers);
-
 /**
  * qdio_stop_irq - disable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
index da67e49..20efafe 100644 (file)
@@ -89,55 +89,6 @@ void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
 }
 EXPORT_SYMBOL_GPL(qdio_reset_buffers);
 
-/*
- * qebsm is only available under 64bit but the adapter sets the feature
- * flag anyway, so we manually override it.
- */
-static inline int qebsm_possible(void)
-{
-       return css_general_characteristics.qebsm;
-}
-
-/*
- * qib_param_field: pointer to 128 bytes or NULL, if no param field
- * nr_input_qs: pointer to nr_queues*128 words of data or NULL
- */
-static void set_impl_params(struct qdio_irq *irq_ptr,
-                           unsigned int qib_param_field_format,
-                           unsigned char *qib_param_field,
-                           unsigned long *input_slib_elements,
-                           unsigned long *output_slib_elements)
-{
-       struct qdio_q *q;
-       int i, j;
-
-       if (!irq_ptr)
-               return;
-
-       irq_ptr->qib.pfmt = qib_param_field_format;
-       if (qib_param_field)
-               memcpy(irq_ptr->qib.parm, qib_param_field,
-                      sizeof(irq_ptr->qib.parm));
-
-       if (!input_slib_elements)
-               goto output;
-
-       for_each_input_queue(irq_ptr, q, i) {
-               for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
-                       q->slib->slibe[j].parms =
-                               input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
-       }
-output:
-       if (!output_slib_elements)
-               return;
-
-       for_each_output_queue(irq_ptr, q, i) {
-               for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
-                       q->slib->slibe[j].parms =
-                               output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
-       }
-}
-
 static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
 {
        struct qdio_q *q;
@@ -267,26 +218,9 @@ static void setup_queues(struct qdio_irq *irq_ptr,
                q->is_input_q = 0;
                setup_storage_lists(q, irq_ptr,
                                    qdio_init->output_sbal_addr_array[i], i);
-
-               tasklet_setup(&q->u.out.tasklet, qdio_outbound_tasklet);
-               timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
        }
 }
 
-static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
-{
-       if (qdioac & AC1_SIGA_INPUT_NEEDED)
-               irq_ptr->siga_flag.input = 1;
-       if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
-               irq_ptr->siga_flag.output = 1;
-       if (qdioac & AC1_SIGA_SYNC_NEEDED)
-               irq_ptr->siga_flag.sync = 1;
-       if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
-               irq_ptr->siga_flag.sync_after_ai = 1;
-       if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
-               irq_ptr->siga_flag.sync_out_after_pci = 1;
-}
-
 static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
                                  unsigned char qdioac, unsigned long token)
 {
@@ -363,7 +297,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
                qdioac = irq_ptr->ssqd_desc.qdioac1;
 
        check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
-       process_ac_flags(irq_ptr, qdioac);
+       irq_ptr->qdioac1 = qdioac;
        DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
        DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
 }
@@ -386,6 +320,8 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
        struct qdesfmt0 *desc = &irq_ptr->qdr->qdf0[0];
        int i;
 
+       memset(irq_ptr->qdr, 0, sizeof(struct qdr));
+
        irq_ptr->qdr->qfmt = qdio_init->q_format;
        irq_ptr->qdr->ac = qdio_init->qdr_ac;
        irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
@@ -405,12 +341,15 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
 static void setup_qib(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *init_data)
 {
-       if (qebsm_possible())
-               irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
-
-       irq_ptr->qib.rflags |= init_data->qib_rflags;
+       memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
 
        irq_ptr->qib.qfmt = init_data->q_format;
+       irq_ptr->qib.pfmt = init_data->qib_param_field_format;
+
+       irq_ptr->qib.rflags = init_data->qib_rflags;
+       if (css_general_characteristics.qebsm)
+               irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
        if (init_data->no_input_qs)
                irq_ptr->qib.isliba =
                        (unsigned long)(irq_ptr->input_qs[0]->slib);
@@ -419,6 +358,10 @@ static void setup_qib(struct qdio_irq *irq_ptr,
                        (unsigned long)(irq_ptr->output_qs[0]->slib);
        memcpy(irq_ptr->qib.ebcnam, dev_name(&irq_ptr->cdev->dev), 8);
        ASCEBC(irq_ptr->qib.ebcnam, 8);
+
+       if (init_data->qib_param_field)
+               memcpy(irq_ptr->qib.parm, init_data->qib_param_field,
+                      sizeof(irq_ptr->qib.parm));
 }
 
 int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
@@ -426,8 +369,7 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
        struct ccw_device *cdev = irq_ptr->cdev;
        struct ciw *ciw;
 
-       memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
-       memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+       irq_ptr->qdioac1 = 0;
        memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
        memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
        memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
@@ -436,13 +378,9 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
        irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
        irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
 
-       /* wipes qib.ac, required by ar7063 */
-       memset(irq_ptr->qdr, 0, sizeof(struct qdr));
-
        irq_ptr->int_parm = init_data->int_parm;
        irq_ptr->nr_input_qs = init_data->no_input_qs;
        irq_ptr->nr_output_qs = init_data->no_output_qs;
-       irq_ptr->scan_threshold = init_data->scan_threshold;
        ccw_device_get_schid(cdev, &irq_ptr->schid);
        setup_queues(irq_ptr, init_data);
 
@@ -450,10 +388,6 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
        set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
 
        setup_qib(irq_ptr, init_data);
-       set_impl_params(irq_ptr, init_data->qib_param_field_format,
-                       init_data->qib_param_field,
-                       init_data->input_slib_elements,
-                       init_data->output_slib_elements);
 
        /* fill input and output descriptors */
        setup_qdr(irq_ptr, init_data);
@@ -497,11 +431,8 @@ void qdio_shutdown_irq(struct qdio_irq *irq)
 
 void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
 {
-       char s[80];
-
-       snprintf(s, 80, "qdio: %s %s on SC %x using "
-                "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
-                dev_name(&irq_ptr->cdev->dev),
+       dev_info(&irq_ptr->cdev->dev,
+                "qdio: %s on SC %x using AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s\n",
                 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
                        ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
                 irq_ptr->schid.sch_no,
@@ -509,12 +440,9 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
                 (irq_ptr->sch_token) ? 1 : 0,
                 pci_out_supported(irq_ptr) ? 1 : 0,
                 css_general_characteristics.aif_tdd,
-                (irq_ptr->siga_flag.input) ? "R" : " ",
-                (irq_ptr->siga_flag.output) ? "W" : " ",
-                (irq_ptr->siga_flag.sync) ? "S" : " ",
-                (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
-                (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
-       printk(KERN_INFO "%s", s);
+                qdio_need_siga_in(irq_ptr) ? "R" : " ",
+                qdio_need_siga_out(irq_ptr) ? "W" : " ",
+                qdio_need_siga_sync(irq_ptr) ? "S" : " ");
 }
 
 int __init qdio_setup_init(void)
@@ -541,7 +469,7 @@ int __init qdio_setup_init(void)
                  (css_general_characteristics.aif_osa) ? 1 : 0);
 
        /* Check for QEBSM support in general (bit 58). */
-       DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
+       DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm);
        rc = 0;
 out:
        return rc;
index 8d3a1d8..439c1f6 100644 (file)
@@ -127,22 +127,13 @@ static struct bus_type ap_bus_type;
 /* Adapter interrupt definitions */
 static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
 
-static int ap_airq_flag;
+static bool ap_irq_flag;
 
 static struct airq_struct ap_airq = {
        .handler = ap_interrupt_handler,
        .isc = AP_ISC,
 };
 
-/**
- * ap_using_interrupts() - Returns non-zero if interrupt support is
- * available.
- */
-static inline int ap_using_interrupts(void)
-{
-       return ap_airq_flag;
-}
-
 /**
  * ap_airq_ptr() - Get the address of the adapter interrupt indicator
  *
@@ -152,7 +143,7 @@ static inline int ap_using_interrupts(void)
  */
 void *ap_airq_ptr(void)
 {
-       if (ap_using_interrupts())
+       if (ap_irq_flag)
                return ap_airq.lsi_ptr;
        return NULL;
 }
@@ -396,7 +387,7 @@ void ap_wait(enum ap_sm_wait wait)
        switch (wait) {
        case AP_SM_WAIT_AGAIN:
        case AP_SM_WAIT_INTERRUPT:
-               if (ap_using_interrupts())
+               if (ap_irq_flag)
                        break;
                if (ap_poll_kthread) {
                        wake_up(&ap_poll_wait);
@@ -471,7 +462,7 @@ static void ap_tasklet_fn(unsigned long dummy)
         * be received. Doing it at the beginning of the tasklet is therefore
         * important so that no requests on any AP get lost.
         */
-       if (ap_using_interrupts())
+       if (ap_irq_flag)
                xchg(ap_airq.lsi_ptr, 0);
 
        spin_lock_bh(&ap_queues_lock);
@@ -541,7 +532,7 @@ static int ap_poll_thread_start(void)
 {
        int rc;
 
-       if (ap_using_interrupts() || ap_poll_kthread)
+       if (ap_irq_flag || ap_poll_kthread)
                return 0;
        mutex_lock(&ap_poll_thread_mutex);
        ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
@@ -703,7 +694,7 @@ static int __ap_calc_helper(struct device *dev, void *arg)
 
        if (is_queue_dev(dev)) {
                pctrs->apqns++;
-               if ((to_ap_dev(dev))->drv)
+               if (dev->driver)
                        pctrs->bound++;
        }
 
@@ -883,7 +874,6 @@ static int ap_device_probe(struct device *dev)
                         to_ap_queue(dev)->qid);
        spin_unlock_bh(&ap_queues_lock);
 
-       ap_dev->drv = ap_drv;
        rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
 
        if (rc) {
@@ -891,7 +881,6 @@ static int ap_device_probe(struct device *dev)
                if (is_queue_dev(dev))
                        hash_del(&to_ap_queue(dev)->hnode);
                spin_unlock_bh(&ap_queues_lock);
-               ap_dev->drv = NULL;
        } else
                ap_check_bindings_complete();
 
@@ -904,7 +893,7 @@ out:
 static int ap_device_remove(struct device *dev)
 {
        struct ap_device *ap_dev = to_ap_dev(dev);
-       struct ap_driver *ap_drv = ap_dev->drv;
+       struct ap_driver *ap_drv = to_ap_drv(dev->driver);
 
        /* prepare ap queue device removal */
        if (is_queue_dev(dev))
@@ -923,7 +912,6 @@ static int ap_device_remove(struct device *dev)
        if (is_queue_dev(dev))
                hash_del(&to_ap_queue(dev)->hnode);
        spin_unlock_bh(&ap_queues_lock);
-       ap_dev->drv = NULL;
 
        put_device(dev);
 
@@ -1187,7 +1175,7 @@ static BUS_ATTR_RO(ap_adapter_mask);
 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
 {
        return scnprintf(buf, PAGE_SIZE, "%d\n",
-                        ap_using_interrupts() ? 1 : 0);
+                        ap_irq_flag ? 1 : 0);
 }
 
 static BUS_ATTR_RO(ap_interrupts);
@@ -1912,7 +1900,7 @@ static int __init ap_module_init(void)
        /* enable interrupts if available */
        if (ap_interrupts_available()) {
                rc = register_adapter_interrupt(&ap_airq);
-               ap_airq_flag = (rc == 0);
+               ap_irq_flag = (rc == 0);
        }
 
        /* Create /sys/bus/ap. */
@@ -1956,7 +1944,7 @@ out_work:
 out_bus:
        bus_unregister(&ap_bus_type);
 out:
-       if (ap_using_interrupts())
+       if (ap_irq_flag)
                unregister_adapter_interrupt(&ap_airq);
        kfree(ap_qci_info);
        return rc;
index 8f18abd..95b5777 100644 (file)
@@ -80,12 +80,6 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
 #define AP_FUNC_EP11  5
 #define AP_FUNC_APXA  6
 
-/*
- * AP interrupt states
- */
-#define AP_INTR_DISABLED       0       /* AP interrupt disabled */
-#define AP_INTR_ENABLED                1       /* AP interrupt enabled */
-
 /*
  * AP queue state machine states
  */
@@ -112,7 +106,7 @@ enum ap_sm_event {
  * AP queue state wait behaviour
  */
 enum ap_sm_wait {
-       AP_SM_WAIT_AGAIN,       /* retry immediately */
+       AP_SM_WAIT_AGAIN = 0,   /* retry immediately */
        AP_SM_WAIT_TIMEOUT,     /* wait for timeout */
        AP_SM_WAIT_INTERRUPT,   /* wait for thin interrupt (if available) */
        AP_SM_WAIT_NONE,        /* no wait */
@@ -157,7 +151,6 @@ void ap_driver_unregister(struct ap_driver *);
 
 struct ap_device {
        struct device device;
-       struct ap_driver *drv;          /* Pointer to AP device driver. */
        int device_type;                /* AP device type. */
 };
 
@@ -165,7 +158,6 @@ struct ap_device {
 
 struct ap_card {
        struct ap_device ap_dev;
-       void *private;                  /* ap driver private pointer. */
        int raw_hwtype;                 /* AP raw hardware type. */
        unsigned int functions;         /* AP device function bitfield. */
        int queue_depth;                /* AP queue depth.*/
@@ -182,11 +174,10 @@ struct ap_queue {
        struct hlist_node hnode;        /* Node for the ap_queues hashtable */
        struct ap_card *card;           /* Ptr to assoc. AP card. */
        spinlock_t lock;                /* Per device lock. */
-       void *private;                  /* ap driver private pointer. */
        enum ap_dev_state dev_state;    /* queue device state */
        bool config;                    /* configured state */
        ap_qid_t qid;                   /* AP queue id. */
-       int interrupt;                  /* indicate if interrupts are enabled */
+       bool interrupt;                 /* indicate if interrupts are enabled */
        int queue_count;                /* # messages currently on AP queue. */
        int pendingq_count;             /* # requests on pendingq list. */
        int requestq_count;             /* # requests on requestq list. */
index 669f96f..d70c4d3 100644 (file)
@@ -19,7 +19,7 @@
 static void __ap_flush_queue(struct ap_queue *aq);
 
 /**
- * ap_queue_enable_interruption(): Enable interruption on an AP queue.
+ * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
  * @qid: The AP queue number
  * @ind: the notification indicator byte
  *
@@ -27,7 +27,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
  * value it waits a while and tests the AP queue if interrupts
  * have been switched on using ap_test_queue().
  */
-static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
+static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
 {
        struct ap_queue_status status;
        struct ap_qirq_ctrl qirqctrl = { 0 };
@@ -218,7 +218,8 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
                return AP_SM_WAIT_NONE;
        case AP_RESPONSE_NO_PENDING_REPLY:
                if (aq->queue_count > 0)
-                       return AP_SM_WAIT_INTERRUPT;
+                       return aq->interrupt ?
+                               AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
                aq->sm_state = AP_SM_STATE_IDLE;
                return AP_SM_WAIT_NONE;
        default:
@@ -272,7 +273,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
                fallthrough;
        case AP_RESPONSE_Q_FULL:
                aq->sm_state = AP_SM_STATE_QUEUE_FULL;
-               return AP_SM_WAIT_INTERRUPT;
+               return aq->interrupt ?
+                       AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
        case AP_RESPONSE_RESET_IN_PROGRESS:
                aq->sm_state = AP_SM_STATE_RESET_WAIT;
                return AP_SM_WAIT_TIMEOUT;
@@ -322,7 +324,7 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
        case AP_RESPONSE_NORMAL:
        case AP_RESPONSE_RESET_IN_PROGRESS:
                aq->sm_state = AP_SM_STATE_RESET_WAIT;
-               aq->interrupt = AP_INTR_DISABLED;
+               aq->interrupt = false;
                return AP_SM_WAIT_TIMEOUT;
        default:
                aq->dev_state = AP_DEV_STATE_ERROR;
@@ -355,7 +357,7 @@ static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
        switch (status.response_code) {
        case AP_RESPONSE_NORMAL:
                lsi_ptr = ap_airq_ptr();
-               if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
+               if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
                        aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
                else
                        aq->sm_state = (aq->queue_count > 0) ?
@@ -396,7 +398,7 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
 
        if (status.irq_enabled == 1) {
                /* Irqs are now enabled */
-               aq->interrupt = AP_INTR_ENABLED;
+               aq->interrupt = true;
                aq->sm_state = (aq->queue_count > 0) ?
                        AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
        }
@@ -586,7 +588,7 @@ static ssize_t interrupt_show(struct device *dev,
        spin_lock_bh(&aq->lock);
        if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
                rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
-       else if (aq->interrupt == AP_INTR_ENABLED)
+       else if (aq->interrupt)
                rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
        else
                rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
@@ -767,7 +769,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
        aq->ap_dev.device.type = &ap_queue_type;
        aq->ap_dev.device_type = device_type;
        aq->qid = qid;
-       aq->interrupt = AP_INTR_DISABLED;
+       aq->interrupt = false;
        spin_lock_init(&aq->lock);
        INIT_LIST_HEAD(&aq->pendingq);
        INIT_LIST_HEAD(&aq->requestq);
index 122c85c..67f1455 100644 (file)
@@ -35,7 +35,7 @@ static int match_apqn(struct device *dev, const void *data)
 }
 
 /**
- * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
+ * vfio_ap_get_queue - retrieve a queue with a specific APQN from a list
  * @matrix_mdev: the associated mediated matrix
  * @apqn: The queue APQN
  *
@@ -43,7 +43,7 @@ static int match_apqn(struct device *dev, const void *data)
  * devices of the vfio_ap_drv.
  * Verify that the APID and the APQI are set in the matrix.
  *
- * Returns the pointer to the associated vfio_ap_queue
+ * Return: the pointer to the associated vfio_ap_queue
  */
 static struct vfio_ap_queue *vfio_ap_get_queue(
                                        struct ap_matrix_mdev *matrix_mdev,
@@ -64,7 +64,7 @@ static struct vfio_ap_queue *vfio_ap_get_queue(
 }
 
 /**
- * vfio_ap_wait_for_irqclear
+ * vfio_ap_wait_for_irqclear - clears the IR bit or gives up after 5 tries
  * @apqn: The AP Queue number
  *
  * Checks the IRQ bit for the status of this APQN using ap_tapq.
@@ -72,7 +72,6 @@ static struct vfio_ap_queue *vfio_ap_get_queue(
  * Returns if ap_tapq function failed with invalid, deconfigured or
  * checkstopped AP.
  * Otherwise retries up to 5 times after waiting 20ms.
- *
  */
 static void vfio_ap_wait_for_irqclear(int apqn)
 {
@@ -105,13 +104,12 @@ static void vfio_ap_wait_for_irqclear(int apqn)
 }
 
 /**
- * vfio_ap_free_aqic_resources
+ * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
  * @q: The vfio_ap_queue
  *
  * Unregisters the ISC in the GIB when the saved ISC not invalid.
- * Unpin the guest's page holding the NIB when it exist.
- * Reset the saved_pfn and saved_isc to invalid values.
- *
+ * Unpins the guest's page holding the NIB when it exists.
+ * Resets the saved_pfn and saved_isc to invalid values.
  */
 static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
 {
@@ -130,7 +128,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
 }
 
 /**
- * vfio_ap_irq_disable
+ * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
  * @q: The vfio_ap_queue
  *
  * Uses ap_aqic to disable the interruption and in case of success, reset
@@ -144,6 +142,8 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
  *
  * Returns if ap_aqic function failed with invalid, deconfigured or
  * checkstopped AP.
+ *
+ * Return: &struct ap_queue_status
  */
 static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
 {
@@ -183,9 +183,8 @@ end_free:
 }
 
 /**
- * vfio_ap_setirq: Enable Interruption for a APQN
+ * vfio_ap_irq_enable - Enable Interruption for an APQN
  *
- * @dev: the device associated with the ap_queue
  * @q:  the vfio_ap_queue holding AQIC parameters
  *
  * Pin the NIB saved in *q
@@ -197,6 +196,8 @@ end_free:
  *
  * Otherwise return the ap_queue_status returned by the ap_aqic(),
  * all retry handling will be done by the guest.
+ *
+ * Return: &struct ap_queue_status
  */
 static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
                                                 int isc,
@@ -253,7 +254,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
 }
 
 /**
- * handle_pqap: PQAP instruction callback
+ * handle_pqap - PQAP instruction callback
  *
  * @vcpu: The vcpu on which we received the PQAP instruction
  *
@@ -270,8 +271,8 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
  * We take the matrix_dev lock to ensure serialization on queues and
  * mediated device access.
  *
- * Return 0 if we could handle the request inside KVM.
- * otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
+ * Return: 0 if we could handle the request inside KVM.
+ * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
  */
 static int handle_pqap(struct kvm_vcpu *vcpu)
 {
@@ -426,7 +427,7 @@ struct vfio_ap_queue_reserved {
 };
 
 /**
- * vfio_ap_has_queue
+ * vfio_ap_has_queue - determines if the AP queue containing the target in @data
  *
  * @dev: an AP queue device
  * @data: a struct vfio_ap_queue_reserved reference
@@ -443,7 +444,7 @@ struct vfio_ap_queue_reserved {
  * - If @data contains only an apqi value, @data will be flagged as
  *   reserved if the APQI field in the AP queue device matches
  *
- * Returns 0 to indicate the input to function succeeded. Returns -EINVAL if
+ * Return: 0 to indicate the input to the function succeeded. Returns -EINVAL if
  * @data does not contain either an apid or apqi.
  */
 static int vfio_ap_has_queue(struct device *dev, void *data)
@@ -473,9 +474,9 @@ static int vfio_ap_has_queue(struct device *dev, void *data)
 }
 
 /**
- * vfio_ap_verify_queue_reserved
+ * vfio_ap_verify_queue_reserved - verifies that the AP queue containing
+ * @apid or @apqi is reserved
  *
- * @matrix_dev: a mediated matrix device
  * @apid: an AP adapter ID
  * @apqi: an AP queue index
  *
@@ -492,7 +493,7 @@ static int vfio_ap_has_queue(struct device *dev, void *data)
  * - If only @apqi is not NULL, then there must be an AP queue device bound
  *   to the vfio_ap driver with an APQN containing @apqi
  *
- * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
+ * Return: 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
  */
 static int vfio_ap_verify_queue_reserved(unsigned long *apid,
                                         unsigned long *apqi)
@@ -536,15 +537,15 @@ vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
 }
 
 /**
- * vfio_ap_mdev_verify_no_sharing
+ * vfio_ap_mdev_verify_no_sharing - verifies that the AP matrix is not configured
+ *
+ * @matrix_mdev: the mediated matrix device
  *
  * Verifies that the APQNs derived from the cross product of the AP adapter IDs
  * and AP queue indexes comprising the AP matrix are not configured for another
  * mediated device. AP queue sharing is not allowed.
  *
- * @matrix_mdev: the mediated matrix device
- *
- * Returns 0 if the APQNs are not shared, otherwise; returns -EADDRINUSE.
+ * Return: 0 if the APQNs are not shared; otherwise returns -EADDRINUSE.
  */
 static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
 {
@@ -578,7 +579,8 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
 }
 
 /**
- * assign_adapter_store
+ * assign_adapter_store - parses the APID from @buf and sets the
+ * corresponding bit in the mediated matrix device's APM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's assign_adapter attribute
@@ -586,10 +588,7 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
  *             be assigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APID from @buf and sets the corresponding bit in the mediated
- * matrix device's APM.
- *
- * Returns the number of bytes processed if the APID is valid; otherwise,
+ * Return: the number of bytes processed if the APID is valid; otherwise,
  * returns one of the following errors:
  *
  *     1. -EINVAL
@@ -666,17 +665,15 @@ done:
 static DEVICE_ATTR_WO(assign_adapter);
 
 /**
- * unassign_adapter_store
+ * unassign_adapter_store - parses the APID from @buf and clears the
+ * corresponding bit in the mediated matrix device's APM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's unassign_adapter attribute
  * @buf:       a buffer containing the adapter number (APID) to be unassigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APID from @buf and clears the corresponding bit in the mediated
- * matrix device's APM.
- *
- * Returns the number of bytes processed if the APID is valid; otherwise,
+ * Return: the number of bytes processed if the APID is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the APID is not a number
  *     -ENODEV if the APID it exceeds the maximum value configured for the
@@ -740,7 +737,9 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
 }
 
 /**
- * assign_domain_store
+ * assign_domain_store - parses the APQI from @buf and sets the
+ * corresponding bit in the mediated matrix device's AQM
+ *
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's assign_domain attribute
@@ -748,10 +747,7 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
  *             be assigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APQI from @buf and sets the corresponding bit in the mediated
- * matrix device's AQM.
- *
- * Returns the number of bytes processed if the APQI is valid; otherwise returns
+ * Return: the number of bytes processed if the APQI is valid; otherwise returns
  * one of the following errors:
  *
  *     1. -EINVAL
@@ -824,7 +820,8 @@ static DEVICE_ATTR_WO(assign_domain);
 
 
 /**
- * unassign_domain_store
+ * unassign_domain_store - parses the APQI from @buf and clears the
+ * corresponding bit in the mediated matrix device's AQM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's unassign_domain attribute
@@ -832,10 +829,7 @@ static DEVICE_ATTR_WO(assign_domain);
  *             be unassigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APQI from @buf and clears the corresponding bit in the
- * mediated matrix device's AQM.
- *
- * Returns the number of bytes processed if the APQI is valid; otherwise,
+ * Return: the number of bytes processed if the APQI is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the APQI is not a number
  *     -ENODEV if the APQI exceeds the maximum value configured for the system
@@ -879,17 +873,16 @@ done:
 static DEVICE_ATTR_WO(unassign_domain);
 
 /**
- * assign_control_domain_store
+ * assign_control_domain_store - parses the domain ID from @buf and sets
+ * the corresponding bit in the mediated matrix device's ADM
+ *
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's assign_control_domain attribute
  * @buf:       a buffer containing the domain ID to be assigned
  * @count:     the number of bytes in @buf
  *
- * Parses the domain ID from @buf and sets the corresponding bit in the mediated
- * matrix device's ADM.
- *
- * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * Return: the number of bytes processed if the domain ID is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the ID is not a number
  *     -ENODEV if the ID exceeds the maximum value configured for the system
@@ -937,17 +930,15 @@ done:
 static DEVICE_ATTR_WO(assign_control_domain);
 
 /**
- * unassign_control_domain_store
+ * unassign_control_domain_store - parses the domain ID from @buf and
+ * clears the corresponding bit in the mediated matrix device's ADM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's unassign_control_domain attribute
  * @buf:       a buffer containing the domain ID to be unassigned
  * @count:     the number of bytes in @buf
  *
- * Parses the domain ID from @buf and clears the corresponding bit in the
- * mediated matrix device's ADM.
- *
- * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * Return: the number of bytes processed if the domain ID is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the ID is not a number
  *     -ENODEV if the ID exceeds the maximum value configured for the system
@@ -1085,14 +1076,12 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
 };
 
 /**
- * vfio_ap_mdev_set_kvm
+ * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
+ * to manage AP resources for the guest whose state is represented by @kvm
  *
  * @matrix_mdev: a mediated matrix device
  * @kvm: reference to KVM instance
  *
- * Sets all data for @matrix_mdev that are needed to manage AP resources
- * for the guest whose state is represented by @kvm.
- *
  * Note: The matrix_dev->lock must be taken prior to calling
  * this function; however, the lock will be temporarily released while the
  * guest's AP configuration is set to avoid a potential lockdep splat.
@@ -1100,7 +1089,7 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
  * certain circumstances, will result in a circular lock dependency if this is
  * done under the @matrix_mdev->lock.
  *
- * Return 0 if no other mediated matrix device has a reference to @kvm;
+ * Return: 0 if no other mediated matrix device has a reference to @kvm;
  * otherwise, returns an -EPERM.
  */
 static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
@@ -1131,8 +1120,8 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
        return 0;
 }
 
-/*
- * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
+/**
+ * vfio_ap_mdev_iommu_notifier - IOMMU notifier callback
  *
  * @nb: The notifier block
  * @action: Action to be taken
@@ -1141,6 +1130,7 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
  * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
  * pinned before). Other requests are ignored.
  *
+ * Return: for an UNMAP request, NOTIFY_OK; otherwise NOTIFY_DONE.
  */
 static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
                                       unsigned long action, void *data)
@@ -1161,19 +1151,17 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
 }
 
 /**
- * vfio_ap_mdev_unset_kvm
+ * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
+ * by @matrix_mdev.
  *
  * @matrix_mdev: a matrix mediated device
  *
- * Performs clean-up of resources no longer needed by @matrix_mdev.
- *
  * Note: The matrix_dev->lock must be taken prior to calling
  * this function; however, the lock will be temporarily released while the
  * guest's AP configuration is cleared to avoid a potential lockdep splat.
  * The kvm->lock is taken to clear the guest's AP configuration which, under
  * certain circumstances, will result in a circular lock dependency if this is
  * done under the @matrix_mdev->lock.
- *
  */
 static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
 {
index 529ffe2..fa0cb86 100644 (file)
@@ -572,14 +572,14 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
                                                     struct module **pmod,
                                                     unsigned int weight)
 {
-       if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
+       if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
                return NULL;
        zcrypt_queue_get(zq);
        get_device(&zq->queue->ap_dev.device);
        atomic_add(weight, &zc->load);
        atomic_add(weight, &zq->load);
        zq->request_count++;
-       *pmod = zq->queue->ap_dev.drv->driver.owner;
+       *pmod = zq->queue->ap_dev.device.driver->owner;
        return zq;
 }
 
index 40fd5d3..ef11d2a 100644 (file)
@@ -39,7 +39,7 @@
 static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
-       struct zcrypt_card *zc = to_ap_card(dev)->private;
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
 
        return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
 }
@@ -50,8 +50,8 @@ static ssize_t online_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
        int online = ac->config && zc->online ? 1 : 0;
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", online);
@@ -61,8 +61,8 @@ static ssize_t online_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
        struct zcrypt_queue *zq;
        int online, id, i = 0, maxzqs = 0;
        struct zcrypt_queue **zq_uelist = NULL;
@@ -116,7 +116,7 @@ static ssize_t load_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
 {
-       struct zcrypt_card *zc = to_ap_card(dev)->private;
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
 }
index bc34bed..6a3c2b4 100644 (file)
@@ -1724,10 +1724,10 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
        rlen = vlen = PAGE_SIZE/2;
        rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
                                       rarray, &rlen, varray, &vlen);
-       if (rc == 0 && rlen >= 10*8 && vlen >= 240) {
-               ci->new_apka_mk_state = (char) rarray[7*8];
-               ci->cur_apka_mk_state = (char) rarray[8*8];
-               ci->old_apka_mk_state = (char) rarray[9*8];
+       if (rc == 0 && rlen >= 13*8 && vlen >= 240) {
+               ci->new_apka_mk_state = (char) rarray[10*8];
+               ci->cur_apka_mk_state = (char) rarray[11*8];
+               ci->old_apka_mk_state = (char) rarray[12*8];
                if (ci->old_apka_mk_state == '2')
                        memcpy(&ci->old_apka_mkvp, varray + 208, 8);
                if (ci->cur_apka_mk_state == '2')
index 62ceeb7..fa8293d 100644 (file)
@@ -89,7 +89,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
        if (!zc)
                return -ENOMEM;
        zc->card = ac;
-       ac->private = zc;
+       dev_set_drvdata(&ap_dev->device, zc);
 
        if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
                zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
@@ -118,7 +118,6 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
 
        rc = zcrypt_card_register(zc);
        if (rc) {
-               ac->private = NULL;
                zcrypt_card_free(zc);
        }
 
@@ -131,10 +130,9 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
 {
-       struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+       struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
 
-       if (zc)
-               zcrypt_card_unregister(zc);
+       zcrypt_card_unregister(zc);
 }
 
 static struct ap_driver zcrypt_cex2a_card_driver = {
@@ -176,10 +174,9 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
        ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX2A_CLEANUP_TIME;
-       aq->private = zq;
+       dev_set_drvdata(&ap_dev->device, zq);
        rc = zcrypt_queue_register(zq);
        if (rc) {
-               aq->private = NULL;
                zcrypt_queue_free(zq);
        }
 
@@ -192,11 +189,9 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
 {
-       struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-       struct zcrypt_queue *zq = aq->private;
+       struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
 
-       if (zq)
-               zcrypt_queue_unregister(zq);
+       zcrypt_queue_unregister(zq);
 }
 
 static struct ap_driver zcrypt_cex2a_queue_driver = {
index 7a8cbdb..a0b9f11 100644 (file)
@@ -66,9 +66,9 @@ static ssize_t cca_serialnr_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct cca_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -97,9 +97,9 @@ static ssize_t cca_mkvps_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int n = 0;
        struct cca_info ci;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
        static const char * const cao_state[] = { "invalid", "valid" };
        static const char * const new_state[] = { "empty", "partial", "full" };
 
@@ -261,7 +261,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
        if (!zc)
                return -ENOMEM;
        zc->card = ac;
-       ac->private = zc;
+       dev_set_drvdata(&ap_dev->device, zc);
        switch (ac->ap_dev.device_type) {
        case AP_DEVICE_TYPE_CEX2C:
                zc->user_space_type = ZCRYPT_CEX2C;
@@ -287,7 +287,6 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
 
        rc = zcrypt_card_register(zc);
        if (rc) {
-               ac->private = NULL;
                zcrypt_card_free(zc);
                return rc;
        }
@@ -297,7 +296,6 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
                                        &cca_card_attr_grp);
                if (rc) {
                        zcrypt_card_unregister(zc);
-                       ac->private = NULL;
                        zcrypt_card_free(zc);
                }
        }
@@ -311,13 +309,13 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
        struct ap_card *ac = to_ap_card(&ap_dev->device);
-       struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
 
        if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
-       if (zc)
-               zcrypt_card_unregister(zc);
+
+       zcrypt_card_unregister(zc);
 }
 
 static struct ap_driver zcrypt_cex2c_card_driver = {
@@ -359,10 +357,9 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
        ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX2C_CLEANUP_TIME;
-       aq->private = zq;
+       dev_set_drvdata(&ap_dev->device, zq);
        rc = zcrypt_queue_register(zq);
        if (rc) {
-               aq->private = NULL;
                zcrypt_queue_free(zq);
                return rc;
        }
@@ -372,7 +369,6 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
                                        &cca_queue_attr_grp);
                if (rc) {
                        zcrypt_queue_unregister(zq);
-                       aq->private = NULL;
                        zcrypt_queue_free(zq);
                }
        }
@@ -386,13 +382,13 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-       struct zcrypt_queue *zq = aq->private;
 
        if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
-       if (zq)
-               zcrypt_queue_unregister(zq);
+
+       zcrypt_queue_unregister(zq);
 }
 
 static struct ap_driver zcrypt_cex2c_queue_driver = {
index f518b5f..1f7ec54 100644 (file)
@@ -75,9 +75,9 @@ static ssize_t cca_serialnr_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct cca_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -106,9 +106,9 @@ static ssize_t cca_mkvps_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int n = 0;
        struct cca_info ci;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
        static const char * const cao_state[] = { "invalid", "valid" };
        static const char * const new_state[] = { "empty", "partial", "full" };
 
@@ -187,9 +187,9 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -208,9 +208,9 @@ static ssize_t ep11_fw_version_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -231,9 +231,9 @@ static ssize_t ep11_serialnr_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -264,10 +264,10 @@ static ssize_t ep11_card_op_modes_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        int i, n = 0;
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -309,9 +309,9 @@ static ssize_t ep11_mkvps_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int n = 0;
        struct ep11_domain_info di;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
        static const char * const cwk_state[] = { "invalid", "valid" };
        static const char * const nwk_state[] = { "empty", "uncommitted",
                                                  "committed" };
@@ -357,9 +357,9 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int i, n = 0;
        struct ep11_domain_info di;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
 
        memset(&di, 0, sizeof(di));
 
@@ -441,7 +441,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
        if (!zc)
                return -ENOMEM;
        zc->card = ac;
-       ac->private = zc;
+       dev_set_drvdata(&ap_dev->device, zc);
        if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
                if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
                        zc->type_string = "CEX4A";
@@ -539,7 +539,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
 
        rc = zcrypt_card_register(zc);
        if (rc) {
-               ac->private = NULL;
                zcrypt_card_free(zc);
                return rc;
        }
@@ -549,7 +548,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
                                        &cca_card_attr_grp);
                if (rc) {
                        zcrypt_card_unregister(zc);
-                       ac->private = NULL;
                        zcrypt_card_free(zc);
                }
        } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
@@ -557,7 +555,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
                                        &ep11_card_attr_grp);
                if (rc) {
                        zcrypt_card_unregister(zc);
-                       ac->private = NULL;
                        zcrypt_card_free(zc);
                }
        }
@@ -571,15 +568,15 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
        struct ap_card *ac = to_ap_card(&ap_dev->device);
-       struct zcrypt_card *zc = ac->private;
 
        if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
        else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
                sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
-       if (zc)
-               zcrypt_card_unregister(zc);
+
+       zcrypt_card_unregister(zc);
 }
 
 static struct ap_driver zcrypt_cex4_card_driver = {
@@ -629,10 +626,9 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
        ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX4_CLEANUP_TIME;
-       aq->private = zq;
+       dev_set_drvdata(&ap_dev->device, zq);
        rc = zcrypt_queue_register(zq);
        if (rc) {
-               aq->private = NULL;
                zcrypt_queue_free(zq);
                return rc;
        }
@@ -642,7 +638,6 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
                                        &cca_queue_attr_grp);
                if (rc) {
                        zcrypt_queue_unregister(zq);
-                       aq->private = NULL;
                        zcrypt_queue_free(zq);
                }
        } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
@@ -650,7 +645,6 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
                                        &ep11_queue_attr_grp);
                if (rc) {
                        zcrypt_queue_unregister(zq);
-                       aq->private = NULL;
                        zcrypt_queue_free(zq);
                }
        }
@@ -664,15 +658,15 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-       struct zcrypt_queue *zq = aq->private;
 
        if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
        else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
                sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
-       if (zq)
-               zcrypt_queue_unregister(zq);
+
+       zcrypt_queue_unregister(zq);
 }
 
 static struct ap_driver zcrypt_cex4_queue_driver = {
index 20f1228..398bde2 100644 (file)
@@ -40,8 +40,8 @@ static ssize_t online_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        struct ap_queue *aq = to_ap_queue(dev);
-       struct zcrypt_queue *zq = aq->private;
        int online = aq->config && zq->online ? 1 : 0;
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", online);
@@ -51,8 +51,8 @@ static ssize_t online_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        struct ap_queue *aq = to_ap_queue(dev);
-       struct zcrypt_queue *zq = aq->private;
        struct zcrypt_card *zc = zq->zcard;
        int online;
 
@@ -83,7 +83,7 @@ static ssize_t load_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
 {
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
 }
@@ -170,7 +170,7 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
        int rc;
 
        spin_lock(&zcrypt_list_lock);
-       zc = zq->queue->card->private;
+       zc = dev_get_drvdata(&zq->queue->card->ap_dev.device);
        zcrypt_card_get(zc);
        zq->zcard = zc;
        zq->online = 1; /* New devices are online by default. */
index 62f88cc..f96755a 100644 (file)
@@ -3804,14 +3804,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
                                     unsigned long card_ptr)
 {
        struct qeth_card *card        = (struct qeth_card *) card_ptr;
-       struct net_device *dev = card->dev;
 
-       QETH_CARD_TEXT(card, 6, "qdouhdl");
-       if (qdio_error & QDIO_ERROR_FATAL) {
-               QETH_CARD_TEXT(card, 2, "achkcond");
-               netif_tx_stop_all_queues(dev);
-               qeth_schedule_recovery(card);
-       }
+       QETH_CARD_TEXT(card, 2, "achkcond");
+       netif_tx_stop_all_queues(card->dev);
+       qeth_schedule_recovery(card);
 }
 
 /**
index 2abf86c..d7cdd9c 100644 (file)
@@ -279,7 +279,7 @@ static void qeth_l2_set_pnso_mode(struct qeth_card *card,
 
 static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        QETH_CARD_TEXT(card, 2, "fdbflush");
 
@@ -679,7 +679,7 @@ static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
                                      struct net_if_token *token,
                                      struct mac_addr_lnid *addr_lnid)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
        u8 ntfy_mac[ETH_ALEN];
 
        ether_addr_copy(ntfy_mac, addr_lnid->mac);
index 6671d95..8f19bed 100644 (file)
@@ -69,10 +69,7 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
 {
        struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
 
-       if (unlikely(qdio_err)) {
-               zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
-               return;
-       }
+       zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
 }
 
 static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
index 25f6e1a..66652ab 100644 (file)
@@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
                if (!h->ctlr)
                        err = SCSI_DH_RES_TEMP_UNAVAIL;
                else {
-                       list_add_rcu(&h->node, &h->ctlr->dh_list);
                        h->sdev = sdev;
+                       list_add_rcu(&h->node, &h->ctlr->dh_list);
                }
                spin_unlock(&list_lock);
                err = SCSI_DH_OK;
@@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
        spin_lock(&list_lock);
        if (h->ctlr) {
                list_del_rcu(&h->node);
-               h->sdev = NULL;
                kref_put(&h->ctlr->kref, release_controller);
        }
        spin_unlock(&list_lock);
        sdev->handler_data = NULL;
+       synchronize_rcu();
        kfree(h);
 }
 
index bee1bec..935b01e 100644 (file)
@@ -807,6 +807,13 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
        for (i = 0; i < size; ++i) {
                struct ibmvfc_event *evt = &pool->events[i];
 
+               /*
+                * evt->active states
+                *  1 = in flight
+                *  0 = being completed
+                * -1 = free/freed
+                */
+               atomic_set(&evt->active, -1);
                atomic_set(&evt->free, 1);
                evt->crq.valid = 0x80;
                evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
@@ -1017,6 +1024,7 @@ static void ibmvfc_free_event(struct ibmvfc_event *evt)
 
        BUG_ON(!ibmvfc_valid_event(pool, evt));
        BUG_ON(atomic_inc_return(&evt->free) != 1);
+       BUG_ON(atomic_dec_and_test(&evt->active));
 
        spin_lock_irqsave(&evt->queue->l_lock, flags);
        list_add_tail(&evt->queue_list, &evt->queue->free);
@@ -1072,6 +1080,12 @@ static void ibmvfc_complete_purge(struct list_head *purge_list)
  **/
 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
 {
+       /*
+        * Anything we are failing should still be active. Otherwise, it
+        * implies we already got a response for the command and are doing
+        * something bad like double completing it.
+        */
+       BUG_ON(!atomic_dec_and_test(&evt->active));
        if (evt->cmnd) {
                evt->cmnd->result = (error_code << 16);
                evt->done = ibmvfc_scsi_eh_done;
@@ -1723,6 +1737,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 
                evt->done(evt);
        } else {
+               atomic_set(&evt->active, 1);
                spin_unlock_irqrestore(&evt->queue->l_lock, flags);
                ibmvfc_trc_start(evt);
        }
@@ -3251,7 +3266,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
                return;
        }
 
-       if (unlikely(atomic_read(&evt->free))) {
+       if (unlikely(atomic_dec_if_positive(&evt->active))) {
                dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
                        crq->ioba);
                return;
@@ -3778,7 +3793,7 @@ static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost
                return;
        }
 
-       if (unlikely(atomic_read(&evt->free))) {
+       if (unlikely(atomic_dec_if_positive(&evt->active))) {
                dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
                        crq->ioba);
                return;
index 4f0f3ba..92fb889 100644 (file)
@@ -745,6 +745,7 @@ struct ibmvfc_event {
        struct ibmvfc_target *tgt;
        struct scsi_cmnd *cmnd;
        atomic_t free;
+       atomic_t active;
        union ibmvfc_iu *xfer_iu;
        void (*done)(struct ibmvfc_event *evt);
        void (*_done)(struct ibmvfc_event *evt);
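
The ibmvfc hunks above add an evt->active counter so that the normal completion path and the failure path each claim an event exactly once, and a duplicate CRQ response is rejected instead of double-completed. Below is a minimal stand-alone sketch of that guard in ordinary user-space C11; the names and the dec_if_positive() helper are illustrative analogues of the kernel's atomic_t API, not driver code.

/*
 * Stand-alone sketch (not ibmvfc code) of the evt->active guard:
 * -1 = free, 1 = in flight, 0 = being completed.  A response "claims"
 * the event by decrementing active from 1 to 0; a second response sees
 * the decrement refused and is dropped instead of double-completing.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct event {
	atomic_int active;		/* 1 = in flight, 0 = completing, -1 = free */
};

/* Rough analogue of the kernel's atomic_dec_if_positive(). */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0 &&
	       !atomic_compare_exchange_weak(v, &old, old - 1))
		;			/* on failure, old is reloaded and rechecked */
	return old - 1;			/* negative means the decrement was refused */
}

static void send_event(struct event *evt)
{
	atomic_store(&evt->active, 1);	/* request is now in flight */
}

static bool handle_response(struct event *evt)
{
	if (dec_if_positive(&evt->active) < 0) {
		puts("duplicate response ignored");
		return false;
	}
	puts("response completed");
	return true;
}

int main(void)
{
	struct event evt = { .active = -1 };	/* starts out free */

	send_event(&evt);
	handle_response(&evt);		/* first response wins: active 1 -> 0 */
	handle_response(&evt);		/* duplicate is detected and dropped */
	return 0;
}

The return-to-free transition (active back to -1) is left out of the sketch; in the hunks above it is the atomic_dec_and_test() added to ibmvfc_free_event().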
index 5983e05..e29523a 100644 (file)
@@ -13193,6 +13193,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        if (!phba)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&phba->poll_list);
+
        /* Perform generic PCI device enabling operation */
        error = lpfc_enable_pci_dev(phba);
        if (error)
@@ -13327,7 +13329,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        /* Enable RAS FW log support */
        lpfc_sli4_ras_setup(phba);
 
-       INIT_LIST_HEAD(&phba->poll_list);
        timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
        cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
 
index abf7b40..c509440 100644 (file)
@@ -238,7 +238,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
        mimd_t          mimd;
        uint32_t        adapno;
        int             iterator;
-
+       bool            is_found;
 
        if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
                *rval = -EFAULT;
@@ -254,12 +254,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
 
        adapter = NULL;
        iterator = 0;
+       is_found = false;
 
        list_for_each_entry(adapter, &adapters_list_g, list) {
-               if (iterator++ == adapno) break;
+               if (iterator++ == adapno) {
+                       is_found = true;
+                       break;
+               }
        }
 
-       if (!adapter) {
+       if (!is_found) {
                *rval = -ENODEV;
                return NULL;
        }
@@ -725,6 +729,7 @@ ioctl_done(uioc_t *kioc)
        uint32_t        adapno;
        int             iterator;
        mraid_mmadp_t*  adapter;
+       bool            is_found;
 
        /*
         * When the kioc returns from driver, make sure it still doesn't
@@ -747,19 +752,23 @@ ioctl_done(uioc_t *kioc)
                iterator        = 0;
                adapter         = NULL;
                adapno          = kioc->adapno;
+               is_found        = false;
 
                con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
                                        "ioctl that was timedout before\n"));
 
                list_for_each_entry(adapter, &adapters_list_g, list) {
-                       if (iterator++ == adapno) break;
+                       if (iterator++ == adapno) {
+                               is_found = true;
+                               break;
+                       }
                }
 
                kioc->timedout = 0;
 
-               if (adapter) {
+               if (is_found)
                        mraid_mm_dealloc_kioc( adapter, kioc );
-               }
+
        }
        else {
                wake_up(&wait_q);
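
For the megaraid_mm changes above, the reason an explicit is_found flag is needed: a list_for_each_entry() cursor is never NULL after the loop, so on a miss the old "if (!adapter)" test could not fire. A small stand-alone sketch follows, in user-space C with a cut-down analogue of the kernel list macros (the explicit type argument is a simplification; nothing here is megaraid code), showing the cursor ending up non-NULL even when the list is empty.

/*
 * Stand-alone sketch (not megaraid code): after an unsuccessful
 * list_for_each_entry()-style walk, the cursor points at the list head
 * re-interpreted as an entry, so only a separate flag records the miss.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Minimal analogue of the kernel's list_for_each_entry(). */
#define list_for_each_entry(pos, head, type, member)			\
	for (pos = container_of((head)->next, type, member);		\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, type, member))

struct adapter {
	int adapno;
	struct list_head list;
};

int main(void)
{
	struct list_head adapters = { &adapters, &adapters };	/* empty list */
	struct adapter *adapter = NULL;
	bool is_found = false;

	list_for_each_entry(adapter, &adapters, struct adapter, list) {
		if (adapter->adapno == 3) {
			is_found = true;
			break;
		}
	}

	/* adapter is non-NULL even though the list was empty ... */
	printf("adapter = %p, is_found = %d\n", (void *)adapter, is_found);

	/* ... so only the flag tells us whether the lookup succeeded. */
	return is_found ? 0 : 1;
}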
index 19b1c0c..cf4a3a2 100644 (file)
@@ -7851,7 +7851,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
                        return r;
        }
 
-       rc = _base_static_config_pages(ioc);
+       r = _base_static_config_pages(ioc);
        if (r)
                return r;
 
index 48548a9..32e60f0 100644 (file)
@@ -684,8 +684,7 @@ int pm8001_dev_found(struct domain_device *dev)
 
 void pm8001_task_done(struct sas_task *task)
 {
-       if (!del_timer(&task->slow_task->timer))
-               return;
+       del_timer(&task->slow_task->timer);
        complete(&task->slow_task->completion);
 }
 
@@ -693,9 +692,14 @@ static void pm8001_tmf_timedout(struct timer_list *t)
 {
        struct sas_task_slow *slow = from_timer(slow, t, timer);
        struct sas_task *task = slow->task;
+       unsigned long flags;
 
-       task->task_state_flags |= SAS_TASK_STATE_ABORTED;
-       complete(&task->slow_task->completion);
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+               task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+               complete(&task->slow_task->completion);
+       }
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
 }
 
 #define PM8001_TASK_TIMEOUT 20
@@ -748,13 +752,10 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
                }
                res = -TMF_RESP_FUNC_FAILED;
                /* Even TMF timed out, return direct. */
-               if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
-                               pm8001_dbg(pm8001_ha, FAIL,
-                                          "TMF task[%x]timeout.\n",
-                                          tmf->tmf);
-                               goto ex_err;
-                       }
+               if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+                       pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
+                                  tmf->tmf);
+                       goto ex_err;
                }
 
                if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -834,12 +835,9 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
                wait_for_completion(&task->slow_task->completion);
                res = TMF_RESP_FUNC_FAILED;
                /* Even TMF timed out, return direct. */
-               if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
-                               pm8001_dbg(pm8001_ha, FAIL,
-                                          "TMF task timeout.\n");
-                               goto ex_err;
-                       }
+               if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+                       pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
+                       goto ex_err;
                }
 
                if (task->task_status.resp == SAS_TASK_COMPLETE &&
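
The pm8001 hunks above make the TMF timeout handler take task_state_lock and complete the task only if SAS_TASK_STATE_DONE is not already set, so the timeout and the normal completion cannot both finish the same task. A stand-alone sketch of that pattern in user-space C with a pthread mutex (names are illustrative, not driver code):

/*
 * Stand-alone sketch (not pm8001 code): completion and timeout both try
 * to finish the same task; a DONE flag checked and set under the task's
 * state lock ensures only one of them signals the waiter.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
	pthread_mutex_t state_lock;
	bool done;			/* analogue of SAS_TASK_STATE_DONE */
	bool aborted;			/* analogue of SAS_TASK_STATE_ABORTED */
};

/* Normal completion path: runs when the hardware answers in time. */
static void task_complete(struct task *t)
{
	pthread_mutex_lock(&t->state_lock);
	if (!t->done) {
		t->done = true;
		puts("task completed normally");
	}
	pthread_mutex_unlock(&t->state_lock);
}

/* Timeout path: only marks the task aborted if it has not completed yet. */
static void task_timeout(struct task *t)
{
	pthread_mutex_lock(&t->state_lock);
	if (!t->done) {
		t->aborted = true;
		puts("task aborted on timeout");
	}
	pthread_mutex_unlock(&t->state_lock);
}

int main(void)
{
	struct task t = { .state_lock = PTHREAD_MUTEX_INITIALIZER };

	task_complete(&t);		/* completion wins the race ... */
	task_timeout(&t);		/* ... so the late timeout is a no-op */
	printf("done=%d aborted=%d\n", t.done, t.aborted);
	return 0;
}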
index b059bf2..5b6996a 100644 (file)
@@ -475,7 +475,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
                error = shost->hostt->target_alloc(starget);
 
                if(error) {
-                       dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
+                       if (error != -ENXIO)
+                               dev_err(dev, "target allocation failed, error %d\n", error);
                        /* don't want scsi_target_reap to do the final
                         * put because it will be under the host lock */
                        scsi_target_destroy(starget);
index 32489d2..c0d3111 100644 (file)
@@ -807,11 +807,17 @@ store_state_field(struct device *dev, struct device_attribute *attr,
        mutex_lock(&sdev->state_mutex);
        ret = scsi_device_set_state(sdev, state);
        /*
-        * If the device state changes to SDEV_RUNNING, we need to run
-        * the queue to avoid I/O hang.
+        * If the device state changes to SDEV_RUNNING, we need to
+        * run the queue to avoid I/O hang, and rescan the device
+        * to revalidate it. Running the queue first is necessary
+        * because another thread may be waiting inside
+        * blk_mq_freeze_queue_wait() and because that call may be
+        * waiting for pending I/O to finish.
         */
-       if (ret == 0 && state == SDEV_RUNNING)
+       if (ret == 0 && state == SDEV_RUNNING) {
                blk_mq_run_hw_queues(sdev->request_queue, true);
+               scsi_rescan_device(dev);
+       }
        mutex_unlock(&sdev->state_mutex);
 
        return ret == 0 ? count : -EINVAL;
index b8d55af..610ebba 100644 (file)
@@ -129,6 +129,7 @@ static DEFINE_MUTEX(sd_ref_mutex);
 static struct kmem_cache *sd_cdb_cache;
 static mempool_t *sd_cdb_pool;
 static mempool_t *sd_page_pool;
+static struct lock_class_key sd_bio_compl_lkclass;
 
 static const char *sd_cache_types[] = {
        "write through", "none", "write back",
@@ -886,7 +887,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
        cmd->cmnd[0] = UNMAP;
        cmd->cmnd[8] = 24;
 
-       buf = page_address(rq->special_vec.bv_page);
+       buf = bvec_virt(&rq->special_vec);
        put_unaligned_be16(6 + 16, &buf[0]);
        put_unaligned_be16(16, &buf[2]);
        put_unaligned_be64(lba, &buf[8]);
@@ -3408,7 +3409,8 @@ static int sd_probe(struct device *dev)
        if (!sdkp)
                goto out;
 
-       gd = alloc_disk(SD_MINORS);
+       gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
+                              &sd_bio_compl_lkclass);
        if (!gd)
                goto out_free;
 
@@ -3454,10 +3456,10 @@ static int sd_probe(struct device *dev)
 
        gd->major = sd_major((index & 0xf0) >> 4);
        gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+       gd->minors = SD_MINORS;
 
        gd->fops = &sd_fops;
        gd->private_data = &sdkp->driver;
-       gd->queue = sdkp->device->request_queue;
 
        /* defaults, until the device tells us otherwise */
        sdp->sector_size = 512;
index 91e2221..d5889b4 100644 (file)
@@ -166,7 +166,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
        bool exclude;           /* 1->open(O_EXCL) succeeded and is active */
        int open_cnt;           /* count of opens (perhaps < num(sfds) ) */
        char sgdebug;           /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
-       struct gendisk *disk;
+       char name[DISK_NAME_LEN];
        struct cdev * cdev;     /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
        struct kref d_ref;
 } Sg_device;
@@ -202,8 +202,7 @@ static void sg_device_destroy(struct kref *kref);
 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
 
 #define sg_printk(prefix, sdp, fmt, a...) \
-       sdev_prefix_printk(prefix, (sdp)->device,               \
-                          (sdp)->disk->disk_name, fmt, ##a)
+       sdev_prefix_printk(prefix, (sdp)->device, (sdp)->name, fmt, ##a)
 
 /*
  * The SCSI interfaces that use read() and write() as an asynchronous variant of
@@ -832,7 +831,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 
        srp->rq->timeout = timeout;
        kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
-       blk_execute_rq_nowait(sdp->disk, srp->rq, at_head, sg_rq_end_io);
+       blk_execute_rq_nowait(NULL, srp->rq, at_head, sg_rq_end_io);
        return 0;
 }
 
@@ -1119,8 +1118,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
                return put_user(max_sectors_bytes(sdp->device->request_queue),
                                ip);
        case BLKTRACESETUP:
-               return blk_trace_setup(sdp->device->request_queue,
-                                      sdp->disk->disk_name,
+               return blk_trace_setup(sdp->device->request_queue, sdp->name,
                                       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
                                       NULL, p);
        case BLKTRACESTART:
@@ -1456,7 +1454,7 @@ static struct class *sg_sysfs_class;
 static int sg_sysfs_valid = 0;
 
 static Sg_device *
-sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
+sg_alloc(struct scsi_device *scsidp)
 {
        struct request_queue *q = scsidp->request_queue;
        Sg_device *sdp;
@@ -1492,9 +1490,7 @@ sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
 
        SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
                                        "sg_alloc: dev=%d \n", k));
-       sprintf(disk->disk_name, "sg%d", k);
-       disk->first_minor = k;
-       sdp->disk = disk;
+       sprintf(sdp->name, "sg%d", k);
        sdp->device = scsidp;
        mutex_init(&sdp->open_rel_lock);
        INIT_LIST_HEAD(&sdp->sfds);
@@ -1521,19 +1517,11 @@ static int
 sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
 {
        struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
-       struct gendisk *disk;
        Sg_device *sdp = NULL;
        struct cdev * cdev = NULL;
        int error;
        unsigned long iflags;
 
-       disk = alloc_disk(1);
-       if (!disk) {
-               pr_warn("%s: alloc_disk failed\n", __func__);
-               return -ENOMEM;
-       }
-       disk->major = SCSI_GENERIC_MAJOR;
-
        error = -ENOMEM;
        cdev = cdev_alloc();
        if (!cdev) {
@@ -1543,7 +1531,7 @@ sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
        cdev->owner = THIS_MODULE;
        cdev->ops = &sg_fops;
 
-       sdp = sg_alloc(disk, scsidp);
+       sdp = sg_alloc(scsidp);
        if (IS_ERR(sdp)) {
                pr_warn("%s: sg_alloc failed\n", __func__);
                error = PTR_ERR(sdp);
@@ -1561,7 +1549,7 @@ sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
                sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
                                                MKDEV(SCSI_GENERIC_MAJOR,
                                                      sdp->index),
-                                               sdp, "%s", disk->disk_name);
+                                               sdp, "%s", sdp->name);
                if (IS_ERR(sg_class_member)) {
                        pr_err("%s: device_create failed\n", __func__);
                        error = PTR_ERR(sg_class_member);
@@ -1589,7 +1577,6 @@ cdev_add_err:
        kfree(sdp);
 
 out:
-       put_disk(disk);
        if (cdev)
                cdev_del(cdev);
        return error;
@@ -1613,7 +1600,6 @@ sg_device_destroy(struct kref *kref)
        SCSI_LOG_TIMEOUT(3,
                sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
 
-       put_disk(sdp->disk);
        kfree(sdp);
 }
 
@@ -2606,7 +2592,7 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
                goto skip;
        read_lock(&sdp->sfd_lock);
        if (!list_empty(&sdp->sfds)) {
-               seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
+               seq_printf(s, " >>> device=%s ", sdp->name);
                if (atomic_read(&sdp->detaching))
                        seq_puts(s, "detaching pending close ");
                else if (sdp->device) {
index 94c254e..2942a4e 100644 (file)
@@ -106,6 +106,8 @@ static struct scsi_driver sr_template = {
 static unsigned long sr_index_bits[SR_DISKS / BITS_PER_LONG];
 static DEFINE_SPINLOCK(sr_index_lock);
 
+static struct lock_class_key sr_bio_compl_lkclass;
+
 /* This semaphore is used to mediate the 0->1 reference get in the
  * face of object destruction (i.e. we can't allow a get on an
  * object after last put) */
@@ -221,7 +223,7 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
        else if (med->media_event_code == 2)
                return DISK_EVENT_MEDIA_CHANGE;
        else if (med->media_event_code == 3)
-               return DISK_EVENT_EJECT_REQUEST;
+               return DISK_EVENT_MEDIA_CHANGE;
        return 0;
 }
 
@@ -712,7 +714,8 @@ static int sr_probe(struct device *dev)
 
        kref_init(&cd->kref);
 
-       disk = alloc_disk(1);
+       disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE,
+                                &sr_bio_compl_lkclass);
        if (!disk)
                goto fail_free;
        mutex_init(&cd->lock);
@@ -729,6 +732,7 @@ static int sr_probe(struct device *dev)
 
        disk->major = SCSI_CDROM_MAJOR;
        disk->first_minor = minor;
+       disk->minors = 1;
        sprintf(disk->disk_name, "sr%d", minor);
        disk->fops = &sr_bdops;
        disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
@@ -762,7 +766,6 @@ static int sr_probe(struct device *dev)
 
        set_capacity(disk, cd->capacity);
        disk->private_data = &cd->driver;
-       disk->queue = sdev->request_queue;
 
        if (register_cdrom(disk, &cd->cdi))
                goto fail_minor;
index c6f1454..d1abc02 100644 (file)
@@ -309,13 +309,8 @@ static char * st_incompatible(struct scsi_device* SDp)
 }
 \f
 
-static inline char *tape_name(struct scsi_tape *tape)
-{
-       return tape->disk->disk_name;
-}
-
 #define st_printk(prefix, t, fmt, a...) \
-       sdev_prefix_printk(prefix, (t)->device, tape_name(t), fmt, ##a)
+       sdev_prefix_printk(prefix, (t)->device, (t)->name, fmt, ##a)
 #ifdef DEBUG
 #define DEBC_printk(t, fmt, a...) \
        if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); }
@@ -363,7 +358,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
        int result = SRpnt->result;
        u8 scode;
        DEB(const char *stp;)
-       char *name = tape_name(STp);
+       char *name = STp->name;
        struct st_cmdstatus *cmdstatp;
 
        if (!result)
@@ -3841,8 +3836,9 @@ static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user
                            !capable(CAP_SYS_RAWIO))
                                i = -EPERM;
                        else
-                               i = scsi_cmd_ioctl(STp->disk->queue, STp->disk,
-                                                  file->f_mode, cmd_in, p);
+                               i = scsi_cmd_ioctl(STp->device->request_queue,
+                                                  NULL, file->f_mode, cmd_in,
+                                                  p);
                        if (i != -ENOTTY)
                                return i;
                        break;
@@ -4216,7 +4212,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
 
        i = mode << (4 - ST_NBR_MODE_BITS);
        snprintf(name, 10, "%s%s%s", rew ? "n" : "",
-                tape->disk->disk_name, st_formats[i]);
+                tape->name, st_formats[i]);
 
        dev = device_create(&st_sysfs_class, &tape->device->sdev_gendev,
                            cdev_devno, &tape->modes[mode], "%s", name);
@@ -4271,7 +4267,6 @@ static void remove_cdevs(struct scsi_tape *tape)
 static int st_probe(struct device *dev)
 {
        struct scsi_device *SDp = to_scsi_device(dev);
-       struct gendisk *disk = NULL;
        struct scsi_tape *tpnt = NULL;
        struct st_modedef *STm;
        struct st_partstat *STps;
@@ -4301,27 +4296,13 @@ static int st_probe(struct device *dev)
                goto out;
        }
 
-       disk = alloc_disk(1);
-       if (!disk) {
-               sdev_printk(KERN_ERR, SDp,
-                           "st: out of memory. Device not attached.\n");
-               goto out_buffer_free;
-       }
-
        tpnt = kzalloc(sizeof(struct scsi_tape), GFP_KERNEL);
        if (tpnt == NULL) {
                sdev_printk(KERN_ERR, SDp,
                            "st: Can't allocate device descriptor.\n");
-               goto out_put_disk;
+               goto out_buffer_free;
        }
        kref_init(&tpnt->kref);
-       tpnt->disk = disk;
-       disk->private_data = &tpnt->driver;
-       /* SCSI tape doesn't register this gendisk via add_disk().  Manually
-        * take queue reference that release_disk() expects. */
-       if (!blk_get_queue(SDp->request_queue))
-               goto out_put_disk;
-       disk->queue = SDp->request_queue;
        tpnt->driver = &st_template;
 
        tpnt->device = SDp;
@@ -4394,10 +4375,10 @@ static int st_probe(struct device *dev)
        idr_preload_end();
        if (error < 0) {
                pr_warn("st: idr allocation failed: %d\n", error);
-               goto out_put_queue;
+               goto out_free_tape;
        }
        tpnt->index = error;
-       sprintf(disk->disk_name, "st%d", tpnt->index);
+       sprintf(tpnt->name, "st%d", tpnt->index);
        tpnt->stats = kzalloc(sizeof(struct scsi_tape_stats), GFP_KERNEL);
        if (tpnt->stats == NULL) {
                sdev_printk(KERN_ERR, SDp,
@@ -4414,9 +4395,9 @@ static int st_probe(struct device *dev)
        scsi_autopm_put_device(SDp);
 
        sdev_printk(KERN_NOTICE, SDp,
-                   "Attached scsi tape %s\n", tape_name(tpnt));
+                   "Attached scsi tape %s\n", tpnt->name);
        sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n",
-                   tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
+                   tpnt->name, tpnt->try_dio ? "yes" : "no",
                    queue_dma_alignment(SDp->request_queue) + 1);
 
        return 0;
@@ -4428,10 +4409,7 @@ out_idr_remove:
        spin_lock(&st_index_lock);
        idr_remove(&st_index_idr, tpnt->index);
        spin_unlock(&st_index_lock);
-out_put_queue:
-       blk_put_queue(disk->queue);
-out_put_disk:
-       put_disk(disk);
+out_free_tape:
        kfree(tpnt);
 out_buffer_free:
        kfree(buffer);
@@ -4470,7 +4448,6 @@ static int st_remove(struct device *dev)
 static void scsi_tape_release(struct kref *kref)
 {
        struct scsi_tape *tpnt = to_scsi_tape(kref);
-       struct gendisk *disk = tpnt->disk;
 
        tpnt->device = NULL;
 
@@ -4480,8 +4457,6 @@ static void scsi_tape_release(struct kref *kref)
                kfree(tpnt->buffer);
        }
 
-       disk->private_data = NULL;
-       put_disk(disk);
        kfree(tpnt->stats);
        kfree(tpnt);
        return;
index 9d3c38b..c0ef0d9 100644 (file)
@@ -187,7 +187,7 @@ struct scsi_tape {
        unsigned char last_cmnd[6];
        unsigned char last_sense[16];
 #endif
-       struct gendisk *disk;
+       char name[DISK_NAME_LEN];
        struct kref     kref;
        struct scsi_tape_stats *stats;
 };
index 328bb96..37506b3 100644 (file)
@@ -1199,14 +1199,24 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
                vstor_packet->vm_srb.sense_info_length);
 
        if (vstor_packet->vm_srb.scsi_status != 0 ||
-           vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS)
-               storvsc_log(device, STORVSC_LOGGING_ERROR,
+           vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {
+
+               /*
+                * Log TEST_UNIT_READY errors only as warnings. Hyper-V can
+                * return errors when detecting devices using TEST_UNIT_READY,
+                * and logging these as errors produces unhelpful noise.
+                */
+               int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
+                       STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
+
+               storvsc_log(device, loglevel,
                        "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
                        request->cmd->request->tag,
                        stor_pkt->vm_srb.cdb[0],
                        vstor_packet->vm_srb.scsi_status,
                        vstor_packet->vm_srb.srb_status,
                        vstor_packet->status);
+       }
 
        if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION &&
            (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID))
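
For readers skimming the storvsc hunk above: the only behavioural change is the log level chosen for failed commands. A minimal user-space sketch of that selection, using the standard SCSI TEST UNIT READY opcode (0x00); the level names and helper are illustrative stand-ins, not the driver's API:

#include <stdio.h>
#include <stdint.h>

#define TEST_UNIT_READY 0x00    /* standard SCSI opcode */

enum loglevel { LOG_WARN, LOG_ERROR };

/* Mirror of the selection added above: TEST UNIT READY failures are
 * expected while Hyper-V probes for devices, so they are logged as
 * warnings; every other failing opcode is still logged as an error. */
static enum loglevel storvsc_pick_level(uint8_t cdb0)
{
        return (cdb0 == TEST_UNIT_READY) ? LOG_WARN : LOG_ERROR;
}

int main(void)
{
        printf("TUR failure      -> %s\n",
               storvsc_pick_level(0x00) == LOG_WARN ? "warn" : "error");
        printf("READ(10) failure -> %s\n",
               storvsc_pick_level(0x28) == LOG_WARN ? "warn" : "error");
        return 0;
}
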
index f2b5d34..e5ae262 100644 (file)
@@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
        int ret = 0;
 
        spin_lock_irqsave(&ctrl->txn_lock, flags);
-       ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
+       ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1,
                                SLIM_MAX_TIDS, GFP_ATOMIC);
        if (ret < 0) {
                spin_unlock_irqrestore(&ctrl->txn_lock, flags);
@@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
                        goto slim_xfer_err;
                }
        }
-
+       /* Initialize tid to invalid value */
+       txn->tid = 0;
        need_tid = slim_tid_txn(txn->mt, txn->mc);
 
        if (need_tid) {
@@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
                        txn->mt, txn->mc, txn->la, ret);
 
 slim_xfer_err:
-       if (!clk_pause_msg && (!need_tid  || ret == -ETIMEDOUT)) {
+       if (!clk_pause_msg && (txn->tid == 0  || ret == -ETIMEDOUT)) {
                /*
                 * remove runtime-pm vote if this was TX only, or
                 * if there was error during this transaction
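
The two slimbus hunks above work together: TIDs are now allocated starting at 1, so a zeroed txn->tid can double as "no TID was assigned". A small user-space model of that sentinel scheme, assuming a toy cyclic allocator that stands in for the kernel's idr:

#include <stdio.h>

#define MAX_TIDS 256

static unsigned char tid_in_use[MAX_TIDS];
static int next_tid = 1;

/* Toy cyclic allocator that, like the fixed driver, never hands out 0. */
static int alloc_tid(void)
{
        for (int i = 0; i < MAX_TIDS - 1; i++) {
                int tid = next_tid;

                next_tid = (next_tid % (MAX_TIDS - 1)) + 1;     /* 1..255 */
                if (!tid_in_use[tid]) {
                        tid_in_use[tid] = 1;
                        return tid;
                }
        }
        return -1;              /* exhausted */
}

int main(void)
{
        int txn_tid = 0;        /* "invalid", as initialised in the hunk */
        int t = alloc_tid();

        if (t > 0)
                txn_tid = t;

        /* The cleanup path can now test the sentinel rather than re-derive
         * whether this transaction type was ever given a TID. */
        if (txn_tid == 0)
                printf("no TID allocated, drop runtime-PM vote\n");
        else
                printf("TID %d allocated\n", txn_tid);
        return 0;
}
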
index c054e83..7040293 100644 (file)
@@ -618,7 +618,7 @@ static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf)
                (mc == SLIM_USR_MC_GENERIC_ACK &&
                 mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
                slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4);
-               pm_runtime_mark_last_busy(ctrl->dev);
+               pm_runtime_mark_last_busy(ctrl->ctrl.dev);
        }
 }
 
@@ -1080,7 +1080,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
 {
        u32 cfg = readl_relaxed(ctrl->ngd->base);
 
-       if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+       if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
+               ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
                qcom_slim_ngd_init_dma(ctrl);
 
        /* By default enable message queues */
@@ -1131,6 +1132,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
                        dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
                        return 0;
                }
+               qcom_slim_ngd_setup(ctrl);
                return 0;
        }
 
@@ -1257,13 +1259,14 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
                }
                /* controller state should be in sync with framework state */
                complete(&ctrl->qmi.qmi_comp);
-               if (!pm_runtime_enabled(ctrl->dev) ||
-                               !pm_runtime_suspended(ctrl->dev))
-                       qcom_slim_ngd_runtime_resume(ctrl->dev);
+               if (!pm_runtime_enabled(ctrl->ctrl.dev) ||
+                        !pm_runtime_suspended(ctrl->ctrl.dev))
+                       qcom_slim_ngd_runtime_resume(ctrl->ctrl.dev);
                else
-                       pm_runtime_resume(ctrl->dev);
-               pm_runtime_mark_last_busy(ctrl->dev);
-               pm_runtime_put(ctrl->dev);
+                       pm_runtime_resume(ctrl->ctrl.dev);
+
+               pm_runtime_mark_last_busy(ctrl->ctrl.dev);
+               pm_runtime_put(ctrl->ctrl.dev);
 
                ret = slim_register_controller(&ctrl->ctrl);
                if (ret) {
@@ -1389,7 +1392,7 @@ static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl,
                /* Make sure the last dma xfer is finished */
                mutex_lock(&ctrl->tx_lock);
                if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) {
-                       pm_runtime_get_noresume(ctrl->dev);
+                       pm_runtime_get_noresume(ctrl->ctrl.dev);
                        ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
                        qcom_slim_ngd_down(ctrl);
                        qcom_slim_ngd_exit_dma(ctrl);
@@ -1617,6 +1620,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
        struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
        int ret = 0;
 
+       qcom_slim_ngd_exit_dma(ctrl);
        if (!ctrl->qmi.handle)
                return 0;
 
index f678e4d..a05e9fb 100644 (file)
@@ -13,7 +13,7 @@ obj-$(CONFIG_MACH_DOVE)               += dove/
 obj-y                          += fsl/
 obj-$(CONFIG_ARCH_GEMINI)      += gemini/
 obj-y                          += imx/
-obj-$(CONFIG_ARCH_IXP4XX)      += ixp4xx/
+obj-y                          += ixp4xx/
 obj-$(CONFIG_SOC_XWAY)         += lantiq/
 obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
 obj-y                          += mediatek/
index 3f711c1..bbae3d3 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/signal.h>
 #include <linux/device.h>
 #include <linux/spinlock.h>
+#include <linux/platform_device.h>
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <soc/fsl/qe/qe.h>
@@ -53,8 +54,8 @@ struct qe_ic {
        struct irq_chip hc_irq;
 
        /* VIRQ numbers of QE high/low irqs */
-       unsigned int virq_high;
-       unsigned int virq_low;
+       int virq_high;
+       int virq_low;
 };
 
 /*
@@ -404,42 +405,40 @@ static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
        chip->irq_eoi(&desc->irq_data);
 }
 
-static void __init qe_ic_init(struct device_node *node)
+static int qe_ic_init(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        void (*low_handler)(struct irq_desc *desc);
        void (*high_handler)(struct irq_desc *desc);
        struct qe_ic *qe_ic;
-       struct resource res;
-       u32 ret;
+       struct resource *res;
+       struct device_node *node = pdev->dev.of_node;
 
-       ret = of_address_to_resource(node, 0, &res);
-       if (ret)
-               return;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (res == NULL) {
+               dev_err(dev, "no memory resource defined\n");
+               return -ENODEV;
+       }
 
-       qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
+       qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL);
        if (qe_ic == NULL)
-               return;
+               return -ENOMEM;
 
-       qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
-                                              &qe_ic_host_ops, qe_ic);
-       if (qe_ic->irqhost == NULL) {
-               kfree(qe_ic);
-               return;
+       qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res));
+       if (qe_ic->regs == NULL) {
+               dev_err(dev, "failed to ioremap() registers\n");
+               return -ENODEV;
        }
 
-       qe_ic->regs = ioremap(res.start, resource_size(&res));
-
        qe_ic->hc_irq = qe_ic_irq_chip;
 
-       qe_ic->virq_high = irq_of_parse_and_map(node, 0);
-       qe_ic->virq_low = irq_of_parse_and_map(node, 1);
+       qe_ic->virq_high = platform_get_irq(pdev, 0);
+       qe_ic->virq_low = platform_get_irq(pdev, 1);
 
-       if (!qe_ic->virq_low) {
-               printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
-               kfree(qe_ic);
-               return;
-       }
-       if (qe_ic->virq_high != qe_ic->virq_low) {
+       if (qe_ic->virq_low <= 0)
+               return -ENODEV;
+
+       if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) {
                low_handler = qe_ic_cascade_low;
                high_handler = qe_ic_cascade_high;
        } else {
@@ -447,29 +446,42 @@ static void __init qe_ic_init(struct device_node *node)
                high_handler = NULL;
        }
 
+       qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+                                              &qe_ic_host_ops, qe_ic);
+       if (qe_ic->irqhost == NULL) {
+               dev_err(dev, "failed to add irq domain\n");
+               return -ENODEV;
+       }
+
        qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
 
        irq_set_handler_data(qe_ic->virq_low, qe_ic);
        irq_set_chained_handler(qe_ic->virq_low, low_handler);
 
-       if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) {
+       if (high_handler) {
                irq_set_handler_data(qe_ic->virq_high, qe_ic);
                irq_set_chained_handler(qe_ic->virq_high, high_handler);
        }
+       return 0;
 }
+static const struct of_device_id qe_ic_ids[] = {
+       { .compatible = "fsl,qe-ic"},
+       { .type = "qeic"},
+       {},
+};
 
-static int __init qe_ic_of_init(void)
+static struct platform_driver qe_ic_driver =
 {
-       struct device_node *np;
+       .driver = {
+               .name           = "qe-ic",
+               .of_match_table = qe_ic_ids,
+       },
+       .probe  = qe_ic_init,
+};
 
-       np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-       if (!np) {
-               np = of_find_node_by_type(NULL, "qeic");
-               if (!np)
-                       return -ENODEV;
-       }
-       qe_ic_init(np);
-       of_node_put(np);
+static int __init qe_ic_of_init(void)
+{
+       platform_driver_register(&qe_ic_driver);
        return 0;
 }
 subsys_initcall(qe_ic_of_init);
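
One detail of the qe_ic conversion above that is easy to miss: virq_high and virq_low change from unsigned int to int because platform_get_irq() reports failure with a negative errno rather than 0. A hedged stand-alone illustration of why the signedness matters; get_irq() below is a stub standing in for platform_get_irq():

#include <stdio.h>

#define ENXIO 6

/* Stub standing in for platform_get_irq(): a positive virq on success,
 * a negative errno on failure. */
static int get_irq(int index)
{
        return (index == 0) ? 17 : -ENXIO;
}

int main(void)
{
        int virq_low  = get_irq(0);
        int virq_high = get_irq(1);     /* optional second interrupt */

        /* Had these stayed unsigned, -ENXIO would compare as a huge
         * positive number and the "missing IRQ" case below would never
         * trigger. */
        if (virq_low <= 0) {
                printf("required low IRQ missing, abort probe\n");
                return 1;
        }

        if (virq_high > 0 && virq_high != virq_low)
                printf("separate handlers for low=%d high=%d\n",
                       virq_low, virq_high);
        else
                printf("single muxed handler for virq %d\n", virq_low);
        return 0;
}
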
index 071e144..cc57a38 100644 (file)
@@ -5,8 +5,6 @@
 
 #include <linux/init.h>
 #include <linux/io.h>
-#include <linux/module.h>
-#include <linux/nvmem-consumer.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/sys_soc.h>
@@ -31,7 +29,7 @@
 
 struct imx8_soc_data {
        char *name;
-       u32 (*soc_revision)(struct device *dev);
+       u32 (*soc_revision)(void);
 };
 
 static u64 soc_uid;
@@ -52,7 +50,7 @@ static u32 imx8mq_soc_revision_from_atf(void)
 static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
 #endif
 
-static u32 __init imx8mq_soc_revision(struct device *dev)
+static u32 __init imx8mq_soc_revision(void)
 {
        struct device_node *np;
        void __iomem *ocotp_base;
@@ -77,20 +75,9 @@ static u32 __init imx8mq_soc_revision(struct device *dev)
                        rev = REV_B1;
        }
 
-       if (dev) {
-               int ret;
-
-               ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid);
-               if (ret) {
-                       iounmap(ocotp_base);
-                       of_node_put(np);
-                       return ret;
-               }
-       } else {
-               soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
-               soc_uid <<= 32;
-               soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
-       }
+       soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+       soc_uid <<= 32;
+       soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
 
        iounmap(ocotp_base);
        of_node_put(np);
@@ -120,7 +107,7 @@ static void __init imx8mm_soc_uid(void)
        of_node_put(np);
 }
 
-static u32 __init imx8mm_soc_revision(struct device *dev)
+static u32 __init imx8mm_soc_revision(void)
 {
        struct device_node *np;
        void __iomem *anatop_base;
@@ -138,15 +125,7 @@ static u32 __init imx8mm_soc_revision(struct device *dev)
        iounmap(anatop_base);
        of_node_put(np);
 
-       if (dev) {
-               int ret;
-
-               ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid);
-               if (ret)
-                       return ret;
-       } else {
-               imx8mm_soc_uid();
-       }
+       imx8mm_soc_uid();
 
        return rev;
 }
@@ -171,7 +150,7 @@ static const struct imx8_soc_data imx8mp_soc_data = {
        .soc_revision = imx8mm_soc_revision,
 };
 
-static __maybe_unused const struct of_device_id imx8_machine_match[] = {
+static __maybe_unused const struct of_device_id imx8_soc_match[] = {
        { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
        { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
        { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
@@ -179,20 +158,12 @@ static __maybe_unused const struct of_device_id imx8_machine_match[] = {
        { }
 };
 
-static __maybe_unused const struct of_device_id imx8_soc_match[] = {
-       { .compatible = "fsl,imx8mq-soc", .data = &imx8mq_soc_data, },
-       { .compatible = "fsl,imx8mm-soc", .data = &imx8mm_soc_data, },
-       { .compatible = "fsl,imx8mn-soc", .data = &imx8mn_soc_data, },
-       { .compatible = "fsl,imx8mp-soc", .data = &imx8mp_soc_data, },
-       { }
-};
-
 #define imx8_revision(soc_rev) \
        soc_rev ? \
        kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf,  soc_rev & 0xf) : \
        "unknown"
 
-static int imx8_soc_info(struct platform_device *pdev)
+static int __init imx8_soc_init(void)
 {
        struct soc_device_attribute *soc_dev_attr;
        struct soc_device *soc_dev;
@@ -211,10 +182,7 @@ static int imx8_soc_info(struct platform_device *pdev)
        if (ret)
                goto free_soc;
 
-       if (pdev)
-               id = of_match_node(imx8_soc_match, pdev->dev.of_node);
-       else
-               id = of_match_node(imx8_machine_match, of_root);
+       id = of_match_node(imx8_soc_match, of_root);
        if (!id) {
                ret = -ENODEV;
                goto free_soc;
@@ -223,16 +191,8 @@ static int imx8_soc_info(struct platform_device *pdev)
        data = id->data;
        if (data) {
                soc_dev_attr->soc_id = data->name;
-               if (data->soc_revision) {
-                       if (pdev) {
-                               soc_rev = data->soc_revision(&pdev->dev);
-                               ret = soc_rev;
-                               if (ret < 0)
-                                       goto free_soc;
-                       } else {
-                               soc_rev = data->soc_revision(NULL);
-                       }
-               }
+               if (data->soc_revision)
+                       soc_rev = data->soc_revision();
        }
 
        soc_dev_attr->revision = imx8_revision(soc_rev);
@@ -270,24 +230,4 @@ free_soc:
        kfree(soc_dev_attr);
        return ret;
 }
-
-/* Retain device_initcall is for backward compatibility with DTS. */
-static int __init imx8_soc_init(void)
-{
-       if (of_find_matching_node_and_match(NULL, imx8_soc_match, NULL))
-               return 0;
-
-       return imx8_soc_info(NULL);
-}
 device_initcall(imx8_soc_init);
-
-static struct platform_driver imx8_soc_info_driver = {
-       .probe = imx8_soc_info,
-       .driver = {
-               .name = "imx8_soc_info",
-               .of_match_table = imx8_soc_match,
-       },
-};
-
-module_platform_driver(imx8_soc_info_driver);
-MODULE_LICENSE("GPL v2");
index 7bd1935..f490c4c 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/soc/ixp4xx/npe.h>
-#include <mach/hardware.h>
 #include <linux/soc/ixp4xx/cpu.h>
 
 #define DEBUG_MSG                      0
@@ -694,8 +693,8 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
 
                if (!(ixp4xx_read_feature_bits() &
                      (IXP4XX_FEATURE_RESET_NPEA << i))) {
-                       dev_info(dev, "NPE%d at 0x%08x-0x%08x not available\n",
-                                i, res->start, res->end);
+                       dev_info(dev, "NPE%d at %pR not available\n",
+                                i, res);
                        continue; /* NPE already disabled or not present */
                }
                npe->regs = devm_ioremap_resource(dev, res);
@@ -703,13 +702,12 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
                        return PTR_ERR(npe->regs);
 
                if (npe_reset(npe)) {
-                       dev_info(dev, "NPE%d at 0x%08x-0x%08x does not reset\n",
-                                i, res->start, res->end);
+                       dev_info(dev, "NPE%d at %pR does not reset\n",
+                                i, res);
                        continue;
                }
                npe->valid = 1;
-               dev_info(dev, "NPE%d at 0x%08x-0x%08x registered\n",
-                        i, res->start, res->end);
+               dev_info(dev, "NPE%d at %pR registered\n", i, res);
                found++;
        }
 
index 7149510..9154c70 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/soc/ixp4xx/qmgr.h>
-#include <mach/hardware.h>
 #include <linux/soc/ixp4xx/cpu.h>
 
 static struct qmgr_regs __iomem *qmgr_regs;
@@ -147,12 +146,12 @@ static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
        /* ACK - it may clear any bits so don't rely on it */
        __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
 
-       en_bitmap = qmgr_regs->irqen[0];
+       en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
        while (en_bitmap) {
                i = __fls(en_bitmap); /* number of the last "low" queue */
                en_bitmap &= ~BIT(i);
-               src = qmgr_regs->irqsrc[i >> 3];
-               stat = qmgr_regs->stat1[i >> 3];
+               src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
+               stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
                if (src & 4) /* the IRQ condition is inverted */
                        stat = ~stat;
                if (stat & BIT(src & 3)) {
@@ -172,7 +171,8 @@ static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
        /* ACK - it may clear any bits so don't rely on it */
        __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
 
-       req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
+       req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
+                    __raw_readl(&qmgr_regs->statne_h);
        while (req_bitmap) {
                i = __fls(req_bitmap); /* number of the last "high" queue */
                req_bitmap &= ~BIT(i);
index 20ace65..8b53ed1 100644 (file)
@@ -15,7 +15,7 @@ config ARCH_TEGRA_2x_SOC
        select PL310_ERRATA_769419 if CACHE_L2X0
        select SOC_TEGRA_FLOWCTRL
        select SOC_TEGRA_PMC
-       select SOC_TEGRA20_VOLTAGE_COUPLER
+       select SOC_TEGRA20_VOLTAGE_COUPLER if REGULATOR
        select TEGRA_TIMER
        help
          Support for NVIDIA Tegra AP20 and T20 processors, based on the
@@ -29,7 +29,7 @@ config ARCH_TEGRA_3x_SOC
        select PL310_ERRATA_769419 if CACHE_L2X0
        select SOC_TEGRA_FLOWCTRL
        select SOC_TEGRA_PMC
-       select SOC_TEGRA30_VOLTAGE_COUPLER
+       select SOC_TEGRA30_VOLTAGE_COUPLER if REGULATOR
        select TEGRA_TIMER
        help
          Support for NVIDIA Tegra T30 processor family, based on the
@@ -155,7 +155,9 @@ config SOC_TEGRA_POWERGATE_BPMP
 config SOC_TEGRA20_VOLTAGE_COUPLER
        bool "Voltage scaling support for Tegra20 SoCs"
        depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
+       depends on REGULATOR
 
 config SOC_TEGRA30_VOLTAGE_COUPLER
        bool "Voltage scaling support for Tegra30 SoCs"
        depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
+       depends on REGULATOR
index e71a4c5..83e352b 100644 (file)
@@ -658,6 +658,18 @@ config SPI_ROCKCHIP
          The main usecase of this controller is to use spi flash as boot
          device.
 
+config SPI_ROCKCHIP_SFC
+       tristate "Rockchip Serial Flash Controller (SFC)"
+       depends on ARCH_ROCKCHIP || COMPILE_TEST
+       depends on HAS_IOMEM && HAS_DMA
+       help
+         This enables support for Rockchip serial flash controller. This
+         is a specialized controller used to access SPI flash on some
+         Rockchip SOCs.
+
+         ROCKCHIP SFC supports DMA and PIO modes. When DMA is not available,
+         the driver automatically falls back to PIO mode.
+
 config SPI_RB4XX
        tristate "Mikrotik RB4XX SPI master"
        depends on SPI_MASTER && ATH79
index 13e54c4..699db95 100644 (file)
@@ -95,6 +95,7 @@ obj-$(CONFIG_SPI_QCOM_GENI)           += spi-geni-qcom.o
 obj-$(CONFIG_SPI_QCOM_QSPI)            += spi-qcom-qspi.o
 obj-$(CONFIG_SPI_QUP)                  += spi-qup.o
 obj-$(CONFIG_SPI_ROCKCHIP)             += spi-rockchip.o
+obj-$(CONFIG_SPI_ROCKCHIP_SFC)         += spi-rockchip-sfc.o
 obj-$(CONFIG_SPI_RB4XX)                        += spi-rb4xx.o
 obj-$(CONFIG_MACH_REALTEK_RTL)         += spi-realtek-rtl.o
 obj-$(CONFIG_SPI_RPCIF)                        += spi-rpc-if.o
index 37eab10..7d709a8 100644 (file)
@@ -143,12 +143,12 @@ static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
 }
 #endif /* CONFIG_DEBUG_FS */
 
-static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned reg)
+static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned int reg)
 {
        return readl(bs->regs + reg);
 }
 
-static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned reg,
+static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned int reg,
                                 u32 val)
 {
        writel(val, bs->regs + reg);
index a2de235..101cc71 100644 (file)
@@ -325,7 +325,15 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
        f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
        f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
        f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-       f_pdata->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;
+
+       /*
+        * For an op to be DTR, cmd phase along with every other non-empty
+        * phase should have dtr field set to 1. If an op phase has zero
+        * nbytes, ignore its dtr field; otherwise, check its dtr field.
+        */
+       f_pdata->dtr = op->cmd.dtr &&
+                      (!op->addr.nbytes || op->addr.dtr) &&
+                      (!op->data.nbytes || op->data.dtr);
 
        switch (op->data.buswidth) {
        case 0:
@@ -1228,8 +1236,15 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
 {
        bool all_true, all_false;
 
-       all_true = op->cmd.dtr && op->addr.dtr && op->dummy.dtr &&
-                  op->data.dtr;
+       /*
+        * op->dummy.dtr is required for converting nbytes into ncycles.
+        * Also, don't check the dtr field of the op phase having zero nbytes.
+        */
+       all_true = op->cmd.dtr &&
+                  (!op->addr.nbytes || op->addr.dtr) &&
+                  (!op->dummy.nbytes || op->dummy.dtr) &&
+                  (!op->data.nbytes || op->data.dtr);
+
        all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
                    !op->data.dtr;
 
index 8996115..263ce90 100644 (file)
@@ -444,7 +444,7 @@ static int mcfqspi_remove(struct platform_device *pdev)
        mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
 
        mcfqspi_cs_teardown(mcfqspi);
-       clk_disable(mcfqspi->clk);
+       clk_disable_unprepare(mcfqspi->clk);
 
        return 0;
 }
index e114e6f..d112c2c 100644 (file)
@@ -213,12 +213,6 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
         * line for the controller
         */
        if (spi->cs_gpiod) {
-               /*
-                * FIXME: is this code ever executed? This host does not
-                * set SPI_MASTER_GPIO_SS so this chipselect callback should
-                * not get called from the SPI core when we are using
-                * GPIOs for chip select.
-                */
                if (value == BITBANG_CS_ACTIVE)
                        gpiod_set_value(spi->cs_gpiod, 1);
                else
@@ -945,7 +939,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
        master->bus_num = pdev->id;
        master->num_chipselect = pdata->num_chipselect;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
-       master->flags = SPI_MASTER_MUST_RX;
+       master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_GPIO_SS;
        master->setup = davinci_spi_setup;
        master->cleanup = davinci_spi_cleanup;
        master->can_dma = davinci_spi_can_dma;
index aa67655..5896a7b 100644 (file)
@@ -550,7 +550,7 @@ static int ep93xx_spi_prepare_hardware(struct spi_master *master)
        u32 val;
        int ret;
 
-       ret = clk_enable(espi->clk);
+       ret = clk_prepare_enable(espi->clk);
        if (ret)
                return ret;
 
@@ -570,7 +570,7 @@ static int ep93xx_spi_unprepare_hardware(struct spi_master *master)
        val &= ~SSPCR1_SSE;
        writel(val, espi->mmio + SSPCR1);
 
-       clk_disable(espi->clk);
+       clk_disable_unprepare(espi->clk);
 
        return 0;
 }
index 87f8829..829770b 100644 (file)
 
 #define SPI_FSI_BASE                   0x70000
 #define SPI_FSI_INIT_TIMEOUT_MS                1000
-#define SPI_FSI_MAX_XFR_SIZE           2048
-#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED        8
+#define SPI_FSI_MAX_RX_SIZE            8
+#define SPI_FSI_MAX_TX_SIZE            40
 
 #define SPI_FSI_ERROR                  0x0
 #define SPI_FSI_COUNTER_CFG            0x1
-#define  SPI_FSI_COUNTER_CFG_LOOPS(x)   (((u64)(x) & 0xffULL) << 32)
-#define  SPI_FSI_COUNTER_CFG_N2_RX      BIT_ULL(8)
-#define  SPI_FSI_COUNTER_CFG_N2_TX      BIT_ULL(9)
-#define  SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
-#define  SPI_FSI_COUNTER_CFG_N2_RELOAD  BIT_ULL(11)
 #define SPI_FSI_CFG1                   0x2
 #define SPI_FSI_CLOCK_CFG              0x3
 #define  SPI_FSI_CLOCK_CFG_MM_ENABLE    BIT_ULL(32)
@@ -76,8 +71,6 @@ struct fsi_spi {
        struct device *dev;     /* SPI controller device */
        struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
        u32 base;
-       size_t max_xfr_size;
-       bool restricted;
 };
 
 struct fsi_spi_sequence {
@@ -241,7 +234,7 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
        return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
 }
 
-static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
+static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
 {
        /*
         * Add the next byte of instruction to the 8-byte sequence register.
@@ -251,8 +244,6 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
         */
        seq->data |= (u64)val << seq->bit;
        seq->bit -= 8;
-
-       return ((64 - seq->bit) / 8) - 2;
 }
 
 static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
@@ -261,71 +252,11 @@ static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
        seq->data = 0ULL;
 }
 
-static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
-                                    struct fsi_spi_sequence *seq,
-                                    struct spi_transfer *transfer)
-{
-       int loops;
-       int idx;
-       int rc;
-       u8 val = 0;
-       u8 len = min(transfer->len, 8U);
-       u8 rem = transfer->len % len;
-
-       loops = transfer->len / len;
-
-       if (transfer->tx_buf) {
-               val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
-               idx = fsi_spi_sequence_add(seq, val);
-
-               if (rem)
-                       rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
-       } else if (transfer->rx_buf) {
-               val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
-               idx = fsi_spi_sequence_add(seq, val);
-
-               if (rem)
-                       rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
-       } else {
-               return -EINVAL;
-       }
-
-       if (ctx->restricted && loops > 1) {
-               dev_warn(ctx->dev,
-                        "Transfer too large; no branches permitted.\n");
-               return -EINVAL;
-       }
-
-       if (loops > 1) {
-               u64 cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
-
-               fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
-
-               if (transfer->rx_buf)
-                       cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
-                               SPI_FSI_COUNTER_CFG_N2_TX |
-                               SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
-                               SPI_FSI_COUNTER_CFG_N2_RELOAD;
-
-               rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
-               if (rc)
-                       return rc;
-       } else {
-               fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
-       }
-
-       if (rem)
-               fsi_spi_sequence_add(seq, rem);
-
-       return 0;
-}
-
 static int fsi_spi_transfer_data(struct fsi_spi *ctx,
                                 struct spi_transfer *transfer)
 {
        int rc = 0;
        u64 status = 0ULL;
-       u64 cfg = 0ULL;
 
        if (transfer->tx_buf) {
                int nb;
@@ -363,16 +294,6 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
                u64 in = 0ULL;
                u8 *rx = transfer->rx_buf;
 
-               rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
-               if (rc)
-                       return rc;
-
-               if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
-                       rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
-                       if (rc)
-                               return rc;
-               }
-
                while (transfer->len > recv) {
                        do {
                                rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
@@ -439,6 +360,10 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
                }
        } while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
 
+       rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
+       if (rc)
+               return rc;
+
        rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
        if (rc)
                return rc;
@@ -459,6 +384,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
 {
        int rc;
        u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
+       unsigned int len;
        struct spi_transfer *transfer;
        struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
 
@@ -471,8 +397,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
                struct spi_transfer *next = NULL;
 
                /* Sequencer must do shift out (tx) first. */
-               if (!transfer->tx_buf ||
-                   transfer->len > (ctx->max_xfr_size + 8)) {
+               if (!transfer->tx_buf || transfer->len > SPI_FSI_MAX_TX_SIZE) {
                        rc = -EINVAL;
                        goto error;
                }
@@ -486,9 +411,13 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
                fsi_spi_sequence_init(&seq);
                fsi_spi_sequence_add(&seq, seq_slave);
 
-               rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
-               if (rc)
-                       goto error;
+               len = transfer->len;
+               while (len > 8) {
+                       fsi_spi_sequence_add(&seq,
+                                            SPI_FSI_SEQUENCE_SHIFT_OUT(8));
+                       len -= 8;
+               }
+               fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
 
                if (!list_is_last(&transfer->transfer_list,
                                  &mesg->transfers)) {
@@ -496,7 +425,9 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
 
                        /* Sequencer can only do shift in (rx) after tx. */
                        if (next->rx_buf) {
-                               if (next->len > ctx->max_xfr_size) {
+                               u8 shift;
+
+                               if (next->len > SPI_FSI_MAX_RX_SIZE) {
                                        rc = -EINVAL;
                                        goto error;
                                }
@@ -504,10 +435,8 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
                                dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
                                        next->len);
 
-                               rc = fsi_spi_sequence_transfer(ctx, &seq,
-                                                              next);
-                               if (rc)
-                                       goto error;
+                               shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
+                               fsi_spi_sequence_add(&seq, shift);
                        } else {
                                next = NULL;
                        }
@@ -541,9 +470,7 @@ error:
 
 static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
 {
-       struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
-
-       return ctx->max_xfr_size;
+       return SPI_FSI_MAX_RX_SIZE;
 }
 
 static int fsi_spi_probe(struct device *dev)
@@ -582,14 +509,6 @@ static int fsi_spi_probe(struct device *dev)
                ctx->fsi = fsi;
                ctx->base = base + SPI_FSI_BASE;
 
-               if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
-                       ctx->restricted = true;
-                       ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
-               } else {
-                       ctx->restricted = false;
-                       ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
-               }
-
                rc = devm_spi_register_controller(dev, ctlr);
                if (rc)
                        spi_controller_put(ctlr);
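
With the loop/branch support removed, the spi-fsi driver now unrolls a TX transfer into explicit shift-out instructions of at most 8 bytes each, which is what bounds SPI_FSI_MAX_TX_SIZE. A small model of that unrolling, with the sequencer opcodes simplified to printable text:

#include <stdio.h>

/* Emit one "shift out N bytes" sequencer instruction (modelled as text). */
static void seq_add_shift_out(unsigned int n)
{
        printf("SHIFT_OUT(%u)\n", n);
}

/* Mirror of the unrolling in the hunk above: whole 8-byte instructions
 * first, then one final instruction for the remainder (or a full 8). */
static void seq_build_tx(unsigned int len)
{
        while (len > 8) {
                seq_add_shift_out(8);
                len -= 8;
        }
        seq_add_shift_out(len);
}

int main(void)
{
        seq_build_tx(20);       /* -> SHIFT_OUT(8), SHIFT_OUT(8), SHIFT_OUT(4) */
        return 0;
}
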
index fb45e6a..fd004c9 100644 (file)
@@ -530,6 +530,7 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
                goto err_rx_dma_buf;
        }
 
+       memset(&cfg, 0, sizeof(cfg));
        cfg.src_addr = phy_addr + SPI_POPR;
        cfg.dst_addr = phy_addr + SPI_PUSHR;
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
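
The one-line dspi fix above exists because the caller fills in only some members of the configuration it passes down; anything left untouched must still be well defined. A user-space sketch of the idiom, using a made-up structure and addresses rather than the real struct dma_slave_config:

#include <string.h>
#include <stdio.h>

/* Simplified stand-in for a DMA slave configuration; the real structure
 * has many more members, which is exactly why callers must not leave the
 * unset ones holding stack garbage. */
struct slave_cfg {
        unsigned long src_addr;
        unsigned long dst_addr;
        int src_width;
        int dst_width;
        int device_fc;          /* intentionally left at 0 by this caller */
};

static void configure(const struct slave_cfg *cfg)
{
        printf("src=%#lx dst=%#lx widths=%d/%d fc=%d\n",
               cfg->src_addr, cfg->dst_addr,
               cfg->src_width, cfg->dst_width, cfg->device_fc);
}

int main(void)
{
        struct slave_cfg cfg;

        /* Without this memset(), device_fc would contain whatever happened
         * to be on the stack, which is the bug the hunk above fixes. */
        memset(&cfg, 0, sizeof(cfg));
        cfg.src_addr  = 0x4002c038UL;   /* example addresses only */
        cfg.dst_addr  = 0x4002c034UL;
        cfg.src_width = 4;
        cfg.dst_width = 4;

        configure(&cfg);
        return 0;
}
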
index b3861fb..2f51421 100644 (file)
@@ -549,12 +549,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
         */
        spin_lock_irq(&mas->lock);
        geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
-
-       /*
-        * TX_WATERMARK_REG should be set after SPI configuration and
-        * setting up GENI SE engine, as driver starts data transfer
-        * for the watermark interrupt.
-        */
        if (m_cmd & SPI_TX_ONLY) {
                if (geni_spi_handle_tx(mas))
                        writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
index 4aee3db..8d8df51 100644 (file)
@@ -505,7 +505,9 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
                                      struct spi_message *msg)
 {
        struct spi_device *spi = msg->spi;
+       struct spi_transfer *xfer;
        u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
+       u32 min_speed_hz = ~0U;
        u32 testreg, delay;
        u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
 
@@ -577,9 +579,21 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
         * be asserted before the SCLK polarity changes, which would disrupt
         * the SPI communication as the device on the other end would consider
         * the change of SCLK polarity as a clock tick already.
+        *
+        * Because spi_imx->spi_bus_clk is only set in bitbang prepare_message
+        * callback, iterate over all the transfers in spi_message, find the
+        * one with lowest bus frequency, and use that bus frequency for the
+        * delay calculation. In case all transfers have speed_hz == 0, then
+        * min_speed_hz is ~0 and the resulting delay is zero.
         */
-       delay = (2 * 1000000) / spi_imx->spi_bus_clk;
-       if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               if (!xfer->speed_hz)
+                       continue;
+               min_speed_hz = min(xfer->speed_hz, min_speed_hz);
+       }
+
+       delay = (2 * 1000000) / min_speed_hz;
+       if (likely(delay < 10)) /* SCLK is faster than 200 kHz */
                udelay(delay);
        else                    /* SCLK is _very_ slow */
                usleep_range(delay, delay + 10);
@@ -1038,12 +1052,8 @@ static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
 
 static void spi_imx_push(struct spi_imx_data *spi_imx)
 {
-       unsigned int burst_len, fifo_words;
+       unsigned int burst_len;
 
-       if (spi_imx->dynamic_burst)
-               fifo_words = 4;
-       else
-               fifo_words = spi_imx_bytes_per_word(spi_imx->bits_per_word);
        /*
         * Reload the FIFO when the remaining bytes to be transferred in the
         * current burst is 0. This only applies when bits_per_word is a
@@ -1062,7 +1072,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
 
                        spi_imx->remainder = burst_len;
                } else {
-                       spi_imx->remainder = fifo_words;
+                       spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
                }
        }
 
@@ -1070,8 +1080,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
                if (!spi_imx->count)
                        break;
                if (spi_imx->dynamic_burst &&
-                   spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
-                                                    fifo_words))
+                   spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
                        break;
                spi_imx->tx(spi_imx);
                spi_imx->txfifo++;
@@ -1181,6 +1190,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
         * dynamic_burst in that case.
         */
        if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
+           !(spi->mode & SPI_CS_WORD) &&
            (spi_imx->bits_per_word == 8 ||
            spi_imx->bits_per_word == 16 ||
            spi_imx->bits_per_word == 32)) {
@@ -1616,6 +1626,15 @@ static int spi_imx_probe(struct platform_device *pdev)
            is_imx53_ecspi(spi_imx))
                spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;
 
+       if (is_imx51_ecspi(spi_imx) &&
+           device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
+               /*
+                * When using HW-CS implementing SPI_CS_WORD can be done by just
+                * setting the burst length to the word size. This is
+                * considerably faster than manually controlling the CS.
+                */
+               spi_imx->bitbang.master->mode_bits |= SPI_CS_WORD;
+
        spi_imx->spi_drctl = spi_drctl;
 
        init_completion(&spi_imx->xfer_done);
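
The core of the spi-imx change above is how the post-CS-assertion delay is derived: scan the message for the slowest non-zero transfer rate and wait two SCLK periods of that rate. A stand-alone sketch of the arithmetic; the speed values below are examples:

#include <stdio.h>

/* Two SCLK periods, in microseconds, of the slowest transfer in the
 * message; transfers with speed_hz == 0 are skipped, and if every
 * transfer is 0 the minimum stays at ~0U so the delay collapses to 0. */
static unsigned int cs_to_sclk_delay_us(const unsigned int *speeds, int n)
{
        unsigned int min_speed_hz = ~0U;

        for (int i = 0; i < n; i++) {
                if (!speeds[i])
                        continue;
                if (speeds[i] < min_speed_hz)
                        min_speed_hz = speeds[i];
        }

        return (2 * 1000000) / min_speed_hz;
}

int main(void)
{
        unsigned int speeds[] = { 10000000, 100000, 0 };  /* 10 MHz, 100 kHz */
        unsigned int us = cs_to_sclk_delay_us(speeds, 3);

        /* 2 / 100 kHz = 20 us, so this message takes the usleep_range()
         * path rather than udelay(). */
        printf("delay = %u us\n", us);
        return 0;
}
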
index b2c4621..c208efe 100644 (file)
@@ -785,6 +785,8 @@ static int meson_spicc_remove(struct platform_device *pdev)
        clk_disable_unprepare(spicc->core);
        clk_disable_unprepare(spicc->pclk);
 
+       spi_master_put(spicc->master);
+
        return 0;
 }
 
index 68dca8c..386e8c8 100644 (file)
@@ -42,8 +42,9 @@
 #define SPI_CFG1_CS_IDLE_OFFSET           0
 #define SPI_CFG1_PACKET_LOOP_OFFSET       8
 #define SPI_CFG1_PACKET_LENGTH_OFFSET     16
-#define SPI_CFG1_GET_TICK_DLY_OFFSET      30
+#define SPI_CFG1_GET_TICK_DLY_OFFSET      29
 
+#define SPI_CFG1_GET_TICK_DLY_MASK        0xe0000000
 #define SPI_CFG1_CS_IDLE_MASK             0xff
 #define SPI_CFG1_PACKET_LOOP_MASK         0xff00
 #define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000
@@ -90,6 +91,8 @@ struct mtk_spi_compatible {
        bool enhance_timing;
        /* some IC support DMA addr extension */
        bool dma_ext;
+       /* some IC no need unprepare SPI clk */
+       bool no_need_unprepare;
 };
 
 struct mtk_spi {
@@ -104,6 +107,7 @@ struct mtk_spi {
        struct scatterlist *tx_sgl, *rx_sgl;
        u32 tx_sgl_len, rx_sgl_len;
        const struct mtk_spi_compatible *dev_comp;
+       u32 spi_clk_hz;
 };
 
 static const struct mtk_spi_compatible mtk_common_compat;
@@ -135,12 +139,21 @@ static const struct mtk_spi_compatible mt8183_compat = {
        .enhance_timing = true,
 };
 
+static const struct mtk_spi_compatible mt6893_compat = {
+       .need_pad_sel = true,
+       .must_tx = true,
+       .enhance_timing = true,
+       .dma_ext = true,
+       .no_need_unprepare = true,
+};
+
 /*
  * A piece of default chip info unless the platform
  * supplies it.
  */
 static const struct mtk_chip_config mtk_default_chip_info = {
        .sample_sel = 0,
+       .tick_delay = 0,
 };
 
 static const struct of_device_id mtk_spi_of_match[] = {
@@ -174,6 +187,9 @@ static const struct of_device_id mtk_spi_of_match[] = {
        { .compatible = "mediatek,mt8192-spi",
                .data = (void *)&mt6765_compat,
        },
+       { .compatible = "mediatek,mt6893-spi",
+               .data = (void *)&mt6893_compat,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
@@ -192,6 +208,65 @@ static void mtk_spi_reset(struct mtk_spi *mdata)
        writel(reg_val, mdata->base + SPI_CMD_REG);
 }
 
+static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
+{
+       struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+       struct spi_delay *cs_setup = &spi->cs_setup;
+       struct spi_delay *cs_hold = &spi->cs_hold;
+       struct spi_delay *cs_inactive = &spi->cs_inactive;
+       u32 setup, hold, inactive;
+       u32 reg_val;
+       int delay;
+
+       delay = spi_delay_to_ns(cs_setup, NULL);
+       if (delay < 0)
+               return delay;
+       setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+       delay = spi_delay_to_ns(cs_hold, NULL);
+       if (delay < 0)
+               return delay;
+       hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+       delay = spi_delay_to_ns(cs_inactive, NULL);
+       if (delay < 0)
+               return delay;
+       inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+       setup    = setup ? setup : 1;
+       hold     = hold ? hold : 1;
+       inactive = inactive ? inactive : 1;
+
+       reg_val = readl(mdata->base + SPI_CFG0_REG);
+       if (mdata->dev_comp->enhance_timing) {
+               hold = min_t(u32, hold, 0x10000);
+               setup = min_t(u32, setup, 0x10000);
+               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+               reg_val |= (((hold - 1) & 0xffff)
+                          << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+               reg_val |= (((setup - 1) & 0xffff)
+                          << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+       } else {
+               hold = min_t(u32, hold, 0x100);
+               setup = min_t(u32, setup, 0x100);
+               reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
+               reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+               reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
+               reg_val |= (((setup - 1) & 0xff)
+                           << SPI_CFG0_CS_SETUP_OFFSET);
+       }
+       writel(reg_val, mdata->base + SPI_CFG0_REG);
+
+       inactive = min_t(u32, inactive, 0x100);
+       reg_val = readl(mdata->base + SPI_CFG1_REG);
+       reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
+       reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
+       writel(reg_val, mdata->base + SPI_CFG1_REG);
+
+       return 0;
+}
+
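A minimal sketch of the nanosecond-to-SCLK-cycle conversion used by mtk_spi_set_hw_cs_timing() above; the helper name and clock value are illustrative only, the formula is the one in the function:

        /* cycles = ns * ceil(clk_hz / 1 MHz) / 1000; with spi_clk_hz = 109200000 the
         * divisor rounds up to 110, so a 200 ns cs_setup becomes 22 SCLK cycles.
         */
        static u32 mtk_ns_to_sclk_cycles(u32 delay_ns, u32 spi_clk_hz)
        {
                return (delay_ns * DIV_ROUND_UP(spi_clk_hz, 1000000)) / 1000;
        }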
 static int mtk_spi_prepare_message(struct spi_master *master,
                                   struct spi_message *msg)
 {
@@ -261,6 +336,15 @@ static int mtk_spi_prepare_message(struct spi_master *master,
                writel(mdata->pad_sel[spi->chip_select],
                       mdata->base + SPI_PAD_SEL_REG);
 
+       /* tick delay */
+       reg_val = readl(mdata->base + SPI_CFG1_REG);
+       reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
+       reg_val |= ((chip_config->tick_delay & 0x7)
+               << SPI_CFG1_GET_TICK_DLY_OFFSET);
+       writel(reg_val, mdata->base + SPI_CFG1_REG);
+
+       /* set hw cs timing */
+       mtk_spi_set_hw_cs_timing(spi);
        return 0;
 }
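For reference, GET_TICK_DLY is the 3-bit field at bits [31:29] of SPI_CFG1 (offset 29, mask 0xe0000000); a worked example of the update done in prepare_message above, with an illustrative tick_delay value:

        /* tick_delay = 2:
         *   reg_val = (reg_val & ~0xe0000000) | ((2 & 0x7) << 29);
         * sets bits [31:29] to 0b010 and leaves the other SPI_CFG1 fields untouched.
         */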
 
@@ -287,12 +371,11 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
 static void mtk_spi_prepare_transfer(struct spi_master *master,
                                     struct spi_transfer *xfer)
 {
-       u32 spi_clk_hz, div, sck_time, reg_val;
+       u32 div, sck_time, reg_val;
        struct mtk_spi *mdata = spi_master_get_devdata(master);
 
-       spi_clk_hz = clk_get_rate(mdata->spi_clk);
-       if (xfer->speed_hz < spi_clk_hz / 2)
-               div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
+       if (xfer->speed_hz < mdata->spi_clk_hz / 2)
+               div = DIV_ROUND_UP(mdata->spi_clk_hz, xfer->speed_hz);
        else
                div = 1;
 
@@ -426,24 +509,15 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
        mtk_spi_prepare_transfer(master, xfer);
        mtk_spi_setup_packet(master);
 
-       cnt = xfer->len / 4;
-       if (xfer->tx_buf)
+       if (xfer->tx_buf) {
+               cnt = xfer->len / 4;
                iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
-
-       if (xfer->rx_buf)
-               ioread32_rep(mdata->base + SPI_RX_DATA_REG, xfer->rx_buf, cnt);
-
-       remainder = xfer->len % 4;
-       if (remainder > 0) {
-               reg_val = 0;
-               if (xfer->tx_buf) {
+               remainder = xfer->len % 4;
+               if (remainder > 0) {
+                       reg_val = 0;
                        memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
                        writel(reg_val, mdata->base + SPI_TX_DATA_REG);
                }
-               if (xfer->rx_buf) {
-                       reg_val = readl(mdata->base + SPI_RX_DATA_REG);
-                       memcpy(xfer->rx_buf + (cnt * 4), &reg_val, remainder);
-               }
        }
 
        mtk_spi_enable_transfer(master);
@@ -516,52 +590,6 @@ static bool mtk_spi_can_dma(struct spi_master *master,
                (unsigned long)xfer->rx_buf % 4 == 0);
 }
 
-static int mtk_spi_set_hw_cs_timing(struct spi_device *spi,
-                                   struct spi_delay *setup,
-                                   struct spi_delay *hold,
-                                   struct spi_delay *inactive)
-{
-       struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
-       u16 setup_dly, hold_dly, inactive_dly;
-       u32 reg_val;
-
-       if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
-           (hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
-           (inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
-               dev_err(&spi->dev,
-                       "Invalid delay unit, should be SPI_DELAY_UNIT_SCK\n");
-               return -EINVAL;
-       }
-
-       setup_dly = setup ? setup->value : 1;
-       hold_dly = hold ? hold->value : 1;
-       inactive_dly = inactive ? inactive->value : 1;
-
-       reg_val = readl(mdata->base + SPI_CFG0_REG);
-       if (mdata->dev_comp->enhance_timing) {
-               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
-               reg_val |= (((hold_dly - 1) & 0xffff)
-                          << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
-               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
-               reg_val |= (((setup_dly - 1) & 0xffff)
-                          << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
-       } else {
-               reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
-               reg_val |= (((hold_dly - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
-               reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
-               reg_val |= (((setup_dly - 1) & 0xff)
-                           << SPI_CFG0_CS_SETUP_OFFSET);
-       }
-       writel(reg_val, mdata->base + SPI_CFG0_REG);
-
-       reg_val = readl(mdata->base + SPI_CFG1_REG);
-       reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
-       reg_val |= (((inactive_dly - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
-       writel(reg_val, mdata->base + SPI_CFG1_REG);
-
-       return 0;
-}
-
 static int mtk_spi_setup(struct spi_device *spi)
 {
        struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
@@ -799,7 +827,12 @@ static int mtk_spi_probe(struct platform_device *pdev)
                goto err_put_master;
        }
 
-       clk_disable_unprepare(mdata->spi_clk);
+       mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
+
+       if (mdata->dev_comp->no_need_unprepare)
+               clk_disable(mdata->spi_clk);
+       else
+               clk_disable_unprepare(mdata->spi_clk);
 
        pm_runtime_enable(&pdev->dev);
 
@@ -867,6 +900,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
 
        mtk_spi_reset(mdata);
 
+       if (mdata->dev_comp->no_need_unprepare)
+               clk_unprepare(mdata->spi_clk);
+
        return 0;
 }
 
@@ -915,7 +951,10 @@ static int mtk_spi_runtime_suspend(struct device *dev)
        struct spi_master *master = dev_get_drvdata(dev);
        struct mtk_spi *mdata = spi_master_get_devdata(master);
 
-       clk_disable_unprepare(mdata->spi_clk);
+       if (mdata->dev_comp->no_need_unprepare)
+               clk_disable(mdata->spi_clk);
+       else
+               clk_disable_unprepare(mdata->spi_clk);
 
        return 0;
 }
@@ -926,7 +965,10 @@ static int mtk_spi_runtime_resume(struct device *dev)
        struct mtk_spi *mdata = spi_master_get_devdata(master);
        int ret;
 
-       ret = clk_prepare_enable(mdata->spi_clk);
+       if (mdata->dev_comp->no_need_unprepare)
+               ret = clk_enable(mdata->spi_clk);
+       else
+               ret = clk_prepare_enable(mdata->spi_clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
                return ret;
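The no_need_unprepare flag keeps the clock prepared for the device's lifetime and toggles only the non-sleeping enable count across runtime PM; a sketch of the intended call pattern (excerpted and reordered for illustration, not literal driver code):

        ret = clk_prepare_enable(mdata->spi_clk);  /* probe: prepare once, then enable */
        clk_disable(mdata->spi_clk);               /* probe / runtime suspend: disable only */
        ret = clk_enable(mdata->spi_clk);          /* runtime resume: enable only */
        clk_unprepare(mdata->spi_clk);             /* remove: finally drop the prepare count */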
index 37dfc6e..9708b78 100644 (file)
@@ -167,10 +167,17 @@ err_put_ctlr:
        return ret;
 }
 
+static const struct spi_device_id spi_mux_id[] = {
+       { "spi-mux" },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, spi_mux_id);
+
 static const struct of_device_id spi_mux_of_match[] = {
        { .compatible = "spi-mux" },
        { }
 };
+MODULE_DEVICE_TABLE(of, spi_mux_of_match);
 
 static struct spi_driver spi_mux_driver = {
        .probe  = spi_mux_probe,
@@ -178,6 +185,7 @@ static struct spi_driver spi_mux_driver = {
                .name   = "spi-mux",
                .of_match_table = spi_mux_of_match,
        },
+       .id_table = spi_mux_id,
 };
 
 module_spi_driver(spi_mux_driver);
index 96b4182..4588994 100644 (file)
@@ -335,8 +335,10 @@ static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf,
 static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
                                     const struct spi_mem_op *op)
 {
-       if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
-           op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
+       bool all_false;
+
+       if (op->data.buswidth > 8 || op->addr.buswidth > 8 ||
+           op->dummy.buswidth > 8 || op->cmd.buswidth > 8)
                return false;
 
        if (op->data.nbytes && op->dummy.nbytes &&
@@ -346,7 +348,13 @@ static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
        if (op->addr.nbytes > 7)
                return false;
 
-       return spi_mem_default_supports_op(mem, op);
+       all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
+                   !op->data.dtr;
+
+       if (all_false)
+               return spi_mem_default_supports_op(mem, op);
+       else
+               return spi_mem_dtr_supports_op(mem, op);
 }
 
 static int mxic_spi_mem_exec_op(struct spi_mem *mem,
@@ -355,14 +363,15 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
        struct mxic_spi *mxic = spi_master_get_devdata(mem->spi->master);
        int nio = 1, i, ret;
        u32 ss_ctrl;
-       u8 addr[8];
-       u8 opcode = op->cmd.opcode;
+       u8 addr[8], cmd[2];
 
        ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
        if (ret)
                return ret;
 
-       if (mem->spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
+       if (mem->spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL))
+               nio = 8;
+       else if (mem->spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
                nio = 4;
        else if (mem->spi->mode & (SPI_TX_DUAL | SPI_RX_DUAL))
                nio = 2;
@@ -374,19 +383,26 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
               mxic->regs + HC_CFG);
        writel(HC_EN_BIT, mxic->regs + HC_EN);
 
-       ss_ctrl = OP_CMD_BYTES(1) | OP_CMD_BUSW(fls(op->cmd.buswidth) - 1);
+       ss_ctrl = OP_CMD_BYTES(op->cmd.nbytes) |
+                 OP_CMD_BUSW(fls(op->cmd.buswidth) - 1) |
+                 (op->cmd.dtr ? OP_CMD_DDR : 0);
 
        if (op->addr.nbytes)
                ss_ctrl |= OP_ADDR_BYTES(op->addr.nbytes) |
-                          OP_ADDR_BUSW(fls(op->addr.buswidth) - 1);
+                          OP_ADDR_BUSW(fls(op->addr.buswidth) - 1) |
+                          (op->addr.dtr ? OP_ADDR_DDR : 0);
 
        if (op->dummy.nbytes)
                ss_ctrl |= OP_DUMMY_CYC(op->dummy.nbytes);
 
        if (op->data.nbytes) {
-               ss_ctrl |= OP_DATA_BUSW(fls(op->data.buswidth) - 1);
-               if (op->data.dir == SPI_MEM_DATA_IN)
+               ss_ctrl |= OP_DATA_BUSW(fls(op->data.buswidth) - 1) |
+                          (op->data.dtr ? OP_DATA_DDR : 0);
+               if (op->data.dir == SPI_MEM_DATA_IN) {
                        ss_ctrl |= OP_READ;
+                       if (op->data.dtr)
+                               ss_ctrl |= OP_DQS_EN;
+               }
        }
 
        writel(ss_ctrl, mxic->regs + SS_CTRL(mem->spi->chip_select));
@@ -394,7 +410,10 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
        writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
               mxic->regs + HC_CFG);
 
-       ret = mxic_spi_data_xfer(mxic, &opcode, NULL, 1);
+       for (i = 0; i < op->cmd.nbytes; i++)
+               cmd[i] = op->cmd.opcode >> (8 * (op->cmd.nbytes - i - 1));
+
+       ret = mxic_spi_data_xfer(mxic, cmd, NULL, op->cmd.nbytes);
        if (ret)
                goto out;
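A worked example of the opcode split above for a 2-byte (DTR) command; the opcode value is illustrative only:

        /* op->cmd.nbytes = 2, op->cmd.opcode = 0x05fa
         *   i = 0: cmd[0] = 0x05fa >> 8 = 0x05   (most significant byte first)
         *   i = 1: cmd[1] = 0x05fa >> 0 = 0xfa
         * so the opcode bytes are sent on the wire in their encoded order.
         */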
 
@@ -567,7 +586,8 @@ static int mxic_spi_probe(struct platform_device *pdev)
        master->bits_per_word_mask = SPI_BPW_MASK(8);
        master->mode_bits = SPI_CPOL | SPI_CPHA |
                        SPI_RX_DUAL | SPI_TX_DUAL |
-                       SPI_RX_QUAD | SPI_TX_QUAD;
+                       SPI_RX_QUAD | SPI_TX_QUAD |
+                       SPI_RX_OCTAL | SPI_TX_OCTAL;
 
        mxic_spi_hw_init(mxic);
 
index 34b31ab..e8de3cb 100644 (file)
@@ -328,8 +328,16 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 static void orion_spi_set_cs(struct spi_device *spi, bool enable)
 {
        struct orion_spi *orion_spi;
+       void __iomem *ctrl_reg;
+       u32 val;
 
        orion_spi = spi_master_get_devdata(spi->master);
+       ctrl_reg = spi_reg(orion_spi, ORION_SPI_IF_CTRL_REG);
+
+       val = readl(ctrl_reg);
+
+       /* Clear existing chip-select and assertion state */
+       val &= ~(ORION_SPI_CS_MASK | 0x1);
 
        /*
         * If this line is using a GPIO to control chip select, this internal
@@ -338,9 +346,7 @@ static void orion_spi_set_cs(struct spi_device *spi, bool enable)
         * as it is handled by a GPIO, but that doesn't matter. What we need
         * is to deassert the old chip select and assert some other chip select.
         */
-       orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
-       orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
-                         ORION_SPI_CS(spi->chip_select));
+       val |= ORION_SPI_CS(spi->chip_select);
 
        /*
         * Chip select logic is inverted from spi_set_cs(). For lines using a
@@ -350,9 +356,13 @@ static void orion_spi_set_cs(struct spi_device *spi, bool enable)
         * doesn't matter.
         */
        if (!enable)
-               orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
-       else
-               orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
+               val |= 0x1;
+
+       /*
+        * To avoid toggling unwanted chip selects update the register
+        * with a single write.
+        */
+       writel(val, ctrl_reg);
 }
 
 static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi)
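Rationale sketch for the single-write chip-select update above (sequence illustrative): with the previous two-step update there was a window in which the CS field read back as 0, so an already-asserted output could briefly glitch another line; folding selection and assertion into one write removes that window.

        /* old:  clrbits(CTRL, ORION_SPI_CS_MASK);   -> field momentarily selects CS0
         *       setbits(CTRL, ORION_SPI_CS(n));     -> intended line selected
         * new:  a single writel() carrying both the CS field and the assert bit
         */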
index 104bde1..5eb7b61 100644 (file)
@@ -361,6 +361,7 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
        struct dma_slave_config cfg;
        int ret;
 
+       memset(&cfg, 0, sizeof(cfg));
        cfg.device_fc = true;
        cfg.src_addr = pic32s->dma_base + buf_offset;
        cfg.dst_addr = pic32s->dma_base + buf_offset;
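Zero-initialising the on-stack dma_slave_config matters because dmaengine providers may read fields the driver never sets; a minimal sketch of the pattern, with illustrative channel and address variables:

        struct dma_slave_config cfg;

        memset(&cfg, 0, sizeof(cfg));           /* unset fields read as 0, not stack garbage */
        cfg.direction = DMA_MEM_TO_DEV;
        cfg.dst_addr = fifo_phys_addr;          /* illustrative FIFO bus address */
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        ret = dmaengine_slave_config(chan, &cfg);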
index 974e307..1573f6d 100644 (file)
@@ -594,24 +594,29 @@ static int u32_reader(struct driver_data *drv_data)
 
 static void reset_sccr1(struct driver_data *drv_data)
 {
-       struct chip_data *chip =
-               spi_get_ctldata(drv_data->controller->cur_msg->spi);
-       u32 sccr1_reg;
+       u32 mask = drv_data->int_cr1 | drv_data->dma_cr1, threshold;
+       struct chip_data *chip;
+
+       if (drv_data->controller->cur_msg) {
+               chip = spi_get_ctldata(drv_data->controller->cur_msg->spi);
+               threshold = chip->threshold;
+       } else {
+               threshold = 0;
+       }
 
-       sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
        switch (drv_data->ssp_type) {
        case QUARK_X1000_SSP:
-               sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
+               mask |= QUARK_X1000_SSCR1_RFT;
                break;
        case CE4100_SSP:
-               sccr1_reg &= ~CE4100_SSCR1_RFT;
+               mask |= CE4100_SSCR1_RFT;
                break;
        default:
-               sccr1_reg &= ~SSCR1_RFT;
+               mask |= SSCR1_RFT;
                break;
        }
-       sccr1_reg |= chip->threshold;
-       pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
+
+       pxa2xx_spi_update(drv_data, SSCR1, mask, threshold);
 }
 
 static void int_stop_and_reset(struct driver_data *drv_data)
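pxa2xx_spi_update() used above is a read-modify-write register helper; a minimal sketch of the semantics assumed here (not necessarily the driver's exact implementation):

        static void pxa2xx_spi_update(struct driver_data *drv_data, u32 reg,
                                      u32 mask, u32 value)
        {
                pxa2xx_spi_write(drv_data, reg,
                                 (pxa2xx_spi_read(drv_data, reg) & ~mask) | value);
        }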
@@ -724,11 +729,8 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 
 static void handle_bad_msg(struct driver_data *drv_data)
 {
+       int_stop_and_reset(drv_data);
        pxa2xx_spi_off(drv_data);
-       clear_SSCR1_bits(drv_data, drv_data->int_cr1);
-       if (!pxa25x_ssp_comp(drv_data))
-               pxa2xx_spi_write(drv_data, SSTO, 0);
-       write_SSSR_CS(drv_data, drv_data->clear_sr);
 
        dev_err(drv_data->ssp->dev, "bad message state in interrupt handler\n");
 }
@@ -1156,13 +1158,10 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
 {
        struct driver_data *drv_data = spi_controller_get_devdata(controller);
 
+       int_stop_and_reset(drv_data);
+
        /* Disable the SSP */
        pxa2xx_spi_off(drv_data);
-       /* Clear and disable interrupts and service requests */
-       write_SSSR_CS(drv_data, drv_data->clear_sr);
-       clear_SSCR1_bits(drv_data, drv_data->int_cr1 | drv_data->dma_cr1);
-       if (!pxa25x_ssp_comp(drv_data))
-               pxa2xx_spi_write(drv_data, SSTO, 0);
 
        /*
         * Stop the DMA if running. Note DMA callback handler may have unset
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
new file mode 100644 (file)
index 0000000..a46b385
--- /dev/null
@@ -0,0 +1,694 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip Serial Flash Controller Driver
+ *
+ * Copyright (c) 2017-2021, Rockchip Inc.
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *        Chris Morgan <macroalpha82@gmail.com>
+ *        Jon Lin <Jon.lin@rock-chips.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi-mem.h>
+
+/* System control */
+#define SFC_CTRL                       0x0
+#define  SFC_CTRL_PHASE_SEL_NEGETIVE   BIT(1)
+#define  SFC_CTRL_CMD_BITS_SHIFT       8
+#define  SFC_CTRL_ADDR_BITS_SHIFT      10
+#define  SFC_CTRL_DATA_BITS_SHIFT      12
+
+/* Interrupt mask */
+#define SFC_IMR                                0x4
+#define  SFC_IMR_RX_FULL               BIT(0)
+#define  SFC_IMR_RX_UFLOW              BIT(1)
+#define  SFC_IMR_TX_OFLOW              BIT(2)
+#define  SFC_IMR_TX_EMPTY              BIT(3)
+#define  SFC_IMR_TRAN_FINISH           BIT(4)
+#define  SFC_IMR_BUS_ERR               BIT(5)
+#define  SFC_IMR_NSPI_ERR              BIT(6)
+#define  SFC_IMR_DMA                   BIT(7)
+
+/* Interrupt clear */
+#define SFC_ICLR                       0x8
+#define  SFC_ICLR_RX_FULL              BIT(0)
+#define  SFC_ICLR_RX_UFLOW             BIT(1)
+#define  SFC_ICLR_TX_OFLOW             BIT(2)
+#define  SFC_ICLR_TX_EMPTY             BIT(3)
+#define  SFC_ICLR_TRAN_FINISH          BIT(4)
+#define  SFC_ICLR_BUS_ERR              BIT(5)
+#define  SFC_ICLR_NSPI_ERR             BIT(6)
+#define  SFC_ICLR_DMA                  BIT(7)
+
+/* FIFO threshold level */
+#define SFC_FTLR                       0xc
+#define  SFC_FTLR_TX_SHIFT             0
+#define  SFC_FTLR_TX_MASK              0x1f
+#define  SFC_FTLR_RX_SHIFT             8
+#define  SFC_FTLR_RX_MASK              0x1f
+
+/* Reset FSM and FIFO */
+#define SFC_RCVR                       0x10
+#define  SFC_RCVR_RESET                        BIT(0)
+
+/* Enhanced mode */
+#define SFC_AX                         0x14
+
+/* Address Bit number */
+#define SFC_ABIT                       0x18
+
+/* Interrupt status */
+#define SFC_ISR                                0x1c
+#define  SFC_ISR_RX_FULL_SHIFT         BIT(0)
+#define  SFC_ISR_RX_UFLOW_SHIFT                BIT(1)
+#define  SFC_ISR_TX_OFLOW_SHIFT                BIT(2)
+#define  SFC_ISR_TX_EMPTY_SHIFT                BIT(3)
+#define  SFC_ISR_TX_FINISH_SHIFT       BIT(4)
+#define  SFC_ISR_BUS_ERR_SHIFT         BIT(5)
+#define  SFC_ISR_NSPI_ERR_SHIFT                BIT(6)
+#define  SFC_ISR_DMA_SHIFT             BIT(7)
+
+/* FIFO status */
+#define SFC_FSR                                0x20
+#define  SFC_FSR_TX_IS_FULL            BIT(0)
+#define  SFC_FSR_TX_IS_EMPTY           BIT(1)
+#define  SFC_FSR_RX_IS_EMPTY           BIT(2)
+#define  SFC_FSR_RX_IS_FULL            BIT(3)
+#define  SFC_FSR_TXLV_MASK             GENMASK(12, 8)
+#define  SFC_FSR_TXLV_SHIFT            8
+#define  SFC_FSR_RXLV_MASK             GENMASK(20, 16)
+#define  SFC_FSR_RXLV_SHIFT            16
+
+/* FSM status */
+#define SFC_SR                         0x24
+#define  SFC_SR_IS_IDLE                        0x0
+#define  SFC_SR_IS_BUSY                        0x1
+
+/* Raw interrupt status */
+#define SFC_RISR                       0x28
+#define  SFC_RISR_RX_FULL              BIT(0)
+#define  SFC_RISR_RX_UNDERFLOW         BIT(1)
+#define  SFC_RISR_TX_OVERFLOW          BIT(2)
+#define  SFC_RISR_TX_EMPTY             BIT(3)
+#define  SFC_RISR_TRAN_FINISH          BIT(4)
+#define  SFC_RISR_BUS_ERR              BIT(5)
+#define  SFC_RISR_NSPI_ERR             BIT(6)
+#define  SFC_RISR_DMA                  BIT(7)
+
+/* Version */
+#define SFC_VER                                0x2C
+#define  SFC_VER_3                     0x3
+#define  SFC_VER_4                     0x4
+#define  SFC_VER_5                     0x5
+
+/* Delay line controller register */
+#define SFC_DLL_CTRL0                  0x3C
+#define SFC_DLL_CTRL0_SCLK_SMP_DLL     BIT(15)
+#define SFC_DLL_CTRL0_DLL_MAX_VER4     0xFFU
+#define SFC_DLL_CTRL0_DLL_MAX_VER5     0x1FFU
+
+/* Master trigger */
+#define SFC_DMA_TRIGGER                        0x80
+#define SFC_DMA_TRIGGER_START          1
+
+/* Src or Dst addr for master */
+#define SFC_DMA_ADDR                   0x84
+
+/* Length control register extension 32GB */
+#define SFC_LEN_CTRL                   0x88
+#define SFC_LEN_CTRL_TRB_SEL           1
+#define SFC_LEN_EXT                    0x8C
+
+/* Command */
+#define SFC_CMD                                0x100
+#define  SFC_CMD_IDX_SHIFT             0
+#define  SFC_CMD_DUMMY_SHIFT           8
+#define  SFC_CMD_DIR_SHIFT             12
+#define  SFC_CMD_DIR_RD                        0
+#define  SFC_CMD_DIR_WR                        1
+#define  SFC_CMD_ADDR_SHIFT            14
+#define  SFC_CMD_ADDR_0BITS            0
+#define  SFC_CMD_ADDR_24BITS           1
+#define  SFC_CMD_ADDR_32BITS           2
+#define  SFC_CMD_ADDR_XBITS            3
+#define  SFC_CMD_TRAN_BYTES_SHIFT      16
+#define  SFC_CMD_CS_SHIFT              30
+
+/* Address */
+#define SFC_ADDR                       0x104
+
+/* Data */
+#define SFC_DATA                       0x108
+
+/* The controller and documentation report that up to 4 CS devices (0-3) are
+ * supported; however, I have only been able to test a single CS (CS 0) due
+ * to the configuration of my device.
+ */
+#define SFC_MAX_CHIPSELECT_NUM         4
+
+/* The SFC can transfer at most 16KB - 1 at one time;
+ * we set it to 15.5KB here for alignment.
+ */
+#define SFC_MAX_IOSIZE_VER3            (512 * 31)
+
+/* DMA is only enabled for large data transmission */
+#define SFC_DMA_TRANS_THRETHOLD                (0x40)
+
+/* The maximum clock value from the datasheet suggests keeping the clock
+ * under 150MHz. No minimum or average value is suggested.
+ */
+#define SFC_MAX_SPEED          (150 * 1000 * 1000)
+
+struct rockchip_sfc {
+       struct device *dev;
+       void __iomem *regbase;
+       struct clk *hclk;
+       struct clk *clk;
+       u32 frequency;
+       /* virtual mapped addr for dma_buffer */
+       void *buffer;
+       dma_addr_t dma_buffer;
+       struct completion cp;
+       bool use_dma;
+       u32 max_iosize;
+       u16 version;
+};
+
+static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
+{
+       int err;
+       u32 status;
+
+       writel_relaxed(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);
+
+       err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
+                                !(status & SFC_RCVR_RESET), 20,
+                                jiffies_to_usecs(HZ));
+       if (err)
+               dev_err(sfc->dev, "SFC reset never finished\n");
+
+       /* Still need to clear the masked interrupt from RISR */
+       writel_relaxed(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+
+       dev_dbg(sfc->dev, "reset\n");
+
+       return err;
+}
+
+static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
+{
+       return  (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
+}
+
+static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
+{
+       return SFC_MAX_IOSIZE_VER3;
+}
+
+static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask)
+{
+       u32 reg;
+
+       /* Enable transfer complete interrupt */
+       reg = readl(sfc->regbase + SFC_IMR);
+       reg &= ~mask;
+       writel(reg, sfc->regbase + SFC_IMR);
+}
+
+static void rockchip_sfc_irq_mask(struct rockchip_sfc *sfc, u32 mask)
+{
+       u32 reg;
+
+       /* Disable transfer finish interrupt */
+       reg = readl(sfc->regbase + SFC_IMR);
+       reg |= mask;
+       writel(reg, sfc->regbase + SFC_IMR);
+}
+
+static int rockchip_sfc_init(struct rockchip_sfc *sfc)
+{
+       writel(0, sfc->regbase + SFC_CTRL);
+       writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+       rockchip_sfc_irq_mask(sfc, 0xFFFFFFFF);
+       if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
+               writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
+
+       return 0;
+}
+
+static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+       int ret = 0;
+       u32 status;
+
+       ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
+                                status & SFC_FSR_TXLV_MASK, 0,
+                                timeout_us);
+       if (ret) {
+               dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");
+
+               return -ETIMEDOUT;
+       }
+
+       return (status & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
+}
+
+static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+       int ret = 0;
+       u32 status;
+
+       ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
+                                status & SFC_FSR_RXLV_MASK, 0,
+                                timeout_us);
+       if (ret) {
+               dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");
+
+               return -ETIMEDOUT;
+       }
+
+       return (status & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
+}
+
+static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
+{
+       if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
+               /*
+                * The SFC does not support DUMMY cycles right after the CMD cycles,
+                * so treat them as ADDR cycles.
+                */
+               op->addr.nbytes = op->dummy.nbytes;
+               op->addr.buswidth = op->dummy.buswidth;
+               op->addr.val = 0xFFFFFFFFF;
+
+               op->dummy.nbytes = 0;
+       }
+}
+
+static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
+                                  struct spi_mem *mem,
+                                  const struct spi_mem_op *op,
+                                  u32 len)
+{
+       u32 ctrl = 0, cmd = 0;
+
+       /* set CMD */
+       cmd = op->cmd.opcode;
+       ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);
+
+       /* set ADDR */
+       if (op->addr.nbytes) {
+               if (op->addr.nbytes == 4) {
+                       cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
+               } else if (op->addr.nbytes == 3) {
+                       cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
+               } else {
+                       cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
+                       writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT);
+               }
+
+               ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
+       }
+
+       /* set DUMMY */
+       if (op->dummy.nbytes) {
+               if (op->dummy.buswidth == 4)
+                       cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
+               else if (op->dummy.buswidth == 2)
+                       cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
+               else
+                       cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
+       }
+
+       /* set DATA */
+       if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
+               writel(len, sfc->regbase + SFC_LEN_EXT);
+       else
+               cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
+       if (len) {
+               if (op->data.dir == SPI_MEM_DATA_OUT)
+                       cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
+
+               ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
+       }
+       if (!len && op->addr.nbytes)
+               cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
+
+       /* set the Controller */
+       ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
+       cmd |= mem->spi->chip_select << SFC_CMD_CS_SHIFT;
+
+       dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
+               op->addr.nbytes, op->addr.buswidth,
+               op->dummy.nbytes, op->dummy.buswidth);
+       dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
+               ctrl, cmd, op->addr.val, len);
+
+       writel(ctrl, sfc->regbase + SFC_CTRL);
+       writel(cmd, sfc->regbase + SFC_CMD);
+       if (op->addr.nbytes)
+               writel(op->addr.val, sfc->regbase + SFC_ADDR);
+
+       return 0;
+}
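A worked example of the dummy-phase and address encoding in rockchip_sfc_xfer_setup() above; the controller counts dummy cycles, so the byte count is scaled by 8 / buswidth (values illustrative):

        /* cycles = dummy.nbytes * 8 / dummy.buswidth
         *   8 dummy bytes on a x4 bus -> 8 * 2 = 16 cycles
         *   8 dummy bytes on a x2 bus -> 8 * 4 = 32 cycles
         *   8 dummy bytes on a x1 bus -> 8 * 8 = 64 cycles
         * 3-byte addresses use SFC_CMD_ADDR_24BITS, 4-byte use SFC_CMD_ADDR_32BITS,
         * and any other length programs the exact bit count through SFC_ABIT.
         */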
+
+static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
+{
+       u8 bytes = len & 0x3;
+       u32 dwords;
+       int tx_level;
+       u32 write_words;
+       u32 tmp = 0;
+
+       dwords = len >> 2;
+       while (dwords) {
+               tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
+               if (tx_level < 0)
+                       return tx_level;
+               write_words = min_t(u32, tx_level, dwords);
+               iowrite32_rep(sfc->regbase + SFC_DATA, buf, write_words);
+               buf += write_words << 2;
+               dwords -= write_words;
+       }
+
+       /* write the remaining non-word-aligned bytes */
+       if (bytes) {
+               tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
+               if (tx_level < 0)
+                       return tx_level;
+               memcpy(&tmp, buf, bytes);
+               writel(tmp, sfc->regbase + SFC_DATA);
+       }
+
+       return len;
+}
+
+static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
+{
+       u8 bytes = len & 0x3;
+       u32 dwords;
+       u8 read_words;
+       int rx_level;
+       int tmp;
+
+       /* word aligned access only */
+       dwords = len >> 2;
+       while (dwords) {
+               rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
+               if (rx_level < 0)
+                       return rx_level;
+               read_words = min_t(u32, rx_level, dwords);
+               ioread32_rep(sfc->regbase + SFC_DATA, buf, read_words);
+               buf += read_words << 2;
+               dwords -= read_words;
+       }
+
+       /* read the remaining non-word-aligned bytes */
+       if (bytes) {
+               rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
+               if (rx_level < 0)
+                       return rx_level;
+               tmp = readl(sfc->regbase + SFC_DATA);
+               memcpy(buf, &tmp, bytes);
+       }
+
+       return len;
+}
+
+static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
+{
+       writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+       writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
+       writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);
+
+       return len;
+}
+
+static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
+                                      const struct spi_mem_op *op, u32 len)
+{
+       dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);
+
+       if (op->data.dir == SPI_MEM_DATA_OUT)
+               return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
+       else
+               return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
+}
+
+static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
+                                     const struct spi_mem_op *op, u32 len)
+{
+       int ret;
+
+       dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
+
+       if (op->data.dir == SPI_MEM_DATA_OUT)
+               memcpy(sfc->buffer, op->data.buf.out, len);
+
+       ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len);
+       if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) {
+               dev_err(sfc->dev, "DMA wait for transfer finish timeout\n");
+               ret = -ETIMEDOUT;
+       }
+       rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA);
+       if (op->data.dir == SPI_MEM_DATA_IN)
+               memcpy(op->data.buf.in, sfc->buffer, len);
+
+       return ret;
+}
+
+static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+       int ret = 0;
+       u32 status;
+
+       ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
+                                !(status & SFC_SR_IS_BUSY),
+                                20, timeout_us);
+       if (ret) {
+               dev_err(sfc->dev, "wait sfc idle timeout\n");
+               rockchip_sfc_reset(sfc);
+
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
+static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+       struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);
+       u32 len = op->data.nbytes;
+       int ret;
+
+       if (unlikely(mem->spi->max_speed_hz != sfc->frequency)) {
+               ret = clk_set_rate(sfc->clk, mem->spi->max_speed_hz);
+               if (ret)
+                       return ret;
+               sfc->frequency = mem->spi->max_speed_hz;
+               dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n",
+                       sfc->frequency, clk_get_rate(sfc->clk));
+       }
+
+       rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
+       rockchip_sfc_xfer_setup(sfc, mem, op, len);
+       if (len) {
+               if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) {
+                       init_completion(&sfc->cp);
+                       rockchip_sfc_irq_unmask(sfc, SFC_IMR_DMA);
+                       ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
+               } else {
+                       ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
+               }
+
+               if (ret != len) {
+                       dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);
+
+                       return -EIO;
+               }
+       }
+
+       return rockchip_sfc_xfer_done(sfc, 100000);
+}
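Decision sketch for the data path chosen in rockchip_sfc_exec_mem_op() above (threshold from SFC_DMA_TRANS_THRETHOLD):

        /* if (use_dma && len >= 0x40) -> rockchip_sfc_xfer_data_dma()  (bounce buffer + IRQ)
         * else                        -> rockchip_sfc_xfer_data_poll() (FIFO PIO)
         * in both cases the op only completes once rockchip_sfc_xfer_done() sees the FSM idle.
         */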
+
+static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+       struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);
+
+       op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
+
+       return 0;
+}
+
+static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
+       .exec_op = rockchip_sfc_exec_mem_op,
+       .adjust_op_size = rockchip_sfc_adjust_op_size,
+};
+
+static irqreturn_t rockchip_sfc_irq_handler(int irq, void *dev_id)
+{
+       struct rockchip_sfc *sfc = dev_id;
+       u32 reg;
+
+       reg = readl(sfc->regbase + SFC_RISR);
+
+       /* Clear interrupt */
+       writel_relaxed(reg, sfc->regbase + SFC_ICLR);
+
+       if (reg & SFC_RISR_DMA) {
+               complete(&sfc->cp);
+
+               return IRQ_HANDLED;
+       }
+
+       return IRQ_NONE;
+}
+
+static int rockchip_sfc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct spi_master *master;
+       struct resource *res;
+       struct rockchip_sfc *sfc;
+       int ret;
+
+       master = devm_spi_alloc_master(&pdev->dev, sizeof(*sfc));
+       if (!master)
+               return -ENOMEM;
+
+       master->flags = SPI_MASTER_HALF_DUPLEX;
+       master->mem_ops = &rockchip_sfc_mem_ops;
+       master->dev.of_node = pdev->dev.of_node;
+       master->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
+       master->max_speed_hz = SFC_MAX_SPEED;
+       master->num_chipselect = SFC_MAX_CHIPSELECT_NUM;
+
+       sfc = spi_master_get_devdata(master);
+       sfc->dev = dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       sfc->regbase = devm_ioremap_resource(dev, res);
+       if (IS_ERR(sfc->regbase))
+               return PTR_ERR(sfc->regbase);
+
+       sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
+       if (IS_ERR(sfc->clk)) {
+               dev_err(&pdev->dev, "Failed to get sfc interface clk\n");
+               return PTR_ERR(sfc->clk);
+       }
+
+       sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
+       if (IS_ERR(sfc->hclk)) {
+               dev_err(&pdev->dev, "Failed to get sfc ahb clk\n");
+               return PTR_ERR(sfc->hclk);
+       }
+
+       sfc->use_dma = !of_property_read_bool(sfc->dev->of_node,
+                                             "rockchip,sfc-no-dma");
+
+       if (sfc->use_dma) {
+               ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+               if (ret) {
+                       dev_warn(dev, "Unable to set dma mask\n");
+                       return ret;
+               }
+
+               sfc->buffer = dmam_alloc_coherent(dev, SFC_MAX_IOSIZE_VER3,
+                                                 &sfc->dma_buffer,
+                                                 GFP_KERNEL);
+               if (!sfc->buffer)
+                       return -ENOMEM;
+       }
+
+       ret = clk_prepare_enable(sfc->hclk);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to enable ahb clk\n");
+               goto err_hclk;
+       }
+
+       ret = clk_prepare_enable(sfc->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to enable interface clk\n");
+               goto err_clk;
+       }
+
+       /* Find the irq */
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0) {
+               dev_err(dev, "Failed to get the irq\n");
+               goto err_irq;
+       }
+
+       ret = devm_request_irq(dev, ret, rockchip_sfc_irq_handler,
+                              0, pdev->name, sfc);
+       if (ret) {
+               dev_err(dev, "Failed to request irq\n");
+
+               return ret;
+       }
+
+       ret = rockchip_sfc_init(sfc);
+       if (ret)
+               goto err_irq;
+
+       sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
+       sfc->version = rockchip_sfc_get_version(sfc);
+
+       ret = spi_register_master(master);
+       if (ret)
+               goto err_irq;
+
+       return 0;
+
+err_irq:
+       clk_disable_unprepare(sfc->clk);
+err_clk:
+       clk_disable_unprepare(sfc->hclk);
+err_hclk:
+       return ret;
+}
+
+static int rockchip_sfc_remove(struct platform_device *pdev)
+{
+       struct spi_master *master = platform_get_drvdata(pdev);
+       struct rockchip_sfc *sfc = platform_get_drvdata(pdev);
+
+       spi_unregister_master(master);
+
+       clk_disable_unprepare(sfc->clk);
+       clk_disable_unprepare(sfc->hclk);
+
+       return 0;
+}
+
+static const struct of_device_id rockchip_sfc_dt_ids[] = {
+       { .compatible = "rockchip,sfc"},
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rockchip_sfc_dt_ids);
+
+static struct platform_driver rockchip_sfc_driver = {
+       .driver = {
+               .name   = "rockchip-sfc",
+               .of_match_table = rockchip_sfc_dt_ids,
+       },
+       .probe  = rockchip_sfc_probe,
+       .remove = rockchip_sfc_remove,
+};
+module_platform_driver(rockchip_sfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Rockchip Serial Flash Controller Driver");
+MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
+MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+MODULE_AUTHOR("Jon Lin <Jon.lin@rock-chips.com>");
index ab19068..1edbf44 100644 (file)
 
 /*
  * ADI slave devices include RTC, ADC, regulator, charger, thermal and so on.
- * The slave devices address offset is always 0x8000 and size is 4K.
+ * ADI supports 12/14-bit addresses on r2p0, and additionally 17-bit addresses
+ * on r3p0 or later versions. Since bits [1:0] are always zero, the spec
+ * describes these as 10/12/15-bit address modes.
+ * The 10-bit mode supports a single slave; the 12/15-bit modes support 3
+ * slaves, with the two high bits used as the slave id.
+ * The slave devices' address offset is 0x8000 for the 10/12-bit address
+ * modes, and 0x20000 for the 15-bit mode.
  */
-#define ADI_SLAVE_ADDR_SIZE            SZ_4K
-#define ADI_SLAVE_OFFSET               0x8000
+#define ADI_10BIT_SLAVE_ADDR_SIZE      SZ_4K
+#define ADI_10BIT_SLAVE_OFFSET         0x8000
+#define ADI_12BIT_SLAVE_ADDR_SIZE      SZ_16K
+#define ADI_12BIT_SLAVE_OFFSET         0x8000
+#define ADI_15BIT_SLAVE_ADDR_SIZE      SZ_128K
+#define ADI_15BIT_SLAVE_OFFSET         0x20000
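Summary of the address modes assumed by the defines above (the window is the addressable size per mode):

        /*   mode     slave offset   window    slaves
         *   10-bit   0x8000         SZ_4K     1
         *   12-bit   0x8000         SZ_16K    3 (slave id in the two high bits)
         *   15-bit   0x20000        SZ_128K   3 (slave id in the two high bits)
         */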
 
 /* Timeout (ms) for the trylock of hardware spinlocks */
 #define ADI_HWSPINLOCK_TIMEOUT         5000
 
 #define ADI_FIFO_DRAIN_TIMEOUT         1000
 #define ADI_READ_TIMEOUT               2000
-#define REG_ADDR_LOW_MASK              GENMASK(11, 0)
+
+/*
+ * Read back address from REG_ADI_RD_DATA bit[30:16] which maps to:
+ * REG_ADI_RD_CMD bit[14:0] for r2p0
+ * REG_ADI_RD_CMD bit[16:2] for r3p0
+ */
+#define RDBACK_ADDR_MASK_R2            GENMASK(14, 0)
+#define RDBACK_ADDR_MASK_R3            GENMASK(16, 2)
+#define RDBACK_ADDR_SHIFT_R3           2
 
 /* Registers definitions for PMIC watchdog controller */
-#define REG_WDG_LOAD_LOW               0x80
-#define REG_WDG_LOAD_HIGH              0x84
-#define REG_WDG_CTRL                   0x88
-#define REG_WDG_LOCK                   0xa0
+#define REG_WDG_LOAD_LOW               0x0
+#define REG_WDG_LOAD_HIGH              0x4
+#define REG_WDG_CTRL                   0x8
+#define REG_WDG_LOCK                   0x20
 
 /* Bits definitions for register REG_WDG_CTRL */
 #define BIT_WDG_RUN                    BIT(1)
 #define BIT_WDG_NEW                    BIT(2)
 #define BIT_WDG_RST                    BIT(3)
 
+/* Bits definitions for register REG_MODULE_EN */
+#define BIT_WDG_EN                     BIT(2)
+
 /* Registers definitions for PMIC */
 #define PMIC_RST_STATUS                        0xee8
 #define PMIC_MODULE_EN                 0xc08
 #define PMIC_CLK_EN                    0xc18
-#define BIT_WDG_EN                     BIT(2)
+#define PMIC_WDG_BASE                  0x80
 
 /* Definition of PMIC reset status register */
 #define HWRST_STATUS_SECURITY          0x02
 #define HWRST_STATUS_WATCHDOG          0xf0
 
 /* Use default timeout 50 ms that converts to watchdog values */
-#define WDG_LOAD_VAL                   ((50 * 1000) / 32768)
+#define WDG_LOAD_VAL                   ((50 * 32768) / 1000)
 #define WDG_LOAD_MASK                  GENMASK(15, 0)
 #define WDG_UNLOCK_KEY                 0xe551
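Worked arithmetic for the corrected WDG_LOAD_VAL above; the watchdog counter runs at 32768 Hz:

        /* ticks = timeout_ms * 32768 / 1000
         *   50 ms -> 50 * 32768 / 1000 = 1638 ticks (~49.99 ms)
         * the old formula, (50 * 1000) / 32768, truncated to 1 tick (~30.5 us).
         */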
 
+struct sprd_adi_wdg {
+       u32 base;
+       u32 rst_sts;
+       u32 wdg_en;
+       u32 wdg_clk;
+};
+
+struct sprd_adi_data {
+       u32 slave_offset;
+       u32 slave_addr_size;
+       int (*read_check)(u32 val, u32 reg);
+       int (*restart)(struct notifier_block *this,
+                      unsigned long mode, void *cmd);
+       void (*wdg_rst)(void *p);
+};
+
 struct sprd_adi {
        struct spi_controller   *ctlr;
        struct device           *dev;
@@ -115,26 +152,21 @@ struct sprd_adi {
        unsigned long           slave_vbase;
        unsigned long           slave_pbase;
        struct notifier_block   restart_handler;
+       const struct sprd_adi_data *data;
 };
 
-static int sprd_adi_check_paddr(struct sprd_adi *sadi, u32 paddr)
+static int sprd_adi_check_addr(struct sprd_adi *sadi, u32 reg)
 {
-       if (paddr < sadi->slave_pbase || paddr >
-           (sadi->slave_pbase + ADI_SLAVE_ADDR_SIZE)) {
+       if (reg >= sadi->data->slave_addr_size) {
                dev_err(sadi->dev,
-                       "slave physical address is incorrect, addr = 0x%x\n",
-                       paddr);
+                       "slave address offset is incorrect, reg = 0x%x\n",
+                       reg);
                return -EINVAL;
        }
 
        return 0;
 }
 
-static unsigned long sprd_adi_to_vaddr(struct sprd_adi *sadi, u32 paddr)
-{
-       return (paddr - sadi->slave_pbase + sadi->slave_vbase);
-}
-
 static int sprd_adi_drain_fifo(struct sprd_adi *sadi)
 {
        u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
@@ -161,11 +193,35 @@ static int sprd_adi_fifo_is_full(struct sprd_adi *sadi)
        return readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS) & BIT_FIFO_FULL;
 }
 
-static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
+static int sprd_adi_read_check(u32 val, u32 addr)
+{
+       u32 rd_addr;
+
+       rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;
+
+       if (rd_addr != addr) {
+               pr_err("ADI read error, addr = 0x%x, val = 0x%x\n", addr, val);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int sprd_adi_read_check_r2(u32 val, u32 reg)
+{
+       return sprd_adi_read_check(val, reg & RDBACK_ADDR_MASK_R2);
+}
+
+static int sprd_adi_read_check_r3(u32 val, u32 reg)
+{
+       return sprd_adi_read_check(val, (reg & RDBACK_ADDR_MASK_R3) >> RDBACK_ADDR_SHIFT_R3);
+}
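A worked example of the r3p0 readback check above; the register offset is illustrative:

        /* reg = 0x1a2c (bits [1:0] are always zero)
         *   expected = (0x1a2c & GENMASK(16, 2)) >> 2 = 0x68b
         * which is compared against the address echoed in bits [30:16] of RD_DATA.
         */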
+
+static int sprd_adi_read(struct sprd_adi *sadi, u32 reg, u32 *read_val)
 {
        int read_timeout = ADI_READ_TIMEOUT;
        unsigned long flags;
-       u32 val, rd_addr;
+       u32 val;
        int ret = 0;
 
        if (sadi->hwlock) {
@@ -178,11 +234,15 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
                }
        }
 
+       ret = sprd_adi_check_addr(sadi, reg);
+       if (ret)
+               goto out;
+
        /*
-        * Set the physical register address need to read into RD_CMD register,
+        * Set the slave address offset need to read into RD_CMD register,
         * then ADI controller will start to transfer automatically.
         */
-       writel_relaxed(reg_paddr, sadi->base + REG_ADI_RD_CMD);
+       writel_relaxed(reg, sadi->base + REG_ADI_RD_CMD);
 
        /*
         * Wait read operation complete, the BIT_RD_CMD_BUSY will be set
@@ -205,18 +265,15 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
        }
 
        /*
-        * The return value includes data and read register address, from bit 0
-        * to bit 15 are data, and from bit 16 to bit 30 are read register
-        * address. Then we can check the returned register address to validate
-        * data.
+        * Before ADI r5p0 the return value includes both data and the read
+        * register address: bits 0 to 15 hold the data and bits 16 to 30 hold
+        * the read register address. We can then check the returned register
+        * address to validate the data.
         */
-       rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;
-
-       if (rd_addr != (reg_paddr & REG_ADDR_LOW_MASK)) {
-               dev_err(sadi->dev, "read error, reg addr = 0x%x, val = 0x%x\n",
-                       reg_paddr, val);
-               ret = -EIO;
-               goto out;
+       if (sadi->data->read_check) {
+               ret = sadi->data->read_check(val, reg);
+               if (ret < 0)
+                       goto out;
        }
 
        *read_val = val & RD_VALUE_MASK;
@@ -227,9 +284,8 @@ out:
        return ret;
 }
 
-static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
+static int sprd_adi_write(struct sprd_adi *sadi, u32 reg, u32 val)
 {
-       unsigned long reg = sprd_adi_to_vaddr(sadi, reg_paddr);
        u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
        unsigned long flags;
        int ret;
@@ -244,6 +300,10 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
                }
        }
 
+       ret = sprd_adi_check_addr(sadi, reg);
+       if (ret)
+               goto out;
+
        ret = sprd_adi_drain_fifo(sadi);
        if (ret < 0)
                goto out;
@@ -254,7 +314,8 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
         */
        do {
                if (!sprd_adi_fifo_is_full(sadi)) {
-                       writel_relaxed(val, (void __iomem *)reg);
+                       /* the write must use the virtual (mapped) register address */
+                       writel_relaxed(val, (void __iomem *)(sadi->slave_vbase + reg));
                        break;
                }
 
@@ -277,60 +338,41 @@ static int sprd_adi_transfer_one(struct spi_controller *ctlr,
                                 struct spi_transfer *t)
 {
        struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
-       u32 phy_reg, val;
+       u32 reg, val;
        int ret;
 
        if (t->rx_buf) {
-               phy_reg = *(u32 *)t->rx_buf + sadi->slave_pbase;
-
-               ret = sprd_adi_check_paddr(sadi, phy_reg);
-               if (ret)
-                       return ret;
-
-               ret = sprd_adi_read(sadi, phy_reg, &val);
-               if (ret)
-                       return ret;
-
+               reg = *(u32 *)t->rx_buf;
+               ret = sprd_adi_read(sadi, reg, &val);
                *(u32 *)t->rx_buf = val;
        } else if (t->tx_buf) {
                u32 *p = (u32 *)t->tx_buf;
-
-               /*
-                * Get the physical register address need to write and convert
-                * the physical address to virtual address. Since we need
-                * virtual register address to write.
-                */
-               phy_reg = *p++ + sadi->slave_pbase;
-               ret = sprd_adi_check_paddr(sadi, phy_reg);
-               if (ret)
-                       return ret;
-
+               reg = *p++;
                val = *p;
-               ret = sprd_adi_write(sadi, phy_reg, val);
-               if (ret)
-                       return ret;
+               ret = sprd_adi_write(sadi, reg, val);
        } else {
                dev_err(sadi->dev, "no buffer for transfer\n");
-               return -EINVAL;
+               ret = -EINVAL;
        }
 
-       return 0;
+       return ret;
 }
 
-static void sprd_adi_set_wdt_rst_mode(struct sprd_adi *sadi)
+static void sprd_adi_set_wdt_rst_mode(void *p)
 {
 #if IS_ENABLED(CONFIG_SPRD_WATCHDOG)
        u32 val;
+       struct sprd_adi *sadi = (struct sprd_adi *)p;
 
-       /* Set default watchdog reboot mode */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
+       /* Init watchdog reset mode */
+       sprd_adi_read(sadi, PMIC_RST_STATUS, &val);
        val |= HWRST_STATUS_WATCHDOG;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
+       sprd_adi_write(sadi, PMIC_RST_STATUS, val);
 #endif
 }
 
-static int sprd_adi_restart_handler(struct notifier_block *this,
-                                   unsigned long mode, void *cmd)
+static int sprd_adi_restart(struct notifier_block *this, unsigned long mode,
+                                 void *cmd, struct sprd_adi_wdg *wdg)
 {
        struct sprd_adi *sadi = container_of(this, struct sprd_adi,
                                             restart_handler);
@@ -366,40 +408,40 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
                reboot_mode = HWRST_STATUS_NORMAL;
 
        /* Record the reboot mode */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
+       sprd_adi_read(sadi, wdg->rst_sts, &val);
        val &= ~HWRST_STATUS_WATCHDOG;
        val |= reboot_mode;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
+       sprd_adi_write(sadi, wdg->rst_sts, val);
 
        /* Enable the interface clock of the watchdog */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_MODULE_EN, &val);
+       sprd_adi_read(sadi, wdg->wdg_en, &val);
        val |= BIT_WDG_EN;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_MODULE_EN, val);
+       sprd_adi_write(sadi, wdg->wdg_en, val);
 
        /* Enable the work clock of the watchdog */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_CLK_EN, &val);
+       sprd_adi_read(sadi, wdg->wdg_clk, &val);
        val |= BIT_WDG_EN;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_CLK_EN, val);
+       sprd_adi_write(sadi, wdg->wdg_clk, val);
 
        /* Unlock the watchdog */
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, WDG_UNLOCK_KEY);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, WDG_UNLOCK_KEY);
 
-       sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
+       sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
        val |= BIT_WDG_NEW;
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);
 
        /* Load the watchdog timeout value, 50ms is always enough. */
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_HIGH, 0);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_LOW,
                       WDG_LOAD_VAL & WDG_LOAD_MASK);
 
        /* Start the watchdog to reset system */
-       sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
+       sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
        val |= BIT_WDG_RUN | BIT_WDG_RST;
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);
 
        /* Lock the watchdog */
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
 
        mdelay(1000);
 
@@ -407,6 +449,19 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
        return NOTIFY_DONE;
 }
 
+static int sprd_adi_restart_sc9860(struct notifier_block *this,
+                                          unsigned long mode, void *cmd)
+{
+       struct sprd_adi_wdg wdg = {
+               .base = PMIC_WDG_BASE,
+               .rst_sts = PMIC_RST_STATUS,
+               .wdg_en = PMIC_MODULE_EN,
+               .wdg_clk = PMIC_CLK_EN,
+       };
+
+       return sprd_adi_restart(this, mode, cmd, &wdg);
+}
+
 static void sprd_adi_hw_init(struct sprd_adi *sadi)
 {
        struct device_node *np = sadi->dev->of_node;
@@ -458,10 +513,11 @@ static void sprd_adi_hw_init(struct sprd_adi *sadi)
 static int sprd_adi_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
+       const struct sprd_adi_data *data;
        struct spi_controller *ctlr;
        struct sprd_adi *sadi;
        struct resource *res;
-       u32 num_chipselect;
+       u16 num_chipselect;
        int ret;
 
        if (!np) {
@@ -469,6 +525,12 @@ static int sprd_adi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       data = of_device_get_match_data(&pdev->dev);
+       if (!data) {
+               dev_err(&pdev->dev, "no matching driver data found\n");
+               return -EINVAL;
+       }
+
        pdev->id = of_alias_get_id(np, "spi");
        num_chipselect = of_get_child_count(np);
 
@@ -486,10 +548,12 @@ static int sprd_adi_probe(struct platform_device *pdev)
                goto put_ctlr;
        }
 
-       sadi->slave_vbase = (unsigned long)sadi->base + ADI_SLAVE_OFFSET;
-       sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
+       sadi->slave_vbase = (unsigned long)sadi->base +
+                           data->slave_offset;
+       sadi->slave_pbase = res->start + data->slave_offset;
        sadi->ctlr = ctlr;
        sadi->dev = &pdev->dev;
+       sadi->data = data;
        ret = of_hwspin_lock_get_id(np, 0);
        if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
                sadi->hwlock =
@@ -510,7 +574,9 @@ static int sprd_adi_probe(struct platform_device *pdev)
        }
 
        sprd_adi_hw_init(sadi);
-       sprd_adi_set_wdt_rst_mode(sadi);
+
+       if (sadi->data->wdg_rst)
+               sadi->data->wdg_rst(sadi);
 
        ctlr->dev.of_node = pdev->dev.of_node;
        ctlr->bus_num = pdev->id;
@@ -525,12 +591,14 @@ static int sprd_adi_probe(struct platform_device *pdev)
                goto put_ctlr;
        }
 
-       sadi->restart_handler.notifier_call = sprd_adi_restart_handler;
-       sadi->restart_handler.priority = 128;
-       ret = register_restart_handler(&sadi->restart_handler);
-       if (ret) {
-               dev_err(&pdev->dev, "can not register restart handler\n");
-               goto put_ctlr;
+       if (sadi->data->restart) {
+               sadi->restart_handler.notifier_call = sadi->data->restart;
+               sadi->restart_handler.priority = 128;
+               ret = register_restart_handler(&sadi->restart_handler);
+               if (ret) {
+                       dev_err(&pdev->dev, "can not register restart handler\n");
+                       goto put_ctlr;
+               }
        }
 
        return 0;
@@ -549,9 +617,38 @@ static int sprd_adi_remove(struct platform_device *pdev)
        return 0;
 }
 
+static struct sprd_adi_data sc9860_data = {
+       .slave_offset = ADI_10BIT_SLAVE_OFFSET,
+       .slave_addr_size = ADI_10BIT_SLAVE_ADDR_SIZE,
+       .read_check = sprd_adi_read_check_r2,
+       .restart = sprd_adi_restart_sc9860,
+       .wdg_rst = sprd_adi_set_wdt_rst_mode,
+};
+
+static struct sprd_adi_data sc9863_data = {
+       .slave_offset = ADI_12BIT_SLAVE_OFFSET,
+       .slave_addr_size = ADI_12BIT_SLAVE_ADDR_SIZE,
+       .read_check = sprd_adi_read_check_r3,
+};
+
+static struct sprd_adi_data ums512_data = {
+       .slave_offset = ADI_15BIT_SLAVE_OFFSET,
+       .slave_addr_size = ADI_15BIT_SLAVE_ADDR_SIZE,
+       .read_check = sprd_adi_read_check_r3,
+};
+
 static const struct of_device_id sprd_adi_of_match[] = {
        {
                .compatible = "sprd,sc9860-adi",
+               .data = &sc9860_data,
+       },
+       {
+               .compatible = "sprd,sc9863-adi",
+               .data = &sc9863_data,
+       },
+       {
+               .compatible = "sprd,ums512-adi",
+               .data = &ums512_data,
        },
        { },
 };
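
The per-compatible driver data pattern above (of_device_id .data plus of_device_get_match_data()) is the standard way to carry SoC-variant parameters into probe. A minimal sketch, with illustrative names only:

struct foo_variant_data {
	unsigned int slave_offset;
};

static const struct foo_variant_data foo_v1_data = {
	.slave_offset = 0x8000,
};

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1_data },
	{ /* sentinel */ },
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_variant_data *data;

	/* returns the .data pointer of the matching of_device_id entry */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	/* ... use data->slave_offset for the variant-specific layout ... */
	return 0;
}
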
index 05618a6..9bd3fd1 100644 (file)
 #define SPI_3WIRE_TX           3
 #define SPI_3WIRE_RX           4
 
+#define STM32_SPI_AUTOSUSPEND_DELAY            1       /* 1 ms */
+
 /*
  * use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers
  * without fifo buffers.
@@ -568,29 +570,30 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi)
 /**
  * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
  * @spi: pointer to the spi controller data structure
- * @flush: boolean indicating that FIFO should be flushed
  *
  * Write in rx_buf depends on remaining bytes to avoid to write beyond
  * rx_buf end.
  */
-static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
+static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi)
 {
        u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
        u32 rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
 
        while ((spi->rx_len > 0) &&
               ((sr & STM32H7_SPI_SR_RXP) ||
-               (flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
+               ((sr & STM32H7_SPI_SR_EOT) &&
+                ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
                u32 offs = spi->cur_xferlen - spi->rx_len;
 
                if ((spi->rx_len >= sizeof(u32)) ||
-                   (flush && (sr & STM32H7_SPI_SR_RXWNE))) {
+                   (sr & STM32H7_SPI_SR_RXWNE)) {
                        u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
 
                        *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
                        spi->rx_len -= sizeof(u32);
                } else if ((spi->rx_len >= sizeof(u16)) ||
-                          (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
+                          (!(sr & STM32H7_SPI_SR_RXWNE) &&
+                           (rxplvl >= 2 || spi->cur_bpw > 8))) {
                        u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
 
                        *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
@@ -606,8 +609,8 @@ static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
                rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
        }
 
-       dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
-               flush ? "(flush)" : "", spi->rx_len);
+       dev_dbg(spi->dev, "%s: %d bytes left (sr=%08x)\n",
+               __func__, spi->rx_len, sr);
 }
 
 /**
@@ -674,18 +677,12 @@ static void stm32f4_spi_disable(struct stm32_spi *spi)
  * stm32h7_spi_disable - Disable SPI controller
  * @spi: pointer to the spi controller data structure
  *
- * RX-Fifo is flushed when SPI controller is disabled. To prevent any data
- * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in
- * RX-Fifo.
- * Normally, if TSIZE has been configured, we should relax the hardware at the
- * reception of the EOT interrupt. But in case of error, EOT will not be
- * raised. So the subsystem unprepare_message call allows us to properly
- * complete the transfer from an hardware point of view.
+ * RX-Fifo is flushed when SPI controller is disabled.
  */
 static void stm32h7_spi_disable(struct stm32_spi *spi)
 {
        unsigned long flags;
-       u32 cr1, sr;
+       u32 cr1;
 
        dev_dbg(spi->dev, "disable controller\n");
 
@@ -698,25 +695,6 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
                return;
        }
 
-       /* Wait on EOT or suspend the flow */
-       if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR,
-                                             sr, !(sr & STM32H7_SPI_SR_EOT),
-                                             10, 100000) < 0) {
-               if (cr1 & STM32H7_SPI_CR1_CSTART) {
-                       writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP,
-                                      spi->base + STM32H7_SPI_CR1);
-                       if (readl_relaxed_poll_timeout_atomic(
-                                               spi->base + STM32H7_SPI_SR,
-                                               sr, !(sr & STM32H7_SPI_SR_SUSP),
-                                               10, 100000) < 0)
-                               dev_warn(spi->dev,
-                                        "Suspend request timeout\n");
-               }
-       }
-
-       if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
-               stm32h7_spi_read_rxfifo(spi, true);
-
        if (spi->cur_usedma && spi->dma_tx)
                dmaengine_terminate_all(spi->dma_tx);
        if (spi->cur_usedma && spi->dma_rx)
@@ -911,7 +889,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
                if (__ratelimit(&rs))
                        dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
-                       stm32h7_spi_read_rxfifo(spi, false);
+                       stm32h7_spi_read_rxfifo(spi);
                /*
                 * If communication is suspended while using DMA, it means
                 * that something went wrong, so stop the current transfer
@@ -932,8 +910,10 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
 
        if (sr & STM32H7_SPI_SR_EOT) {
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
-                       stm32h7_spi_read_rxfifo(spi, true);
-               end = true;
+                       stm32h7_spi_read_rxfifo(spi);
+               if (!spi->cur_usedma ||
+                   (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX))
+                       end = true;
        }
 
        if (sr & STM32H7_SPI_SR_TXP)
@@ -942,7 +922,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
 
        if (sr & STM32H7_SPI_SR_RXP)
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
-                       stm32h7_spi_read_rxfifo(spi, false);
+                       stm32h7_spi_read_rxfifo(spi);
 
        writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
 
@@ -1041,42 +1021,17 @@ static void stm32f4_spi_dma_tx_cb(void *data)
 }
 
 /**
- * stm32f4_spi_dma_rx_cb - dma callback
+ * stm32_spi_dma_rx_cb - dma callback
  * @data: pointer to the spi controller data structure
  *
  * DMA callback is called when the transfer is complete for DMA RX channel.
  */
-static void stm32f4_spi_dma_rx_cb(void *data)
+static void stm32_spi_dma_rx_cb(void *data)
 {
        struct stm32_spi *spi = data;
 
        spi_finalize_current_transfer(spi->master);
-       stm32f4_spi_disable(spi);
-}
-
-/**
- * stm32h7_spi_dma_cb - dma callback
- * @data: pointer to the spi controller data structure
- *
- * DMA callback is called when the transfer is complete or when an error
- * occurs. If the transfer is complete, EOT flag is raised.
- */
-static void stm32h7_spi_dma_cb(void *data)
-{
-       struct stm32_spi *spi = data;
-       unsigned long flags;
-       u32 sr;
-
-       spin_lock_irqsave(&spi->lock, flags);
-
-       sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
-
-       spin_unlock_irqrestore(&spi->lock, flags);
-
-       if (!(sr & STM32H7_SPI_SR_EOT))
-               dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
-
-       /* Now wait for EOT, or SUSP or OVR in case of error */
+       spi->cfg->disable(spi);
 }
 
 /**
@@ -1242,11 +1197,13 @@ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
  */
 static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
 {
-       /* Enable the interrupts relative to the end of transfer */
-       stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE |
-                                                STM32H7_SPI_IER_TXTFIE |
-                                                STM32H7_SPI_IER_OVRIE |
-                                                STM32H7_SPI_IER_MODFIE);
+       uint32_t ier = STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
+
+       /* Enable the interrupts */
+       if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX)
+               ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE;
+
+       stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
 
        stm32_spi_enable(spi);
 
@@ -1645,10 +1602,6 @@ static int stm32_spi_transfer_one(struct spi_master *master,
        struct stm32_spi *spi = spi_master_get_devdata(master);
        int ret;
 
-       /* Don't do anything on 0 bytes transfers */
-       if (transfer->len == 0)
-               return 0;
-
        spi->tx_buf = transfer->tx_buf;
        spi->rx_buf = transfer->rx_buf;
        spi->tx_len = spi->tx_buf ? transfer->len : 0;
@@ -1762,7 +1715,7 @@ static const struct stm32_spi_cfg stm32f4_spi_cfg = {
        .set_mode = stm32f4_spi_set_mode,
        .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
        .dma_tx_cb = stm32f4_spi_dma_tx_cb,
-       .dma_rx_cb = stm32f4_spi_dma_rx_cb,
+       .dma_rx_cb = stm32_spi_dma_rx_cb,
        .transfer_one_irq = stm32f4_spi_transfer_one_irq,
        .irq_handler_event = stm32f4_spi_irq_event,
        .irq_handler_thread = stm32f4_spi_irq_thread,
@@ -1782,8 +1735,11 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = {
        .set_data_idleness = stm32h7_spi_data_idleness,
        .set_number_of_data = stm32h7_spi_number_of_data,
        .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
-       .dma_rx_cb = stm32h7_spi_dma_cb,
-       .dma_tx_cb = stm32h7_spi_dma_cb,
+       .dma_rx_cb = stm32_spi_dma_rx_cb,
+       /*
+        * dma_tx_cb is not necessary because, for TX, the DMA transfer is
+        * followed by an SPI access, so completion handling is done in the
+        * SPI interrupt handler
+        */
        .transfer_one_irq = stm32h7_spi_transfer_one_irq,
        .irq_handler_thread = stm32h7_spi_irq_thread,
        .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
@@ -1927,6 +1883,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
        if (spi->dma_tx || spi->dma_rx)
                master->can_dma = stm32_spi_can_dma;
 
+       pm_runtime_set_autosuspend_delay(&pdev->dev,
+                                        STM32_SPI_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
@@ -1938,6 +1897,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
                goto err_pm_disable;
        }
 
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(&pdev->dev);
+
        dev_info(&pdev->dev, "driver initialized\n");
 
        return 0;
@@ -1946,6 +1908,7 @@ err_pm_disable:
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
 err_dma_release:
        if (spi->dma_tx)
                dma_release_channel(spi->dma_tx);
@@ -1970,6 +1933,8 @@ static int stm32_spi_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
+
        if (master->dma_tx)
                dma_release_channel(master->dma_tx);
        if (master->dma_rx)
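
The runtime PM changes in this probe follow the usual autosuspend sequence; a condensed sketch (error handling trimmed, dev being the controller's struct device):

	/* probe: opt in to autosuspend before first use */
	pm_runtime_set_autosuspend_delay(dev, 1);	/* delay in ms */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/* once setup is complete, allow the device to suspend again */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	/* remove/error path: undo in reverse order */
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_dont_use_autosuspend(dev);
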
index 5131141..e9de1d9 100644 (file)
@@ -717,12 +717,12 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
        dma_release_channel(dma_chan);
 }
 
-static int tegra_spi_set_hw_cs_timing(struct spi_device *spi,
-                                     struct spi_delay *setup,
-                                     struct spi_delay *hold,
-                                     struct spi_delay *inactive)
+static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
 {
        struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+       struct spi_delay *setup = &spi->cs_setup;
+       struct spi_delay *hold = &spi->cs_hold;
+       struct spi_delay *inactive = &spi->cs_inactive;
        u8 setup_dly, hold_dly, inactive_dly;
        u32 setup_hold;
        u32 spi_cs_timing;
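
With the reworked set_hw_cs_timing prototype the controller callback no longer receives the delays as arguments; it reads them from the spi_device. A sketch of a callback under the new signature (the foo_* names and the register-programming helper are hypothetical):

static int foo_set_hw_cs_timing(struct spi_device *spi)
{
	struct foo_ctlr *fc = spi_master_get_devdata(spi->master);
	struct spi_delay *setup = &spi->cs_setup;
	struct spi_delay *hold = &spi->cs_hold;
	struct spi_delay *inactive = &spi->cs_inactive;

	/* translate the requested delays into controller register values */
	return foo_program_cs_timing(fc, setup, hold, inactive);
}
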
index 6a726c9..ebd27f8 100644 (file)
@@ -1061,33 +1061,12 @@ static int tegra_slink_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "Can not get clock %d\n", ret);
                goto exit_free_master;
        }
-       ret = clk_prepare(tspi->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
-               goto exit_free_master;
-       }
-       ret = clk_enable(tspi->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
-               goto exit_clk_unprepare;
-       }
-
-       spi_irq = platform_get_irq(pdev, 0);
-       tspi->irq = spi_irq;
-       ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
-                       tegra_slink_isr_thread, IRQF_ONESHOT,
-                       dev_name(&pdev->dev), tspi);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
-                                       tspi->irq);
-               goto exit_clk_disable;
-       }
 
        tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
        if (IS_ERR(tspi->rst)) {
                dev_err(&pdev->dev, "can not get reset\n");
                ret = PTR_ERR(tspi->rst);
-               goto exit_free_irq;
+               goto exit_free_master;
        }
 
        tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
@@ -1095,7 +1074,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
 
        ret = tegra_slink_init_dma_param(tspi, true);
        if (ret < 0)
-               goto exit_free_irq;
+               goto exit_free_master;
        ret = tegra_slink_init_dma_param(tspi, false);
        if (ret < 0)
                goto exit_rx_dma_free;
@@ -1106,16 +1085,9 @@ static int tegra_slink_probe(struct platform_device *pdev)
        init_completion(&tspi->xfer_completion);
 
        pm_runtime_enable(&pdev->dev);
-       if (!pm_runtime_enabled(&pdev->dev)) {
-               ret = tegra_slink_runtime_resume(&pdev->dev);
-               if (ret)
-                       goto exit_pm_disable;
-       }
-
-       ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret < 0) {
+       ret = pm_runtime_resume_and_get(&pdev->dev);
+       if (ret) {
                dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
-               pm_runtime_put_noidle(&pdev->dev);
                goto exit_pm_disable;
        }
 
@@ -1123,33 +1095,43 @@ static int tegra_slink_probe(struct platform_device *pdev)
        udelay(2);
        reset_control_deassert(tspi->rst);
 
+       spi_irq = platform_get_irq(pdev, 0);
+       tspi->irq = spi_irq;
+       ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+                                  tegra_slink_isr_thread, IRQF_ONESHOT,
+                                  dev_name(&pdev->dev), tspi);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+                       tspi->irq);
+               goto exit_pm_put;
+       }
+
        tspi->def_command_reg  = SLINK_M_S;
        tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
        tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
        tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
-       pm_runtime_put(&pdev->dev);
 
        master->dev.of_node = pdev->dev.of_node;
-       ret = devm_spi_register_master(&pdev->dev, master);
+       ret = spi_register_master(master);
        if (ret < 0) {
                dev_err(&pdev->dev, "can not register to master err %d\n", ret);
-               goto exit_pm_disable;
+               goto exit_free_irq;
        }
+
+       pm_runtime_put(&pdev->dev);
+
        return ret;
 
+exit_free_irq:
+       free_irq(spi_irq, tspi);
+exit_pm_put:
+       pm_runtime_put(&pdev->dev);
 exit_pm_disable:
        pm_runtime_disable(&pdev->dev);
-       if (!pm_runtime_status_suspended(&pdev->dev))
-               tegra_slink_runtime_suspend(&pdev->dev);
+
        tegra_slink_deinit_dma_param(tspi, false);
 exit_rx_dma_free:
        tegra_slink_deinit_dma_param(tspi, true);
-exit_free_irq:
-       free_irq(spi_irq, tspi);
-exit_clk_disable:
-       clk_disable(tspi->clk);
-exit_clk_unprepare:
-       clk_unprepare(tspi->clk);
 exit_free_master:
        spi_master_put(master);
        return ret;
@@ -1160,10 +1142,11 @@ static int tegra_slink_remove(struct platform_device *pdev)
        struct spi_master *master = platform_get_drvdata(pdev);
        struct tegra_slink_data *tspi = spi_master_get_devdata(master);
 
+       spi_unregister_master(master);
+
        free_irq(tspi->irq, tspi);
 
-       clk_disable(tspi->clk);
-       clk_unprepare(tspi->clk);
+       pm_runtime_disable(&pdev->dev);
 
        if (tspi->tx_dma_chan)
                tegra_slink_deinit_dma_param(tspi, false);
@@ -1171,10 +1154,6 @@ static int tegra_slink_remove(struct platform_device *pdev)
        if (tspi->rx_dma_chan)
                tegra_slink_deinit_dma_param(tspi, true);
 
-       pm_runtime_disable(&pdev->dev);
-       if (!pm_runtime_status_suspended(&pdev->dev))
-               tegra_slink_runtime_suspend(&pdev->dev);
-
        return 0;
 }
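
Besides moving the IRQ request after the reset deassert, the probe rework switches from pm_runtime_get_sync() to pm_runtime_resume_and_get(), which already balances the usage count when resume fails; roughly:

	/* old pattern: caller must drop the reference on failure */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* new pattern: the helper drops the reference itself on error */
	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;
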
 
index 9262c64..cfa222c 100644 (file)
@@ -545,7 +545,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
        }
@@ -563,7 +563,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
        }
@@ -579,7 +579,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
 
@@ -603,7 +603,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
        }
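
The switch away from the interruptible wait matters because a pending signal ends wait_for_completion_interruptible_timeout() early while the QSPI transfer is still running. A sketch of the two return conventions (done and err are assumed locals):

	/* interruptible: a signal ends the wait with -ERESTARTSYS, so the
	 * hardware may still be mid-transfer when the caller proceeds
	 */
	if (wait_for_completion_interruptible_timeout(&done,
						      msecs_to_jiffies(1000)) <= 0)
		err = -ETIMEDOUT;

	/* non-interruptible: returns 0 only on a genuine timeout */
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000)))
		err = -ETIMEDOUT;
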
index c991811..65d14af 100644 (file)
@@ -58,6 +58,10 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
        const struct spi_device *spi = to_spi_device(dev);
        int len;
 
+       len = of_device_modalias(dev, buf, PAGE_SIZE);
+       if (len != -ENODEV)
+               return len;
+
        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;
@@ -842,9 +846,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
            !spi->controller->set_cs_timing) {
                if (activate)
-                       spi_delay_exec(&spi->controller->cs_setup, NULL);
+                       spi_delay_exec(&spi->cs_setup, NULL);
                else
-                       spi_delay_exec(&spi->controller->cs_hold, NULL);
+                       spi_delay_exec(&spi->cs_hold, NULL);
        }
 
        if (spi->mode & SPI_CS_HIGH)
@@ -887,7 +891,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
            !spi->controller->set_cs_timing) {
                if (!activate)
-                       spi_delay_exec(&spi->controller->cs_inactive, NULL);
+                       spi_delay_exec(&spi->cs_inactive, NULL);
        }
 }
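
The spi_set_cs() hunks above execute the chip-select delays from the spi_device rather than from the controller, so peripherals can request their own timing. A sketch of a client driver filling the new per-device fields (values are illustrative); the core applies them around CS toggling when the controller has no set_cs_timing hook:

	spi->cs_setup.value = 10;
	spi->cs_setup.unit = SPI_DELAY_UNIT_USECS;
	spi->cs_hold.value = 10;
	spi->cs_hold.unit = SPI_DELAY_UNIT_USECS;
	ret = spi_setup(spi);
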
 
index 6f5fe50..c8a6256 100644 (file)
@@ -1904,8 +1904,8 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
        dev_dbg(isp->dev, "Stop stream on pad %d for asd%d\n",
                atomisp_subdev_source_pad(vdev), asd->index);
 
-       BUG_ON(!rt_mutex_is_locked(&isp->mutex));
-       BUG_ON(!mutex_is_locked(&isp->streamoff_mutex));
+       lockdep_assert_held(&isp->mutex);
+       lockdep_assert_held(&isp->streamoff_mutex);
 
        if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
                dev_dbg(isp->dev, "unsupported v4l2 buf type\n");
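
lockdep_assert_held() compiles away when lockdep is disabled and, unlike the BUG_ON(!mutex_is_locked(...)) form, verifies that the current task actually holds the lock rather than that someone does. The pattern, in a hypothetical helper:

static void foo_do_locked_work(struct foo_dev *fd)
{
	/* no-op unless lockdep is enabled; warns if the caller forgot the lock */
	lockdep_assert_held(&fd->lock);

	/* ... work that relies on fd->lock being held by the caller ... */
}
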
diff --git a/drivers/staging/media/av7110/audio.h b/drivers/staging/media/av7110/audio.h
deleted file mode 100644 (file)
index 2f869da..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
-/*
- * audio.h - DEPRECATED MPEG-TS audio decoder API
- *
- * NOTE: should not be used on future drivers
- *
- * Copyright (C) 2000 Ralph  Metzler <ralph@convergence.de>
- *                  & Marcus Metzler <marcus@convergence.de>
- *                    for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Lesser Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- */
-
-#ifndef _DVBAUDIO_H_
-#define _DVBAUDIO_H_
-
-#include <linux/types.h>
-
-typedef enum {
-       AUDIO_SOURCE_DEMUX, /* Select the demux as the main source */
-       AUDIO_SOURCE_MEMORY /* Select internal memory as the main source */
-} audio_stream_source_t;
-
-
-typedef enum {
-       AUDIO_STOPPED,      /* Device is stopped */
-       AUDIO_PLAYING,      /* Device is currently playing */
-       AUDIO_PAUSED        /* Device is paused */
-} audio_play_state_t;
-
-
-typedef enum {
-       AUDIO_STEREO,
-       AUDIO_MONO_LEFT,
-       AUDIO_MONO_RIGHT,
-       AUDIO_MONO,
-       AUDIO_STEREO_SWAPPED
-} audio_channel_select_t;
-
-
-typedef struct audio_mixer {
-       unsigned int volume_left;
-       unsigned int volume_right;
-  /* what else do we need? bass, pass-through, ... */
-} audio_mixer_t;
-
-
-typedef struct audio_status {
-       int                    AV_sync_state;  /* sync audio and video? */
-       int                    mute_state;     /* audio is muted */
-       audio_play_state_t     play_state;     /* current playback state */
-       audio_stream_source_t  stream_source;  /* current stream source */
-       audio_channel_select_t channel_select; /* currently selected channel */
-       int                    bypass_mode;    /* pass on audio data to */
-       audio_mixer_t          mixer_state;    /* current mixer state */
-} audio_status_t;                              /* separate decoder hardware */
-
-
-/* for GET_CAPABILITIES and SET_FORMAT, the latter should only set one bit */
-#define AUDIO_CAP_DTS    1
-#define AUDIO_CAP_LPCM   2
-#define AUDIO_CAP_MP1    4
-#define AUDIO_CAP_MP2    8
-#define AUDIO_CAP_MP3   16
-#define AUDIO_CAP_AAC   32
-#define AUDIO_CAP_OGG   64
-#define AUDIO_CAP_SDDS 128
-#define AUDIO_CAP_AC3  256
-
-#define AUDIO_STOP                 _IO('o', 1)
-#define AUDIO_PLAY                 _IO('o', 2)
-#define AUDIO_PAUSE                _IO('o', 3)
-#define AUDIO_CONTINUE             _IO('o', 4)
-#define AUDIO_SELECT_SOURCE        _IO('o', 5)
-#define AUDIO_SET_MUTE             _IO('o', 6)
-#define AUDIO_SET_AV_SYNC          _IO('o', 7)
-#define AUDIO_SET_BYPASS_MODE      _IO('o', 8)
-#define AUDIO_CHANNEL_SELECT       _IO('o', 9)
-#define AUDIO_GET_STATUS           _IOR('o', 10, audio_status_t)
-
-#define AUDIO_GET_CAPABILITIES     _IOR('o', 11, unsigned int)
-#define AUDIO_CLEAR_BUFFER         _IO('o',  12)
-#define AUDIO_SET_ID               _IO('o', 13)
-#define AUDIO_SET_MIXER            _IOW('o', 14, audio_mixer_t)
-#define AUDIO_SET_STREAMTYPE       _IO('o', 15)
-#define AUDIO_BILINGUAL_CHANNEL_SELECT _IO('o', 20)
-
-#endif /* _DVBAUDIO_H_ */
index b8e8fc8..809d938 100644 (file)
@@ -9,12 +9,11 @@
 #include <linux/input.h>
 #include <linux/time.h>
 
-#include "video.h"
-#include "audio.h"
-#include "osd.h"
-
+#include <linux/dvb/video.h>
+#include <linux/dvb/audio.h>
 #include <linux/dvb/dmx.h>
 #include <linux/dvb/ca.h>
+#include <linux/dvb/osd.h>
 #include <linux/dvb/net.h>
 #include <linux/mutex.h>
 
diff --git a/drivers/staging/media/av7110/osd.h b/drivers/staging/media/av7110/osd.h
deleted file mode 100644 (file)
index 858997c..0000000
+++ /dev/null
@@ -1,181 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
-/*
- * osd.h - DEPRECATED On Screen Display API
- *
- * NOTE: should not be used on future drivers
- *
- * Copyright (C) 2001 Ralph  Metzler <ralph@convergence.de>
- *                  & Marcus Metzler <marcus@convergence.de>
- *                    for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Lesser Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- */
-
-#ifndef _DVBOSD_H_
-#define _DVBOSD_H_
-
-#include <linux/compiler.h>
-
-typedef enum {
-       /* All functions return -2 on "not open" */
-       OSD_Close = 1,  /* () */
-       /*
-        * Disables OSD and releases the buffers
-        * returns 0 on success
-        */
-       OSD_Open,       /* (x0,y0,x1,y1,BitPerPixel[2/4/8](color&0x0F),mix[0..15](color&0xF0)) */
-       /*
-        * Opens OSD with this size and bit depth
-        * returns 0 on success, -1 on DRAM allocation error, -2 on "already open"
-        */
-       OSD_Show,       /* () */
-       /*
-        * enables OSD mode
-        * returns 0 on success
-        */
-       OSD_Hide,       /* () */
-       /*
-        * disables OSD mode
-        * returns 0 on success
-        */
-       OSD_Clear,      /* () */
-       /*
-        * Sets all pixel to color 0
-        * returns 0 on success
-        */
-       OSD_Fill,       /* (color) */
-       /*
-        * Sets all pixel to color <col>
-        * returns 0 on success
-        */
-       OSD_SetColor,   /* (color,R{x0},G{y0},B{x1},opacity{y1}) */
-       /*
-        * set palette entry <num> to <r,g,b>, <mix> and <trans> apply
-        * R,G,B: 0..255
-        * R=Red, G=Green, B=Blue
-        * opacity=0:      pixel opacity 0% (only video pixel shows)
-        * opacity=1..254: pixel opacity as specified in header
-        * opacity=255:    pixel opacity 100% (only OSD pixel shows)
-        * returns 0 on success, -1 on error
-        */
-       OSD_SetPalette, /* (firstcolor{color},lastcolor{x0},data) */
-       /*
-        * Set a number of entries in the palette
-        * sets the entries "firstcolor" through "lastcolor" from the array "data"
-        * data has 4 byte for each color:
-        * R,G,B, and a opacity value: 0->transparent, 1..254->mix, 255->pixel
-        */
-       OSD_SetTrans,   /* (transparency{color}) */
-       /*
-        * Sets transparency of mixed pixel (0..15)
-        * returns 0 on success
-        */
-       OSD_SetPixel,   /* (x0,y0,color) */
-       /*
-        * sets pixel <x>,<y> to color number <col>
-        * returns 0 on success, -1 on error
-        */
-       OSD_GetPixel,   /* (x0,y0) */
-       /* returns color number of pixel <x>,<y>,  or -1 */
-       OSD_SetRow,     /* (x0,y0,x1,data) */
-       /*
-        * fills pixels x0,y through  x1,y with the content of data[]
-        * returns 0 on success, -1 on clipping all pixel (no pixel drawn)
-        */
-       OSD_SetBlock,   /* (x0,y0,x1,y1,increment{color},data) */
-       /*
-        * fills pixels x0,y0 through  x1,y1 with the content of data[]
-        * inc contains the width of one line in the data block,
-        * inc<=0 uses blockwidth as linewidth
-        * returns 0 on success, -1 on clipping all pixel
-        */
-       OSD_FillRow,    /* (x0,y0,x1,color) */
-       /*
-        * fills pixels x0,y through  x1,y with the color <col>
-        * returns 0 on success, -1 on clipping all pixel
-        */
-       OSD_FillBlock,  /* (x0,y0,x1,y1,color) */
-       /*
-        * fills pixels x0,y0 through  x1,y1 with the color <col>
-        * returns 0 on success, -1 on clipping all pixel
-        */
-       OSD_Line,       /* (x0,y0,x1,y1,color) */
-       /*
-        * draw a line from x0,y0 to x1,y1 with the color <col>
-        * returns 0 on success
-        */
-       OSD_Query,      /* (x0,y0,x1,y1,xasp{color}}), yasp=11 */
-       /*
-        * fills parameters with the picture dimensions and the pixel aspect ratio
-        * returns 0 on success
-        */
-       OSD_Test,       /* () */
-       /*
-        * draws a test picture. for debugging purposes only
-        * returns 0 on success
-        * TODO: remove "test" in final version
-        */
-       OSD_Text,       /* (x0,y0,size,color,text) */
-       OSD_SetWindow,  /* (x0) set window with number 0<x0<8 as current */
-       OSD_MoveWindow, /* move current window to (x0, y0) */
-       OSD_OpenRaw,    /* Open other types of OSD windows */
-} OSD_Command;
-
-typedef struct osd_cmd_s {
-       OSD_Command cmd;
-       int x0;
-       int y0;
-       int x1;
-       int y1;
-       int color;
-       void __user *data;
-} osd_cmd_t;
-
-/* OSD_OpenRaw: set 'color' to desired window type */
-typedef enum {
-       OSD_BITMAP1,           /* 1 bit bitmap */
-       OSD_BITMAP2,           /* 2 bit bitmap */
-       OSD_BITMAP4,           /* 4 bit bitmap */
-       OSD_BITMAP8,           /* 8 bit bitmap */
-       OSD_BITMAP1HR,         /* 1 Bit bitmap half resolution */
-       OSD_BITMAP2HR,         /* 2 bit bitmap half resolution */
-       OSD_BITMAP4HR,         /* 4 bit bitmap half resolution */
-       OSD_BITMAP8HR,         /* 8 bit bitmap half resolution */
-       OSD_YCRCB422,          /* 4:2:2 YCRCB Graphic Display */
-       OSD_YCRCB444,          /* 4:4:4 YCRCB Graphic Display */
-       OSD_YCRCB444HR,        /* 4:4:4 YCRCB graphic half resolution */
-       OSD_VIDEOTSIZE,        /* True Size Normal MPEG Video Display */
-       OSD_VIDEOHSIZE,        /* MPEG Video Display Half Resolution */
-       OSD_VIDEOQSIZE,        /* MPEG Video Display Quarter Resolution */
-       OSD_VIDEODSIZE,        /* MPEG Video Display Double Resolution */
-       OSD_VIDEOTHSIZE,       /* True Size MPEG Video Display Half Resolution */
-       OSD_VIDEOTQSIZE,       /* True Size MPEG Video Display Quarter Resolution*/
-       OSD_VIDEOTDSIZE,       /* True Size MPEG Video Display Double Resolution */
-       OSD_VIDEONSIZE,        /* Full Size MPEG Video Display */
-       OSD_CURSOR             /* Cursor */
-} osd_raw_window_t;
-
-typedef struct osd_cap_s {
-       int  cmd;
-#define OSD_CAP_MEMSIZE         1  /* memory size */
-       long val;
-} osd_cap_t;
-
-
-#define OSD_SEND_CMD            _IOW('o', 160, osd_cmd_t)
-#define OSD_GET_CAPABILITY      _IOR('o', 161, osd_cap_t)
-
-#endif
diff --git a/drivers/staging/media/av7110/video.h b/drivers/staging/media/av7110/video.h
deleted file mode 100644 (file)
index 179f1ec..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
-/*
- * video.h - DEPRECATED MPEG-TS video decoder API
- *
- * NOTE: should not be used on future drivers
- *
- * Copyright (C) 2000 Marcus Metzler <marcus@convergence.de>
- *                  & Ralph  Metzler <ralph@convergence.de>
- *                    for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- */
-
-#ifndef _UAPI_DVBVIDEO_H_
-#define _UAPI_DVBVIDEO_H_
-
-#include <linux/types.h>
-#ifndef __KERNEL__
-#include <time.h>
-#endif
-
-typedef enum {
-       VIDEO_FORMAT_4_3,     /* Select 4:3 format */
-       VIDEO_FORMAT_16_9,    /* Select 16:9 format. */
-       VIDEO_FORMAT_221_1    /* 2.21:1 */
-} video_format_t;
-
-
-typedef enum {
-       VIDEO_PAN_SCAN,       /* use pan and scan format */
-       VIDEO_LETTER_BOX,     /* use letterbox format */
-       VIDEO_CENTER_CUT_OUT  /* use center cut out format */
-} video_displayformat_t;
-
-typedef struct {
-       int w;
-       int h;
-       video_format_t aspect_ratio;
-} video_size_t;
-
-typedef enum {
-       VIDEO_SOURCE_DEMUX, /* Select the demux as the main source */
-       VIDEO_SOURCE_MEMORY /* If this source is selected, the stream
-                              comes from the user through the write
-                              system call */
-} video_stream_source_t;
-
-
-typedef enum {
-       VIDEO_STOPPED, /* Video is stopped */
-       VIDEO_PLAYING, /* Video is currently playing */
-       VIDEO_FREEZED  /* Video is freezed */
-} video_play_state_t;
-
-
-/* Decoder commands */
-#define VIDEO_CMD_PLAY        (0)
-#define VIDEO_CMD_STOP        (1)
-#define VIDEO_CMD_FREEZE      (2)
-#define VIDEO_CMD_CONTINUE    (3)
-
-/* Flags for VIDEO_CMD_FREEZE */
-#define VIDEO_CMD_FREEZE_TO_BLACK      (1 << 0)
-
-/* Flags for VIDEO_CMD_STOP */
-#define VIDEO_CMD_STOP_TO_BLACK                (1 << 0)
-#define VIDEO_CMD_STOP_IMMEDIATELY     (1 << 1)
-
-/* Play input formats: */
-/* The decoder has no special format requirements */
-#define VIDEO_PLAY_FMT_NONE         (0)
-/* The decoder requires full GOPs */
-#define VIDEO_PLAY_FMT_GOP          (1)
-
-/* The structure must be zeroed before use by the application
-   This ensures it can be extended safely in the future. */
-struct video_command {
-       __u32 cmd;
-       __u32 flags;
-       union {
-               struct {
-                       __u64 pts;
-               } stop;
-
-               struct {
-                       /* 0 or 1000 specifies normal speed,
-                          1 specifies forward single stepping,
-                          -1 specifies backward single stepping,
-                          >1: playback at speed/1000 of the normal speed,
-                          <-1: reverse playback at (-speed/1000) of the normal speed. */
-                       __s32 speed;
-                       __u32 format;
-               } play;
-
-               struct {
-                       __u32 data[16];
-               } raw;
-       };
-};
-
-/* FIELD_UNKNOWN can be used if the hardware does not know whether
-   the Vsync is for an odd, even or progressive (i.e. non-interlaced)
-   field. */
-#define VIDEO_VSYNC_FIELD_UNKNOWN      (0)
-#define VIDEO_VSYNC_FIELD_ODD          (1)
-#define VIDEO_VSYNC_FIELD_EVEN         (2)
-#define VIDEO_VSYNC_FIELD_PROGRESSIVE  (3)
-
-struct video_event {
-       __s32 type;
-#define VIDEO_EVENT_SIZE_CHANGED       1
-#define VIDEO_EVENT_FRAME_RATE_CHANGED 2
-#define VIDEO_EVENT_DECODER_STOPPED    3
-#define VIDEO_EVENT_VSYNC              4
-       /* unused, make sure to use atomic time for y2038 if it ever gets used */
-       long timestamp;
-       union {
-               video_size_t size;
-               unsigned int frame_rate;        /* in frames per 1000sec */
-               unsigned char vsync_field;      /* unknown/odd/even/progressive */
-       } u;
-};
-
-
-struct video_status {
-       int                   video_blank;   /* blank video on freeze? */
-       video_play_state_t    play_state;    /* current state of playback */
-       video_stream_source_t stream_source; /* current source (demux/memory) */
-       video_format_t        video_format;  /* current aspect ratio of stream*/
-       video_displayformat_t display_format;/* selected cropping mode */
-};
-
-
-struct video_still_picture {
-       char __user *iFrame;        /* pointer to a single iframe in memory */
-       __s32 size;
-};
-
-
-typedef __u16 video_attributes_t;
-/*   bits: descr. */
-/*   15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
-/*   13-12 TV system (0=525/60, 1=625/50) */
-/*   11-10 Aspect ratio (0=4:3, 3=16:9) */
-/*    9- 8 permitted display mode on 4:3 monitor (0=both, 1=only pan-sca */
-/*    7    line 21-1 data present in GOP (1=yes, 0=no) */
-/*    6    line 21-2 data present in GOP (1=yes, 0=no) */
-/*    5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */
-/*    2    source letterboxed (1=yes, 0=no) */
-/*    0    film/camera mode (0=
- *camera, 1=film (625/50 only)) */
-
-
-/* bit definitions for capabilities: */
-/* can the hardware decode MPEG1 and/or MPEG2? */
-#define VIDEO_CAP_MPEG1   1
-#define VIDEO_CAP_MPEG2   2
-/* can you send a system and/or program stream to video device?
-   (you still have to open the video and the audio device but only
-    send the stream to the video device) */
-#define VIDEO_CAP_SYS     4
-#define VIDEO_CAP_PROG    8
-/* can the driver also handle SPU, NAVI and CSS encoded data?
-   (CSS API is not present yet) */
-#define VIDEO_CAP_SPU    16
-#define VIDEO_CAP_NAVI   32
-#define VIDEO_CAP_CSS    64
-
-
-#define VIDEO_STOP                 _IO('o', 21)
-#define VIDEO_PLAY                 _IO('o', 22)
-#define VIDEO_FREEZE               _IO('o', 23)
-#define VIDEO_CONTINUE             _IO('o', 24)
-#define VIDEO_SELECT_SOURCE        _IO('o', 25)
-#define VIDEO_SET_BLANK            _IO('o', 26)
-#define VIDEO_GET_STATUS           _IOR('o', 27, struct video_status)
-#define VIDEO_GET_EVENT            _IOR('o', 28, struct video_event)
-#define VIDEO_SET_DISPLAY_FORMAT   _IO('o', 29)
-#define VIDEO_STILLPICTURE         _IOW('o', 30, struct video_still_picture)
-#define VIDEO_FAST_FORWARD         _IO('o', 31)
-#define VIDEO_SLOWMOTION           _IO('o', 32)
-#define VIDEO_GET_CAPABILITIES     _IOR('o', 33, unsigned int)
-#define VIDEO_CLEAR_BUFFER         _IO('o',  34)
-#define VIDEO_SET_STREAMTYPE       _IO('o', 36)
-#define VIDEO_SET_FORMAT           _IO('o', 37)
-#define VIDEO_GET_SIZE             _IOR('o', 55, video_size_t)
-
-/**
- * VIDEO_GET_PTS
- *
- * Read the 33 bit presentation time stamp as defined
- * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
- *
- * The PTS should belong to the currently played
- * frame if possible, but may also be a value close to it
- * like the PTS of the last decoded frame or the last PTS
- * extracted by the PES parser.
- */
-#define VIDEO_GET_PTS              _IOR('o', 57, __u64)
-
-/* Read the number of displayed frames since the decoder was started */
-#define VIDEO_GET_FRAME_COUNT     _IOR('o', 58, __u64)
-
-#define VIDEO_COMMAND             _IOWR('o', 59, struct video_command)
-#define VIDEO_TRY_COMMAND         _IOWR('o', 60, struct video_command)
-
-#endif /* _UAPI_DVBVIDEO_H_ */
index 691030e..f9bdf4e 100644 (file)
@@ -422,7 +422,6 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
                        dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
                                slot);
                        mt7621_control_assert(port);
-                       clk_disable_unprepare(port->clk);
                        port->enabled = false;
 
                        if (slot == 0) {
index 2297427..4eff3fd 100644 (file)
 #define FWBUFF_ALIGN_SZ 512
 #define MAX_DUMP_FWSZ (48 * 1024)
 
+static void rtl871x_load_fw_fail(struct _adapter *adapter)
+{
+       struct usb_device *udev = adapter->dvobjpriv.pusbdev;
+       struct device *dev = &udev->dev;
+       struct device *parent = dev->parent;
+
+       complete(&adapter->rtl8712_fw_ready);
+
+       dev_err(&udev->dev, "r8712u: Firmware request failed\n");
+
+       if (parent)
+               device_lock(parent);
+
+       device_release_driver(dev);
+
+       if (parent)
+               device_unlock(parent);
+}
+
 static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
 {
        struct _adapter *adapter = context;
 
        if (!firmware) {
-               struct usb_device *udev = adapter->dvobjpriv.pusbdev;
-               struct usb_interface *usb_intf = adapter->pusb_intf;
-
-               dev_err(&udev->dev, "r8712u: Firmware request failed\n");
-               usb_put_dev(udev);
-               usb_set_intfdata(usb_intf, NULL);
-               r8712_free_drv_sw(adapter);
-               adapter->dvobj_deinit(adapter);
-               complete(&adapter->rtl8712_fw_ready);
-               free_netdev(adapter->pnetdev);
+               rtl871x_load_fw_fail(adapter);
                return;
        }
        adapter->fw = firmware;
index 5901026..d5fc902 100644 (file)
@@ -1820,3 +1820,11 @@ void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction)
                break;
        }
 }
+
+void r8712_flush_led_works(struct _adapter *padapter)
+{
+       struct led_priv *pledpriv = &padapter->ledpriv;
+
+       flush_work(&pledpriv->SwLed0.BlinkWorkItem);
+       flush_work(&pledpriv->SwLed1.BlinkWorkItem);
+}
index ee19c87..2f07681 100644 (file)
@@ -112,6 +112,7 @@ struct led_priv {
 void r8712_InitSwLeds(struct _adapter *padapter);
 void r8712_DeInitSwLeds(struct _adapter *padapter);
 void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction);
+void r8712_flush_led_works(struct _adapter *padapter);
 
 #endif
 
index 23cff43..cd6d9ff 100644 (file)
@@ -224,3 +224,11 @@ void r8712_unregister_cmd_alive(struct _adapter *padapter)
        }
        mutex_unlock(&pwrctrl->mutex_lock);
 }
+
+void r8712_flush_rwctrl_works(struct _adapter *padapter)
+{
+       struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;
+
+       flush_work(&pwrctrl->SetPSModeWorkItem);
+       flush_work(&pwrctrl->rpwm_workitem);
+}
index bf6623c..b35b9c7 100644 (file)
@@ -108,5 +108,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
 void r8712_set_ps_mode(struct _adapter *padapter, uint ps_mode,
                        uint smart_ps);
 void r8712_set_rpwm(struct _adapter *padapter, u8 val8);
+void r8712_flush_rwctrl_works(struct _adapter *padapter);
 
 #endif  /* __RTL871X_PWRCTRL_H_ */
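
These helpers replace the flush_scheduled_work() call in the disconnect path: flushing the driver's own work items avoids waiting on every item queued to the system workqueue (flush_scheduled_work() is being phased out for exactly that reason). The pattern, with hypothetical work items:

	/* wait only for this driver's pending work, not all of system_wq */
	flush_work(&priv->blink_work);
	flush_work(&priv->rpwm_work);
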
index 2434b13..505ebeb 100644 (file)
@@ -591,35 +591,30 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
 {
        struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
        struct usb_device *udev = interface_to_usbdev(pusb_intf);
+       struct _adapter *padapter = netdev_priv(pnetdev);
+
+       /* never exit with a firmware callback pending */
+       wait_for_completion(&padapter->rtl8712_fw_ready);
+       usb_set_intfdata(pusb_intf, NULL);
+       release_firmware(padapter->fw);
+       if (drvpriv.drv_registered)
+               padapter->surprise_removed = true;
+       if (pnetdev->reg_state != NETREG_UNINITIALIZED)
+               unregister_netdev(pnetdev); /* will call netdev_close() */
+       r8712_flush_rwctrl_works(padapter);
+       r8712_flush_led_works(padapter);
+       udelay(1);
+       /* Stop driver mlme relation timer */
+       r8712_stop_drv_timers(padapter);
+       r871x_dev_unload(padapter);
+       r8712_free_drv_sw(padapter);
+       free_netdev(pnetdev);
+
+       /* decrease the reference count of the usb device structure
+        * when disconnect
+        */
+       usb_put_dev(udev);
 
-       if (pnetdev) {
-               struct _adapter *padapter = netdev_priv(pnetdev);
-
-               /* never exit with a firmware callback pending */
-               wait_for_completion(&padapter->rtl8712_fw_ready);
-               pnetdev = usb_get_intfdata(pusb_intf);
-               usb_set_intfdata(pusb_intf, NULL);
-               if (!pnetdev)
-                       goto firmware_load_fail;
-               release_firmware(padapter->fw);
-               if (drvpriv.drv_registered)
-                       padapter->surprise_removed = true;
-               if (pnetdev->reg_state != NETREG_UNINITIALIZED)
-                       unregister_netdev(pnetdev); /* will call netdev_close() */
-               flush_scheduled_work();
-               udelay(1);
-               /* Stop driver mlme relation timer */
-               r8712_stop_drv_timers(padapter);
-               r871x_dev_unload(padapter);
-               r8712_free_drv_sw(padapter);
-               free_netdev(pnetdev);
-
-               /* decrease the reference count of the usb device structure
-                * when disconnect
-                */
-               usb_put_dev(udev);
-       }
-firmware_load_fail:
        /* If we didn't unplug usb dongle and remove/insert module, driver
         * fails on sitesurvey for the first time when device is up.
         * Reset usb port for sitesurvey fail issue.
index a884673..7eae820 100644 (file)
@@ -5,6 +5,7 @@ config RTL8723BS
        depends on m
        select WIRELESS_EXT
        select WEXT_PRIV
+       select CRYPTO_LIB_ARC4
        help
        This option enables support for RTL8723BS SDIO drivers, such as
        the wifi found on the 1st gen Intel Compute Stick, the CHIP
index 2dd251c..a545832 100644 (file)
@@ -909,6 +909,8 @@ void sd_int_dpc(struct adapter *adapter)
                                } else {
                                        rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
                                }
+                       } else {
+                               kfree(c2h_evt);
                        }
                } else {
                        /* Error handling for malloc fail */
index 6e6eb83..945f03d 100644 (file)
@@ -184,7 +184,7 @@ static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
        struct optee_msg_arg *ma;
 
        shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
-                           TEE_SHM_MAPPED);
+                           TEE_SHM_MAPPED | TEE_SHM_PRIV);
        if (IS_ERR(shm))
                return shm;
 
@@ -416,11 +416,13 @@ void optee_enable_shm_cache(struct optee *optee)
 }
 
 /**
- * optee_disable_shm_cache() - Disables caching of some shared memory allocation
- *                           in OP-TEE
+ * __optee_disable_shm_cache() - Disables caching of some shared memory
+ *                               allocation in OP-TEE
  * @optee:     main service struct
+ * @is_mapped: true if the cached shared memory addresses were mapped by this
+ *             kernel, are safe to dereference, and should be freed
  */
-void optee_disable_shm_cache(struct optee *optee)
+static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
 {
        struct optee_call_waiter w;
 
@@ -439,6 +441,13 @@ void optee_disable_shm_cache(struct optee *optee)
                if (res.result.status == OPTEE_SMC_RETURN_OK) {
                        struct tee_shm *shm;
 
+                       /*
+                        * Shared memory references that were not mapped by
+                        * this kernel must be ignored to prevent a crash.
+                        */
+                       if (!is_mapped)
+                               continue;
+
                        shm = reg_pair_to_ptr(res.result.shm_upper32,
                                              res.result.shm_lower32);
                        tee_shm_free(shm);
@@ -449,6 +458,27 @@ void optee_disable_shm_cache(struct optee *optee)
        optee_cq_wait_final(&optee->call_queue, &w);
 }
 
+/**
+ * optee_disable_shm_cache() - Disables caching of mapped shared memory
+ *                             allocations in OP-TEE
+ * @optee:     main service struct
+ */
+void optee_disable_shm_cache(struct optee *optee)
+{
+       return __optee_disable_shm_cache(optee, true);
+}
+
+/**
+ * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
+ *                                      allocations in OP-TEE which are not
+ *                                      currently mapped
+ * @optee:     main service struct
+ */
+void optee_disable_unmapped_shm_cache(struct optee *optee)
+{
+       return __optee_disable_shm_cache(optee, false);
+}
+
 #define PAGELIST_ENTRIES_PER_PAGE                              \
        ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
 
index ddb8f9e..5ce13b0 100644 (file)
@@ -6,6 +6,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/arm-smccc.h>
+#include <linux/crash_dump.h>
 #include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -277,7 +278,8 @@ static void optee_release(struct tee_context *ctx)
        if (!ctxdata)
                return;
 
-       shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
+       shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg),
+                           TEE_SHM_MAPPED | TEE_SHM_PRIV);
        if (!IS_ERR(shm)) {
                arg = tee_shm_get_va(shm, 0);
                /*
@@ -572,6 +574,13 @@ static optee_invoke_fn *get_invoke_func(struct device *dev)
        return ERR_PTR(-EINVAL);
 }
 
+/* optee_remove - Device Removal Routine
+ * @pdev: platform device information struct
+ *
+ * optee_remove is called by the platform subsystem to alert the driver
+ * that it should release the device.
+ */
+
 static int optee_remove(struct platform_device *pdev)
 {
        struct optee *optee = platform_get_drvdata(pdev);
@@ -602,6 +611,18 @@ static int optee_remove(struct platform_device *pdev)
        return 0;
 }
 
+/* optee_shutdown - Device Shutdown Routine
+ * @pdev: platform device information struct
+ *
+ * optee_shutdown is called by the platform subsystem to alert
+ * the driver that a shutdown, reboot, or kexec is happening and
+ * the device must be disabled.
+ */
+static void optee_shutdown(struct platform_device *pdev)
+{
+       optee_disable_shm_cache(platform_get_drvdata(pdev));
+}
+
 static int optee_probe(struct platform_device *pdev)
 {
        optee_invoke_fn *invoke_fn;
@@ -612,6 +633,16 @@ static int optee_probe(struct platform_device *pdev)
        u32 sec_caps;
        int rc;
 
+       /*
+        * The kernel may have crashed at the same time that all available
+        * secure world threads were suspended and we cannot reschedule the
+        * suspended threads without access to the crashed kernel's wait_queue.
+        * Therefore, we cannot reliably initialize the OP-TEE driver in the
+        * kdump kernel.
+        */
+       if (is_kdump_kernel())
+               return -ENODEV;
+
        invoke_fn = get_invoke_func(&pdev->dev);
        if (IS_ERR(invoke_fn))
                return PTR_ERR(invoke_fn);
@@ -686,6 +717,15 @@ static int optee_probe(struct platform_device *pdev)
        optee->memremaped_shm = memremaped_shm;
        optee->pool = pool;
 
+       /*
+        * Ensure that there are no pre-existing shm objects before enabling
+        * the shm cache so that there's no chance of receiving an invalid
+        * address during shutdown. This could occur, for example, if we're
+        * kexec booting from an older kernel that did not properly cleanup the
+        * shm cache.
+        */
+       optee_disable_unmapped_shm_cache(optee);
+
        optee_enable_shm_cache(optee);
 
        if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
@@ -728,6 +768,7 @@ MODULE_DEVICE_TABLE(of, optee_dt_match);
 static struct platform_driver optee_driver = {
        .probe  = optee_probe,
        .remove = optee_remove,
+       .shutdown = optee_shutdown,
        .driver = {
                .name = "optee",
                .of_match_table = optee_dt_match,
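
Taken together, the probe, shutdown, and cache changes follow a common shape for drivers that share state with firmware across kexec/kdump; a reduced sketch with hypothetical foo_* names:

static int foo_probe(struct platform_device *pdev)
{
	/* state needed to resume suspended firmware threads died with the
	 * crashed kernel, so do not bind in the kdump kernel
	 */
	if (is_kdump_kernel())
		return -ENODEV;

	/* ... normal probe, then drop any stale cached objects ... */
	return 0;
}

static void foo_shutdown(struct platform_device *pdev)
{
	/* quiesce shared caches before reboot/kexec hands over the system */
	foo_disable_caches(platform_get_drvdata(pdev));
}

static struct platform_driver foo_driver = {
	.probe    = foo_probe,
	.shutdown = foo_shutdown,
	.driver   = { .name = "foo" },
};
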
index e25b216..dbdd367 100644 (file)
@@ -159,6 +159,7 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
 
 void optee_enable_shm_cache(struct optee *optee);
 void optee_disable_shm_cache(struct optee *optee);
+void optee_disable_unmapped_shm_cache(struct optee *optee);
 
 int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
                       struct page **pages, size_t num_pages,
index 1849180..efbaff7 100644 (file)
@@ -314,7 +314,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
                shm = cmd_alloc_suppl(ctx, sz);
                break;
        case OPTEE_RPC_SHM_TYPE_KERNEL:
-               shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
+               shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
                break;
        default:
                arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -502,7 +502,8 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
 
        switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
        case OPTEE_SMC_RPC_FUNC_ALLOC:
-               shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
+               shm = tee_shm_alloc(ctx, param->a1,
+                                   TEE_SHM_MAPPED | TEE_SHM_PRIV);
                if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
                        reg_pair_from_64(&param->a1, &param->a2, pa);
                        reg_pair_from_64(&param->a4, &param->a5,
index d767eeb..c41a9a5 100644 (file)
@@ -27,13 +27,19 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
        shm->paddr = page_to_phys(page);
        shm->size = PAGE_SIZE << order;
 
-       if (shm->flags & TEE_SHM_DMA_BUF) {
+       /*
+        * Shared memory private to the OP-TEE driver doesn't need
+        * to be registered with OP-TEE.
+        */
+       if (!(shm->flags & TEE_SHM_PRIV)) {
                unsigned int nr_pages = 1 << order, i;
                struct page **pages;
 
                pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
-               if (!pages)
-                       return -ENOMEM;
+               if (!pages) {
+                       rc = -ENOMEM;
+                       goto err;
+               }
 
                for (i = 0; i < nr_pages; i++) {
                        pages[i] = page;
@@ -44,15 +50,21 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
                rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
                                        (unsigned long)shm->kaddr);
                kfree(pages);
+               if (rc)
+                       goto err;
        }
 
+       return 0;
+
+err:
+       __free_pages(page, order);
        return rc;
 }
 
 static void pool_op_free(struct tee_shm_pool_mgr *poolm,
                         struct tee_shm *shm)
 {
-       if (shm->flags & TEE_SHM_DMA_BUF)
+       if (!(shm->flags & TEE_SHM_PRIV))
                optee_shm_unregister(shm->ctx, shm);
 
        free_pages((unsigned long)shm->kaddr, get_order(shm->size));
index 00472f5..8a9384a 100644 (file)
@@ -117,7 +117,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                return ERR_PTR(-EINVAL);
        }
 
-       if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
+       if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
                dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
                return ERR_PTR(-EINVAL);
        }
@@ -193,6 +193,24 @@ err_dev_put:
 }
 EXPORT_SYMBOL_GPL(tee_shm_alloc);
 
+/**
+ * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
+ * @ctx:       Context that allocates the shared memory
+ * @size:      Requested size of shared memory
+ *
+ * The returned memory is registered in secure world and is suitable to be
+ * passed as a memory buffer in a parameter argument to
+ * tee_client_invoke_func(). The memory allocated is later freed with a
+ * call to tee_shm_free().
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
+{
+       return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
+
 struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                                 size_t length, u32 flags)
 {
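
A hedged usage sketch for the new helper follows; the tee_context is assumed to have been opened elsewhere (for example via tee_client_open_context()), and demo_prepare_buf() is purely illustrative:

#include <linux/err.h>
#include <linux/string.h>
#include <linux/tee_drv.h>

/* Copy a kernel buffer into shared memory that is registered with the
 * secure world and can be referenced from tee_client_invoke_func(). */
static struct tee_shm *demo_prepare_buf(struct tee_context *ctx,
                                        const void *data, size_t len)
{
        struct tee_shm *shm;
        void *va;

        shm = tee_shm_alloc_kernel_buf(ctx, len);
        if (IS_ERR(shm))
                return shm;

        va = tee_shm_get_va(shm, 0);
        if (IS_ERR(va)) {
                tee_shm_free(shm);
                return ERR_CAST(va);
        }

        memcpy(va, data, len);
        return shm;     /* caller frees it with tee_shm_free() when done */
}
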
index 99abdc0..dab7e8f 100644 (file)
@@ -569,13 +569,18 @@ static void notify_thresholds(__u64 msr_val)
                platform_thermal_notify(msr_val);
 }
 
+void __weak notify_hwp_interrupt(void)
+{
+       wrmsrl_safe(MSR_HWP_STATUS, 0);
+}
+
 /* Thermal transition interrupt handler */
 void intel_thermal_interrupt(void)
 {
        __u64 msr_val;
 
        if (static_cpu_has(X86_FEATURE_HWP))
-               wrmsrl_safe(MSR_HWP_STATUS, 0);
+               notify_hwp_interrupt();
 
        rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 
index 53f427b..01e7bed 100644 (file)
@@ -12,4 +12,7 @@ extern int (*platform_thermal_notify)(__u64 msr_val);
  * callback has rate control */
 extern bool (*platform_thermal_package_rate_control)(void);
 
+/* Handle HWP interrupt */
+extern void notify_hwp_interrupt(void);
+
 #endif /* _INTEL_THERMAL_INTERRUPT_H */
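
The new extern pairs with the __weak default added above; the same weak-default/strong-override idiom, reduced to a hypothetical symbol split across two translation units:

#include <linux/compiler.h>
#include <linux/printk.h>

/* core.c: the fallback used when no stronger definition is linked in */
void __weak demo_hw_notify(void)
{
        /* default: nothing to do */
}

/* driver.c: a regular (strong) definition silently replaces the weak one */
void demo_hw_notify(void)
{
        pr_debug("demo: handling hardware notification\n");
}
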
index 83b1ef3..10d6b22 100644 (file)
@@ -1875,18 +1875,6 @@ static struct attribute *switch_attrs[] = {
        NULL,
 };
 
-static bool has_port(const struct tb_switch *sw, enum tb_port_type type)
-{
-       const struct tb_port *port;
-
-       tb_switch_for_each_port(sw, port) {
-               if (!port->disabled && port->config.type == type)
-                       return true;
-       }
-
-       return false;
-}
-
 static umode_t switch_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
 {
@@ -1895,8 +1883,7 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
 
        if (attr == &dev_attr_authorized.attr) {
                if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
-                   sw->tb->security_level == TB_SECURITY_DPONLY ||
-                   !has_port(sw, TB_TYPE_PCIE_UP))
+                   sw->tb->security_level == TB_SECURITY_DPONLY)
                        return 0;
        } else if (attr == &dev_attr_device.attr) {
                if (!sw->device)
index 4caab87..2350fb3 100644 (file)
@@ -329,6 +329,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
 {
        struct uart_8250_port *up = up_to_u8250p(port);
        unsigned int iir, lsr;
+       unsigned long flags;
        unsigned int space, count;
 
        iir = serial_port_in(port, UART_IIR);
@@ -336,7 +337,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
        if (iir & UART_IIR_NO_INT)
                return 0;
 
-       spin_lock(&port->lock);
+       spin_lock_irqsave(&port->lock, flags);
 
        lsr = serial_port_in(port, UART_LSR);
 
@@ -370,7 +371,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
        if (lsr & UART_LSR_THRE)
                serial8250_tx_chars(up);
 
-       uart_unlock_and_check_sysrq(port);
+       uart_unlock_and_check_sysrq_irqrestore(port, flags);
 
        return 1;
 }
index 4e75d2e..fc65a22 100644 (file)
@@ -30,10 +30,11 @@ struct fsl8250_data {
 int fsl8250_handle_irq(struct uart_port *port)
 {
        unsigned char lsr, orig_lsr;
+       unsigned long flags;
        unsigned int iir;
        struct uart_8250_port *up = up_to_u8250p(port);
 
-       spin_lock(&up->port.lock);
+       spin_lock_irqsave(&up->port.lock, flags);
 
        iir = port->serial_in(port, UART_IIR);
        if (iir & UART_IIR_NO_INT) {
@@ -82,7 +83,7 @@ int fsl8250_handle_irq(struct uart_port *port)
 
        up->lsr_saved_flags = orig_lsr;
 
-       uart_unlock_and_check_sysrq(&up->port);
+       uart_unlock_and_check_sysrq_irqrestore(&up->port, flags);
 
        return 1;
 }
index f7d3023..fb65dc6 100644 (file)
@@ -93,10 +93,13 @@ static void mtk8250_dma_rx_complete(void *param)
        struct dma_tx_state state;
        int copied, total, cnt;
        unsigned char *ptr;
+       unsigned long flags;
 
        if (data->rx_status == DMA_RX_SHUTDOWN)
                return;
 
+       spin_lock_irqsave(&up->port.lock, flags);
+
        dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
        total = dma->rx_size - state.residue;
        cnt = total;
@@ -120,6 +123,8 @@ static void mtk8250_dma_rx_complete(void *param)
        tty_flip_buffer_push(tty_port);
 
        mtk8250_rx_dma(up);
+
+       spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
 static void mtk8250_rx_dma(struct uart_8250_port *up)
index 75827b6..a808c28 100644 (file)
@@ -3836,6 +3836,12 @@ static const struct pci_device_id blacklist[] = {
        { PCI_VDEVICE(INTEL, 0x0f0c), },
        { PCI_VDEVICE(INTEL, 0x228a), },
        { PCI_VDEVICE(INTEL, 0x228c), },
+       { PCI_VDEVICE(INTEL, 0x4b96), },
+       { PCI_VDEVICE(INTEL, 0x4b97), },
+       { PCI_VDEVICE(INTEL, 0x4b98), },
+       { PCI_VDEVICE(INTEL, 0x4b99), },
+       { PCI_VDEVICE(INTEL, 0x4b9a), },
+       { PCI_VDEVICE(INTEL, 0x4b9b), },
        { PCI_VDEVICE(INTEL, 0x9ce3), },
        { PCI_VDEVICE(INTEL, 0x9ce4), },
 
@@ -3996,6 +4002,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
                if (pci_match_id(pci_use_msi, dev)) {
                        dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
                        pci_set_master(dev);
+                       uart.port.flags &= ~UPF_SHARE_IRQ;
                        rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
                } else {
                        dev_dbg(&dev->dev, "Using legacy interrupts\n");
index 2164290..1da29a2 100644 (file)
@@ -311,7 +311,11 @@ static const struct serial8250_config uart_config[] = {
 /* Uart divisor latch read */
 static int default_serial_dl_read(struct uart_8250_port *up)
 {
-       return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8;
+       /* Assign these in pieces to truncate any bits above 7.  */
+       unsigned char dll = serial_in(up, UART_DLL);
+       unsigned char dlm = serial_in(up, UART_DLM);
+
+       return dll | dlm << 8;
 }
 
 /* Uart divisor latch write */
@@ -1297,9 +1301,11 @@ static void autoconfig(struct uart_8250_port *up)
        serial_out(up, UART_LCR, 0);
 
        serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-       scratch = serial_in(up, UART_IIR) >> 6;
 
-       switch (scratch) {
+       /* Assign this as it is to truncate any bits above 7.  */
+       scratch = serial_in(up, UART_IIR);
+
+       switch (scratch >> 6) {
        case 0:
                autoconfig_8250(up);
                break;
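
Both hunks above rely on plain C integer conversion rather than an explicit mask; a small stand-alone rendering of the arithmetic (the 0xff upper byte is a hypothetical artefact of a wider bus read):

#include <stdio.h>

int main(void)
{
        unsigned int raw_dll = 0xff01;  /* hypothetical serial_in() result, junk above bit 7 */
        unsigned int raw_dlm = 0xff02;
        unsigned char dll = raw_dll;    /* 0x01: the assignment truncates the high bits */
        unsigned char dlm = raw_dlm;    /* 0x02 */

        printf("old: 0x%x\n", raw_dll | raw_dlm << 8);  /* 0xffff01: garbage divisor */
        printf("new: 0x%x\n", dll | dlm << 8);          /* 0x201: the intended 16-bit value */
        return 0;
}
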
@@ -1893,11 +1899,12 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
        unsigned char status;
        struct uart_8250_port *up = up_to_u8250p(port);
        bool skip_rx = false;
+       unsigned long flags;
 
        if (iir & UART_IIR_NO_INT)
                return 0;
 
-       spin_lock(&port->lock);
+       spin_lock_irqsave(&port->lock, flags);
 
        status = serial_port_in(port, UART_LSR);
 
@@ -1923,7 +1930,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
                (up->ier & UART_IER_THRI))
                serial8250_tx_chars(up);
 
-       uart_unlock_and_check_sysrq(port);
+       uart_unlock_and_check_sysrq_irqrestore(port, flags);
 
        return 1;
 }
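
The aspeed_vuart, fsl8250 and core 8250 hunks all make the same conversion so the handler is correct whether or not interrupts are already disabled when it runs; condensed to its skeleton (handler name hypothetical, uart_unlock_and_check_sysrq_irqrestore() as used in the hunks above):

#include <linux/serial_core.h>
#include <linux/spinlock.h>

static int demo_handle_irq(struct uart_port *port)
{
        unsigned long flags;

        /* Save and restore the interrupt state instead of assuming hard-IRQ context. */
        spin_lock_irqsave(&port->lock, flags);

        /* ... read IIR/LSR, drain the RX FIFO, kick TX ... */

        uart_unlock_and_check_sysrq_irqrestore(port, flags);
        return 1;
}
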
index 508128d..f0e5da7 100644 (file)
@@ -1415,7 +1415,7 @@ static unsigned int lpuart_get_mctrl(struct uart_port *port)
 
 static unsigned int lpuart32_get_mctrl(struct uart_port *port)
 {
-       unsigned int mctrl = 0;
+       unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
        u32 reg;
 
        reg = lpuart32_read(port, UARTCTRL);
index 0c1e4df..ef11860 100644 (file)
@@ -1293,7 +1293,8 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
                freq = uartclk;
        if (freq == 0) {
                dev_err(dev, "Cannot get clock rate\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_clk;
        }
 
        if (xtal) {
index 2220327..eba5b9e 100644 (file)
@@ -1045,9 +1045,11 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
 
        if (tup->cdata->fifo_mode_enable_status) {
                ret = tegra_uart_wait_fifo_mode_enabled(tup);
-               dev_err(tup->uport.dev, "FIFO mode not enabled\n");
-               if (ret < 0)
+               if (ret < 0) {
+                       dev_err(tup->uport.dev,
+                               "Failed to enable FIFO mode: %d\n", ret);
                        return ret;
+               }
        } else {
                /*
                 * For all tegra devices (up to t210), there is a hardware
index ef981d3..cb72393 100644 (file)
@@ -2059,7 +2059,7 @@ static void restore_cur(struct vc_data *vc)
 
 enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey,
        EShash, ESsetG0, ESsetG1, ESpercent, EScsiignore, ESnonstd,
-       ESpalette, ESosc };
+       ESpalette, ESosc, ESapc, ESpm, ESdcs };
 
 /* console_lock is held (except via vc_init()) */
 static void reset_terminal(struct vc_data *vc, int do_clear)
@@ -2133,20 +2133,28 @@ static void vc_setGx(struct vc_data *vc, unsigned int which, int c)
                vc->vc_translate = set_translate(*charset, vc);
 }
 
+/* is this state an ANSI control string? */
+static bool ansi_control_string(unsigned int state)
+{
+       if (state == ESosc || state == ESapc || state == ESpm || state == ESdcs)
+               return true;
+       return false;
+}
+
 /* console_lock is held */
 static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
 {
        /*
         *  Control characters can be used in the _middle_
-        *  of an escape sequence.
+        *  of an escape sequence, aside from ANSI control strings.
         */
-       if (vc->vc_state == ESosc && c>=8 && c<=13) /* ... except for OSC */
+       if (ansi_control_string(vc->vc_state) && c >= 8 && c <= 13)
                return;
        switch (c) {
        case 0:
                return;
        case 7:
-               if (vc->vc_state == ESosc)
+               if (ansi_control_string(vc->vc_state))
                        vc->vc_state = ESnormal;
                else if (vc->vc_bell_duration)
                        kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
@@ -2207,6 +2215,12 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
                case ']':
                        vc->vc_state = ESnonstd;
                        return;
+               case '_':
+                       vc->vc_state = ESapc;
+                       return;
+               case '^':
+                       vc->vc_state = ESpm;
+                       return;
                case '%':
                        vc->vc_state = ESpercent;
                        return;
@@ -2224,6 +2238,9 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
                        if (vc->state.x < VC_TABSTOPS_COUNT)
                                set_bit(vc->state.x, vc->vc_tab_stop);
                        return;
+               case 'P':
+                       vc->vc_state = ESdcs;
+                       return;
                case 'Z':
                        respond_ID(tty);
                        return;
@@ -2520,8 +2537,14 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
                vc_setGx(vc, 1, c);
                vc->vc_state = ESnormal;
                return;
+       case ESapc:
+               return;
        case ESosc:
                return;
+       case ESpm:
+               return;
+       case ESdcs:
+               return;
        default:
                vc->vc_state = ESnormal;
        }
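
With the new ESapc, ESpm and ESdcs states the console consumes APC, PM and DCS strings without echoing their payload; a quick user-space check (payload text hypothetical, each string terminated by ST, i.e. ESC \):

#include <stdio.h>

int main(void)
{
        printf("\033_application program command payload\033\\");       /* APC ... ST */
        printf("\033^privacy message payload\033\\");                   /* PM  ... ST */
        printf("\033Pdevice control string payload\033\\");             /* DCS ... ST */
        printf("only this line should appear on the VT\n");
        return 0;
}
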
index 0e0cd9e..3639bb6 100644 (file)
@@ -246,6 +246,8 @@ int vt_waitactive(int n)
  *
  * XXX It should at least call into the driver, fbdev's definitely need to
  * restore their engine state. --BenH
+ *
+ * Called with the console lock held.
  */
 static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
 {
@@ -262,7 +264,6 @@ static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
                return -EINVAL;
        }
 
-       /* FIXME: this needs the console lock extending */
        if (vc->vc_mode == mode)
                return 0;
 
@@ -271,12 +272,10 @@ static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
                return 0;
 
        /* explicitly blank/unblank the screen if switching modes */
-       console_lock();
        if (mode == KD_TEXT)
                do_unblank_screen(1);
        else
                do_blank_screen(1);
-       console_unlock();
 
        return 0;
 }
@@ -378,7 +377,10 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
                if (!perm)
                        return -EPERM;
 
-               return vt_kdsetmode(vc, arg);
+               console_lock();
+               ret = vt_kdsetmode(vc, arg);
+               console_unlock();
+               return ret;
 
        case KDGETMODE:
                return put_user(vc->vc_mode, (int __user *)arg);
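
Moving the locking into the KDSETMODE ioctl path lets the vc_mode comparison and the blank/unblank run in one critical section; the general shape of the convention, using a plain mutex and hypothetical helpers purely for illustration (the console lock itself is not a mutex):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

/* Caller must hold demo_lock so the check and the update stay atomic. */
static int demo_set_mode_locked(int *cur_mode, int new_mode)
{
        lockdep_assert_held(&demo_lock);

        if (*cur_mode == new_mode)
                return 0;
        *cur_mode = new_mode;
        return 0;
}

static int demo_ioctl(int *cur_mode, int new_mode)
{
        int ret;

        mutex_lock(&demo_lock);
        ret = demo_set_mode_locked(cur_mode, new_mode);
        mutex_unlock(&demo_lock);
        return ret;
}
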
index 02ec7ab..e29989d 100644 (file)
@@ -731,6 +731,7 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
                request->actual = 0;
                priv_dev->status_completion_no_call = true;
                priv_dev->pending_status_request = request;
+               usb_gadget_set_state(&priv_dev->gadget, USB_STATE_CONFIGURED);
                spin_unlock_irqrestore(&priv_dev->lock, flags);
 
                /*
index c23f53e..27df0c6 100644 (file)
@@ -1882,7 +1882,7 @@ static int __cdnsp_gadget_init(struct cdns *cdns)
        pdev->gadget.name = "cdnsp-gadget";
        pdev->gadget.speed = USB_SPEED_UNKNOWN;
        pdev->gadget.sg_supported = 1;
-       pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
+       pdev->gadget.max_speed = max_speed;
        pdev->gadget.lpm_capable = 1;
 
        pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
index 783ca8f..f740fa6 100644 (file)
@@ -383,8 +383,8 @@ struct cdnsp_intr_reg {
 #define IMAN_IE                        BIT(1)
 #define IMAN_IP                        BIT(0)
 /* bits 2:31 need to be preserved */
-#define IMAN_IE_SET(p)         (((p) & IMAN_IE) | 0x2)
-#define IMAN_IE_CLEAR(p)       (((p) & IMAN_IE) & ~(0x2))
+#define IMAN_IE_SET(p)         ((p) | IMAN_IE)
+#define IMAN_IE_CLEAR(p)       ((p) & ~IMAN_IE)
 
 /* IMOD - Interrupter Moderation Register - irq_control bitmasks. */
 /*
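
The macro rewrite fixes a destructive read-modify-write: the old versions masked the register with IMAN_IE first, so every other bit was thrown away. A user-space check of the corrected arithmetic (the register value is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define IMAN_IE                 (1u << 1)
#define IMAN_IE_SET(p)          ((p) | IMAN_IE)
#define IMAN_IE_CLEAR(p)        ((p) & ~IMAN_IE)

int main(void)
{
        uint32_t iman = 0xf0f0f001;     /* IP set, IE clear, other bits populated */

        printf("set:   0x%08x\n", IMAN_IE_SET(iman));   /* 0xf0f0f003: only IE changes */
        printf("clear: 0x%08x\n", IMAN_IE_CLEAR(iman)); /* 0xf0f0f001: other bits preserved */
        /* the old ((p) & IMAN_IE) | 0x2 would have produced 0x00000002 here */
        return 0;
}
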
index 6897274..1b14384 100644 (file)
@@ -1932,15 +1932,13 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
                }
 
                if (enqd_len + trb_buff_len >= full_len) {
-                       if (need_zero_pkt && zero_len_trb) {
-                               zero_len_trb = true;
-                       } else {
-                               field &= ~TRB_CHAIN;
-                               field |= TRB_IOC;
-                               more_trbs_coming = false;
-                               need_zero_pkt = false;
-                               preq->td.last_trb = ring->enqueue;
-                       }
+                       if (need_zero_pkt)
+                               zero_len_trb = !zero_len_trb;
+
+                       field &= ~TRB_CHAIN;
+                       field |= TRB_IOC;
+                       more_trbs_coming = false;
+                       preq->td.last_trb = ring->enqueue;
                }
 
                /* Only set interrupt on short packet for OUT endpoints. */
@@ -1955,7 +1953,7 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
                length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
                        TRB_INTR_TARGET(0);
 
-               cdnsp_queue_trb(pdev, ring, more_trbs_coming | need_zero_pkt,
+               cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb,
                                lower_32_bits(send_addr),
                                upper_32_bits(send_addr),
                                length_field,
index 74d5a9c..73f419a 100644 (file)
@@ -2324,17 +2324,10 @@ static void usbtmc_interrupt(struct urb *urb)
                dev_err(dev, "overflow with length %d, actual length is %d\n",
                        data->iin_wMaxPacketSize, urb->actual_length);
                fallthrough;
-       case -ECONNRESET:
-       case -ENOENT:
-       case -ESHUTDOWN:
-       case -EILSEQ:
-       case -ETIME:
-       case -EPIPE:
+       default:
                /* urb terminated, clean up */
                dev_dbg(dev, "urb terminated, status: %d\n", status);
                return;
-       default:
-               dev_err(dev, "unknown status received: %d\n", status);
        }
 exit:
        rv = usb_submit_urb(urb, GFP_ATOMIC);
index 3740cf9..0697fde 100644 (file)
@@ -193,7 +193,11 @@ static void otg_start_hnp_polling(struct otg_fsm *fsm)
        if (!fsm->host_req_flag)
                return;
 
-       INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
+       if (!fsm->hnp_work_inited) {
+               INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
+               fsm->hnp_work_inited = true;
+       }
+
        schedule_delayed_work(&fsm->hnp_polling_work,
                                        msecs_to_jiffies(T_HOST_REQ_POLL));
 }
index 45f2bc0..ccb68fe 100644 (file)
@@ -940,19 +940,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
 
 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
 {
-       struct dwc3_trb         *tmp;
        u8                      trbs_left;
 
        /*
-        * If enqueue & dequeue are equal than it is either full or empty.
-        *
-        * One way to know for sure is if the TRB right before us has HWO bit
-        * set or not. If it has, then we're definitely full and can't fit any
-        * more transfers in our ring.
+        * If the enqueue & dequeue are equal then the TRB ring is either full
+        * or empty. It's considered full when DWC3_TRB_NUM - 1 TRBs are pending
+        * to be processed by the driver.
         */
        if (dep->trb_enqueue == dep->trb_dequeue) {
-               tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
-               if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
+               /*
+                * If any request remains in the started_list at this point,
+                * there is no TRB available.
+                */
+               if (!list_empty(&dep->started_list))
                        return 0;
 
                return DWC3_TRB_NUM - 1;
@@ -2243,10 +2243,19 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
                ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
                                msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
-               if (ret == 0) {
-                       dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
-                       return -ETIMEDOUT;
-               }
+               if (ret == 0)
+                       dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
+       }
+
+       /*
+        * Avoid issuing a runtime resume if the device is already in the
+        * suspended state during gadget disconnect.  DWC3 gadget was already
+        * halted/stopped during runtime suspend.
+        */
+       if (!is_on) {
+               pm_runtime_barrier(dwc->dev);
+               if (pm_runtime_suspended(dwc->dev))
+                       return 0;
        }
 
        /*
@@ -2447,6 +2456,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
        /* begin to receive SETUP packets */
        dwc->ep0state = EP0_SETUP_PHASE;
        dwc->link_state = DWC3_LINK_STATE_SS_DIS;
+       dwc->delayed_status = false;
        dwc3_ep0_out_start(dwc);
 
        dwc3_gadget_enable_irq(dwc);
index 02683ac..bb476e1 100644 (file)
@@ -41,6 +41,7 @@ struct f_hidg {
        unsigned char                   bInterfaceSubClass;
        unsigned char                   bInterfaceProtocol;
        unsigned char                   protocol;
+       unsigned char                   idle;
        unsigned short                  report_desc_length;
        char                            *report_desc;
        unsigned short                  report_length;
@@ -338,6 +339,11 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
 
        spin_lock_irqsave(&hidg->write_spinlock, flags);
 
+       if (!hidg->req) {
+               spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+               return -ESHUTDOWN;
+       }
+
 #define WRITE_COND (!hidg->write_pending)
 try_again:
        /* write queue */
@@ -358,8 +364,14 @@ try_again:
        count  = min_t(unsigned, count, hidg->report_length);
 
        spin_unlock_irqrestore(&hidg->write_spinlock, flags);
-       status = copy_from_user(req->buf, buffer, count);
 
+       if (!req) {
+               ERROR(hidg->func.config->cdev, "hidg->req is NULL\n");
+               status = -ESHUTDOWN;
+               goto release_write_pending;
+       }
+
+       status = copy_from_user(req->buf, buffer, count);
        if (status != 0) {
                ERROR(hidg->func.config->cdev,
                        "copy_from_user error\n");
@@ -387,14 +399,17 @@ try_again:
 
        spin_unlock_irqrestore(&hidg->write_spinlock, flags);
 
+       if (!hidg->in_ep->enabled) {
+               ERROR(hidg->func.config->cdev, "in_ep is disabled\n");
+               status = -ESHUTDOWN;
+               goto release_write_pending;
+       }
+
        status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
-       if (status < 0) {
-               ERROR(hidg->func.config->cdev,
-                       "usb_ep_queue error on int endpoint %zd\n", status);
+       if (status < 0)
                goto release_write_pending;
-       } else {
+       else
                status = count;
-       }
 
        return status;
 release_write_pending:
@@ -523,6 +538,14 @@ static int hidg_setup(struct usb_function *f,
                goto respond;
                break;
 
+       case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+                 | HID_REQ_GET_IDLE):
+               VDBG(cdev, "get_idle\n");
+               length = min_t(unsigned int, length, 1);
+               ((u8 *) req->buf)[0] = hidg->idle;
+               goto respond;
+               break;
+
        case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
                  | HID_REQ_SET_REPORT):
                VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
@@ -546,6 +569,14 @@ static int hidg_setup(struct usb_function *f,
                goto stall;
                break;
 
+       case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+                 | HID_REQ_SET_IDLE):
+               VDBG(cdev, "set_idle\n");
+               length = 0;
+               hidg->idle = value >> 8;
+               goto respond;
+               break;
+
        case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
                  | USB_REQ_GET_DESCRIPTOR):
                switch (value >> 8) {
@@ -773,6 +804,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
        hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
        hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
        hidg->protocol = HID_REPORT_PROTOCOL;
+       hidg->idle = 1;
        hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
        hidg_ss_in_comp_desc.wBytesPerInterval =
                                cpu_to_le16(hidg->report_length);
index 018dd09..9e5c950 100644 (file)
@@ -230,7 +230,13 @@ static void u_audio_iso_fback_complete(struct usb_ep *ep,
        int status = req->status;
 
        /* i/f shutting down */
-       if (!prm->fb_ep_enabled || req->status == -ESHUTDOWN)
+       if (!prm->fb_ep_enabled) {
+               kfree(req->buf);
+               usb_ep_free_request(ep, req);
+               return;
+       }
+
+       if (req->status == -ESHUTDOWN)
                return;
 
        /*
@@ -388,8 +394,6 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
        if (!prm->ep_enabled)
                return;
 
-       prm->ep_enabled = false;
-
        audio_dev = uac->audio_dev;
        params = &audio_dev->params;
 
@@ -407,6 +411,8 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
                }
        }
 
+       prm->ep_enabled = false;
+
        if (usb_ep_disable(ep))
                dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
 }
@@ -418,15 +424,16 @@ static inline void free_ep_fback(struct uac_rtd_params *prm, struct usb_ep *ep)
        if (!prm->fb_ep_enabled)
                return;
 
-       prm->fb_ep_enabled = false;
-
        if (prm->req_fback) {
-               usb_ep_dequeue(ep, prm->req_fback);
-               kfree(prm->req_fback->buf);
-               usb_ep_free_request(ep, prm->req_fback);
+               if (usb_ep_dequeue(ep, prm->req_fback)) {
+                       kfree(prm->req_fback->buf);
+                       usb_ep_free_request(ep, prm->req_fback);
+               }
                prm->req_fback = NULL;
        }
 
+       prm->fb_ep_enabled = false;
+
        if (usb_ep_disable(ep))
                dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
 }
index 34f4db5..d2a2b20 100644 (file)
@@ -1255,12 +1255,14 @@ static int max3420_probe(struct spi_device *spi)
        err = devm_request_irq(&spi->dev, irq, max3420_irq_handler, 0,
                               "max3420", udc);
        if (err < 0)
-               return err;
+               goto del_gadget;
 
        udc->thread_task = kthread_create(max3420_thread, udc,
                                          "max3420-thread");
-       if (IS_ERR(udc->thread_task))
-               return PTR_ERR(udc->thread_task);
+       if (IS_ERR(udc->thread_task)) {
+               err = PTR_ERR(udc->thread_task);
+               goto del_gadget;
+       }
 
        irq = of_irq_get_byname(spi->dev.of_node, "vbus");
        if (irq <= 0) { /* no vbus irq implies self-powered design */
@@ -1280,10 +1282,14 @@ static int max3420_probe(struct spi_device *spi)
                err = devm_request_irq(&spi->dev, irq,
                                       max3420_vbus_handler, 0, "vbus", udc);
                if (err < 0)
-                       return err;
+                       goto del_gadget;
        }
 
        return 0;
+
+del_gadget:
+       usb_del_gadget_udc(&udc->gadget);
+       return err;
 }
 
 static int max3420_remove(struct spi_device *spi)
index 9bbd7dd..a24aea3 100644 (file)
@@ -611,8 +611,6 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
        if (ohci_at91->wakeup)
                enable_irq_wake(hcd->irq);
 
-       ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
-
        ret = ohci_suspend(hcd, ohci_at91->wakeup);
        if (ret) {
                if (ohci_at91->wakeup)
@@ -632,7 +630,10 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
                /* flush the writes */
                (void) ohci_readl (ohci, &ohci->regs->control);
                msleep(1);
+               ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
                at91_stop_clock(ohci_at91);
+       } else {
+               ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
        }
 
        return ret;
@@ -644,6 +645,8 @@ ohci_hcd_at91_drv_resume(struct device *dev)
        struct usb_hcd  *hcd = dev_get_drvdata(dev);
        struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
 
+       ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
+
        if (ohci_at91->wakeup)
                disable_irq_wake(hcd->irq);
        else
@@ -651,8 +654,6 @@ ohci_hcd_at91_drv_resume(struct device *dev)
 
        ohci_resume(hcd, false);
 
-       ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
-
        return 0;
 }
 
index 5923844..ef5e91a 100644 (file)
@@ -207,7 +207,8 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
                        return 0;
 
                case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
-                       return 0;
+                       dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
+                       return -ENOENT;
 
                case RENESAS_ROM_STATUS_ERROR: /* Error State */
                default: /* All other states are marked as "Reserved states" */
@@ -224,14 +225,6 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
        u8 fw_state;
        int err;
 
-       /* Check if device has ROM and loaded, if so skip everything */
-       err = renesas_check_rom(pdev);
-       if (err) { /* we have rom */
-               err = renesas_check_rom_state(pdev);
-               if (!err)
-                       return err;
-       }
-
        /*
         * Test if the device is actually needing the firmware. As most
         * BIOSes will initialize the device for us. If the device is
@@ -591,21 +584,39 @@ int renesas_xhci_check_request_fw(struct pci_dev *pdev,
                        (struct xhci_driver_data *)id->driver_data;
        const char *fw_name = driver_data->firmware;
        const struct firmware *fw;
+       bool has_rom;
        int err;
 
+       /* Check if device has ROM and loaded, if so skip everything */
+       has_rom = renesas_check_rom(pdev);
+       if (has_rom) {
+               err = renesas_check_rom_state(pdev);
+               if (!err)
+                       return 0;
+               else if (err != -ENOENT)
+                       has_rom = false;
+       }
+
        err = renesas_fw_check_running(pdev);
        /* Continue ahead, if the firmware is already running. */
        if (err == 0)
                return 0;
 
+       /* no firmware interface available */
        if (err != 1)
-               return err;
+               return has_rom ? 0 : err;
 
        pci_dev_get(pdev);
-       err = request_firmware(&fw, fw_name, &pdev->dev);
+       err = firmware_request_nowarn(&fw, fw_name, &pdev->dev);
        pci_dev_put(pdev);
        if (err) {
-               dev_err(&pdev->dev, "request_firmware failed: %d\n", err);
+               if (has_rom) {
+                       dev_info(&pdev->dev, "failed to load firmware %s, fallback to ROM\n",
+                                fw_name);
+                       return 0;
+               }
+               dev_err(&pdev->dev, "failed to load firmware %s: %d\n",
+                       fw_name, err);
                return err;
        }
 
index 640a46f..f086960 100644 (file)
@@ -35,6 +35,7 @@ struct omap2430_glue {
        struct device           *control_otghs;
        unsigned int            is_runtime_suspended:1;
        unsigned int            needs_resume:1;
+       unsigned int            phy_suspended:1;
 };
 #define glue_to_musb(g)                platform_get_drvdata(g->musb)
 
@@ -458,8 +459,10 @@ static int omap2430_runtime_suspend(struct device *dev)
 
        omap2430_low_level_exit(musb);
 
-       phy_power_off(musb->phy);
-       phy_exit(musb->phy);
+       if (!glue->phy_suspended) {
+               phy_power_off(musb->phy);
+               phy_exit(musb->phy);
+       }
 
        glue->is_runtime_suspended = 1;
 
@@ -474,8 +477,10 @@ static int omap2430_runtime_resume(struct device *dev)
        if (!musb)
                return 0;
 
-       phy_init(musb->phy);
-       phy_power_on(musb->phy);
+       if (!glue->phy_suspended) {
+               phy_init(musb->phy);
+               phy_power_on(musb->phy);
+       }
 
        omap2430_low_level_init(musb);
        musb_writel(musb->mregs, OTG_INTERFSEL,
@@ -489,7 +494,21 @@ static int omap2430_runtime_resume(struct device *dev)
        return 0;
 }
 
+/* I2C and SPI PHYs need to be suspended before the glue layer */
 static int omap2430_suspend(struct device *dev)
+{
+       struct omap2430_glue *glue = dev_get_drvdata(dev);
+       struct musb *musb = glue_to_musb(glue);
+
+       phy_power_off(musb->phy);
+       phy_exit(musb->phy);
+       glue->phy_suspended = 1;
+
+       return 0;
+}
+
+/* Glue layer needs to be suspended after musb_suspend() */
+static int omap2430_suspend_late(struct device *dev)
 {
        struct omap2430_glue *glue = dev_get_drvdata(dev);
 
@@ -501,7 +520,7 @@ static int omap2430_suspend(struct device *dev)
        return omap2430_runtime_suspend(dev);
 }
 
-static int omap2430_resume(struct device *dev)
+static int omap2430_resume_early(struct device *dev)
 {
        struct omap2430_glue *glue = dev_get_drvdata(dev);
 
@@ -513,10 +532,24 @@ static int omap2430_resume(struct device *dev)
        return omap2430_runtime_resume(dev);
 }
 
+static int omap2430_resume(struct device *dev)
+{
+       struct omap2430_glue *glue = dev_get_drvdata(dev);
+       struct musb *musb = glue_to_musb(glue);
+
+       phy_init(musb->phy);
+       phy_power_on(musb->phy);
+       glue->phy_suspended = 0;
+
+       return 0;
+}
+
 static const struct dev_pm_ops omap2430_pm_ops = {
        .runtime_suspend = omap2430_runtime_suspend,
        .runtime_resume = omap2430_runtime_resume,
        .suspend = omap2430_suspend,
+       .suspend_late = omap2430_suspend_late,
+       .resume_early = omap2430_resume_early,
        .resume = omap2430_resume,
 };
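
The split into ->suspend/->suspend_late and ->resume_early/->resume leans on the standard system-sleep ordering: late callbacks run after every ->suspend on the way down, early callbacks before every ->resume on the way up. A stripped-down, hypothetical driver that only logs that order:

#include <linux/device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
        dev_info(dev, "suspend: PHY work while I2C/SPI are still awake\n");
        return 0;
}

static int demo_suspend_late(struct device *dev)
{
        dev_info(dev, "suspend_late: glue/runtime suspend\n");
        return 0;
}

static int demo_resume_early(struct device *dev)
{
        dev_info(dev, "resume_early: glue/runtime resume\n");
        return 0;
}

static int demo_resume(struct device *dev)
{
        dev_info(dev, "resume: power the PHY back on last\n");
        return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
        .suspend        = demo_suspend,
        .suspend_late   = demo_suspend_late,
        .resume_early   = demo_resume_early,
        .resume         = demo_resume,
};
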
 
index 4a1f3a9..33bbb34 100644 (file)
@@ -219,6 +219,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) },
        { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_AUTO_M3_OP_COM_V2_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) },
index add602b..755858c 100644 (file)
 /* Vardaan Enterprises Serial Interface VEUSB422R3 */
 #define FTDI_VARDAAN_PID       0xF070
 
+/* Auto-M3 Ltd. - OP-COM USB V2 - OBD interface Adapter */
+#define FTDI_AUTO_M3_OP_COM_V2_PID     0x4f50
+
 /*
  * Xsens Technologies BV products (http://www.xsens.com).
  */
index 0fbe253..29c765c 100644 (file)
@@ -1203,6 +1203,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(2) | RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff),    /* Telit FN980 (PCIe) */
          .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff),    /* Telit FD980 */
+         .driver_info = NCTRL(2) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2072,6 +2074,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) | RSVD(5) },
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),                     /* Fibocom NL678 series */
          .driver_info = RSVD(6) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },    /* Fibocom FG150 Diag */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },          /* Fibocom FG150 AT */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
        { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },                   /* LongSung M5710 */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   /* GosunCn GM500 RNDIS */
index 2f2f504..930b3d5 100644 (file)
@@ -418,24 +418,34 @@ static int pl2303_detect_type(struct usb_serial *serial)
        bcdDevice = le16_to_cpu(desc->bcdDevice);
        bcdUSB = le16_to_cpu(desc->bcdUSB);
 
-       switch (bcdDevice) {
-       case 0x100:
-               /*
-                * Assume it's an HXN-type if the device doesn't support the old read
-                * request value.
-                */
-               if (bcdUSB == 0x200 && !pl2303_supports_hx_status(serial))
-                       return TYPE_HXN;
+       switch (bcdUSB) {
+       case 0x110:
+               switch (bcdDevice) {
+               case 0x300:
+                       return TYPE_HX;
+               case 0x400:
+                       return TYPE_HXD;
+               default:
+                       return TYPE_HX;
+               }
                break;
-       case 0x300:
-               if (bcdUSB == 0x200)
+       case 0x200:
+               switch (bcdDevice) {
+               case 0x100:
+               case 0x305:
+                       /*
+                        * Assume it's an HXN-type if the device doesn't
+                        * support the old read request value.
+                        */
+                       if (!pl2303_supports_hx_status(serial))
+                               return TYPE_HXN;
+                       break;
+               case 0x300:
                        return TYPE_TA;
-
-               return TYPE_HX;
-       case 0x400:
-               return TYPE_HXD;
-       case 0x500:
-               return TYPE_TB;
+               case 0x500:
+                       return TYPE_TB;
+               }
+               break;
        }
 
        dev_err(&serial->interface->dev,
index 5b22a1c..5d05de6 100644 (file)
@@ -341,6 +341,7 @@ struct tcpm_port {
        bool vbus_source;
        bool vbus_charge;
 
+       /* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
        bool send_discover;
        bool op_vsafe5v;
 
@@ -370,6 +371,7 @@ struct tcpm_port {
        struct hrtimer send_discover_timer;
        struct kthread_work send_discover_work;
        bool state_machine_running;
+       /* Set to true when the VDM State Machine has pending actions to run. */
        bool vdm_sm_running;
 
        struct completion tx_complete;
@@ -1431,6 +1433,7 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
        /* Set ready, vdm state machine will actually send */
        port->vdm_retries = 0;
        port->vdm_state = VDM_STATE_READY;
+       port->vdm_sm_running = true;
 
        mod_vdm_delayed_work(port, 0);
 }
@@ -1673,7 +1676,6 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
                                rlen = 1;
                        } else {
                                tcpm_register_partner_altmodes(port);
-                               port->vdm_sm_running = false;
                        }
                        break;
                case CMD_ENTER_MODE:
@@ -1721,14 +1723,12 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
                                      (VDO_SVDM_VERS(svdm_version));
                        break;
                }
-               port->vdm_sm_running = false;
                break;
        default:
                response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
                rlen = 1;
                response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
                              (VDO_SVDM_VERS(svdm_version));
-               port->vdm_sm_running = false;
                break;
        }
 
@@ -1737,6 +1737,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
        return rlen;
 }
 
+static void tcpm_pd_handle_msg(struct tcpm_port *port,
+                              enum pd_msg_request message,
+                              enum tcpm_ams ams);
+
 static void tcpm_handle_vdm_request(struct tcpm_port *port,
                                    const __le32 *payload, int cnt)
 {
@@ -1764,11 +1768,25 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
                port->vdm_state = VDM_STATE_DONE;
        }
 
-       if (PD_VDO_SVDM(p[0])) {
+       if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
+               /*
+                * Here an SVDM is received (INIT or RSP or unknown). Set vdm_sm_running in
+                * advance because we are dropping the lock but may send VDMs soon.
+                * For the cases of INIT received:
+                *  - If no response to send, it will be cleared later in this function.
+                *  - If there are responses to send, it will be cleared in the state machine.
+                * For the cases of RSP received:
+                *  - If no further INIT to send, it will be cleared later in this function.
+                *  - Otherwise, it will be cleared in the state machine on timeout, or control
+                *    comes back here until there is no further INIT to send.
+                * For the cases of unknown type received:
+                *  - We will send NAK and the flag will be cleared in the state machine.
+                */
+               port->vdm_sm_running = true;
                rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
        } else {
                if (port->negotiated_rev >= PD_REV30)
-                       tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+                       tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
        }
 
        /*
@@ -1833,6 +1851,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
 
        if (rlen > 0)
                tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
+       else
+               port->vdm_sm_running = false;
 }
 
 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
@@ -1898,8 +1918,10 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                 * if there's traffic or we're not in PDO ready state don't send
                 * a VDM.
                 */
-               if (port->state != SRC_READY && port->state != SNK_READY)
+               if (port->state != SRC_READY && port->state != SNK_READY) {
+                       port->vdm_sm_running = false;
                        break;
+               }
 
                /* TODO: AMS operation for Unstructured VDM */
                if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
@@ -2471,10 +2493,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
                                           NONE_AMS);
                break;
        case PD_DATA_VENDOR_DEF:
-               if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
-                       tcpm_handle_vdm_request(port, msg->payload, cnt);
-               else if (port->negotiated_rev > PD_REV20)
-                       tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
+               tcpm_handle_vdm_request(port, msg->payload, cnt);
                break;
        case PD_DATA_BIST:
                port->bist_request = le32_to_cpu(msg->payload[0]);
@@ -2555,10 +2574,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                                                                       TYPEC_PWR_MODE_PD,
                                                                       port->pps_data.active,
                                                                       port->supply_voltage);
-                               /* Set VDM running flag ASAP */
-                               if (port->data_role == TYPEC_HOST &&
-                                   port->send_discover)
-                                       port->vdm_sm_running = true;
                                tcpm_set_state(port, SNK_READY, 0);
                        } else {
                                /*
@@ -2596,14 +2611,10 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                switch (port->state) {
                case SNK_NEGOTIATE_CAPABILITIES:
                        /* USB PD specification, Figure 8-43 */
-                       if (port->explicit_contract) {
+                       if (port->explicit_contract)
                                next_state = SNK_READY;
-                               if (port->data_role == TYPEC_HOST &&
-                                   port->send_discover)
-                                       port->vdm_sm_running = true;
-                       } else {
+                       else
                                next_state = SNK_WAIT_CAPABILITIES;
-                       }
 
                        /* Threshold was relaxed before sending Request. Restore it back. */
                        tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
@@ -2618,10 +2629,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                        port->pps_status = (type == PD_CTRL_WAIT ?
                                            -EAGAIN : -EOPNOTSUPP);
 
-                       if (port->data_role == TYPEC_HOST &&
-                           port->send_discover)
-                               port->vdm_sm_running = true;
-
                        /* Threshold was relaxed before sending Request. Restore it back. */
                        tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
                                                               port->pps_data.active,
@@ -2697,10 +2704,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                        }
                        break;
                case DR_SWAP_SEND:
-                       if (port->data_role == TYPEC_DEVICE &&
-                           port->send_discover)
-                               port->vdm_sm_running = true;
-
                        tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
                        break;
                case PR_SWAP_SEND:
@@ -2738,7 +2741,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                                           PD_MSG_CTRL_NOT_SUPP,
                                           NONE_AMS);
                } else {
-                       if (port->vdm_sm_running) {
+                       if (port->send_discover) {
                                tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                                break;
                        }
@@ -2754,7 +2757,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                                           PD_MSG_CTRL_NOT_SUPP,
                                           NONE_AMS);
                } else {
-                       if (port->vdm_sm_running) {
+                       if (port->send_discover) {
                                tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                                break;
                        }
@@ -2763,7 +2766,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                }
                break;
        case PD_CTRL_VCONN_SWAP:
-               if (port->vdm_sm_running) {
+               if (port->send_discover) {
                        tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                        break;
                }
@@ -4479,18 +4482,20 @@ static void run_state_machine(struct tcpm_port *port)
        /* DR_Swap states */
        case DR_SWAP_SEND:
                tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
+               if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+                       port->send_discover = true;
                tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
                                    PD_T_SENDER_RESPONSE);
                break;
        case DR_SWAP_ACCEPT:
                tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
-               /* Set VDM state machine running flag ASAP */
-               if (port->data_role == TYPEC_DEVICE && port->send_discover)
-                       port->vdm_sm_running = true;
+               if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+                       port->send_discover = true;
                tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
                break;
        case DR_SWAP_SEND_TIMEOUT:
                tcpm_swap_complete(port, -ETIMEDOUT);
+               port->send_discover = false;
                tcpm_ams_finish(port);
                tcpm_set_state(port, ready_state(port), 0);
                break;
@@ -4502,7 +4507,6 @@ static void run_state_machine(struct tcpm_port *port)
                } else {
                        tcpm_set_roles(port, true, port->pwr_role,
                                       TYPEC_HOST);
-                       port->send_discover = true;
                }
                tcpm_ams_finish(port);
                tcpm_set_state(port, ready_state(port), 0);
@@ -4645,8 +4649,6 @@ static void run_state_machine(struct tcpm_port *port)
                break;
        case VCONN_SWAP_SEND_TIMEOUT:
                tcpm_swap_complete(port, -ETIMEDOUT);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, ready_state(port), 0);
                break;
        case VCONN_SWAP_START:
@@ -4662,14 +4664,10 @@ static void run_state_machine(struct tcpm_port *port)
        case VCONN_SWAP_TURN_ON_VCONN:
                tcpm_set_vconn(port, true);
                tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, ready_state(port), 0);
                break;
        case VCONN_SWAP_TURN_OFF_VCONN:
                tcpm_set_vconn(port, false);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, ready_state(port), 0);
                break;
 
@@ -4677,8 +4675,6 @@ static void run_state_machine(struct tcpm_port *port)
        case PR_SWAP_CANCEL:
        case VCONN_SWAP_CANCEL:
                tcpm_swap_complete(port, port->swap_status);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                if (port->pwr_role == TYPEC_SOURCE)
                        tcpm_set_state(port, SRC_READY, 0);
                else
@@ -5028,9 +5024,6 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port)
        switch (port->state) {
        case SNK_TRANSITION_SINK_VBUS:
                port->explicit_contract = true;
-               /* Set the VDM flag ASAP */
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, SNK_READY, 0);
                break;
        case SNK_DISCOVERY:
@@ -5369,7 +5362,7 @@ EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
 void tcpm_sink_frs(struct tcpm_port *port)
 {
        spin_lock(&port->pd_event_lock);
-       port->pd_events = TCPM_FRS_EVENT;
+       port->pd_events |= TCPM_FRS_EVENT;
        spin_unlock(&port->pd_event_lock);
        kthread_queue_work(port->wq, &port->event_work);
 }
@@ -5378,7 +5371,7 @@ EXPORT_SYMBOL_GPL(tcpm_sink_frs);
 void tcpm_sourcing_vbus(struct tcpm_port *port)
 {
        spin_lock(&port->pd_event_lock);
-       port->pd_events = TCPM_SOURCING_VBUS;
+       port->pd_events |= TCPM_SOURCING_VBUS;
        spin_unlock(&port->pd_event_lock);
        kthread_queue_work(port->wq, &port->event_work);
 }
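
Both one-line fixes above turn an assignment into an OR so a new event cannot overwrite one that the event work has not consumed yet; in isolation (flag values hypothetical):

#include <stdint.h>
#include <stdio.h>

#define EV_FRS          (1u << 0)       /* hypothetical event bits */
#define EV_SOURCING     (1u << 1)

int main(void)
{
        uint32_t pending = EV_FRS;      /* first event queued, not yet processed */

        pending |= EV_SOURCING;         /* fixed code: both bits survive (0x3) */
        /* pending = EV_SOURCING;          old code: EV_FRS would be lost (0x2) */

        printf("pending = 0x%x\n", pending);
        return 0;
}
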
@@ -5425,15 +5418,18 @@ static void tcpm_send_discover_work(struct kthread_work *work)
        if (!port->send_discover)
                goto unlock;
 
+       if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
+               port->send_discover = false;
+               goto unlock;
+       }
+
        /* Retry if the port is not idle */
        if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
                mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
                goto unlock;
        }
 
-       /* Only send the Message if the port is host for PD rev2.0 */
-       if (port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20)
-               tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+       tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
 
 unlock:
        mutex_unlock(&port->lock);
index 21b78f1..351c6cf 100644 (file)
@@ -493,9 +493,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    dev, &ifc_vdpa_ops, NULL);
-       if (adapter == NULL) {
+       if (IS_ERR(adapter)) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
-               return -ENOMEM;
+               return PTR_ERR(adapter);
        }
 
        pci_set_master(pdev);
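
vdpa_alloc_device() reports failure with an ERR_PTR-encoded pointer rather than NULL, so the probe paths here (and in vdpa_sim and vp_vdpa below) switch to IS_ERR()/PTR_ERR(). The generic shape of that check, with a hypothetical allocator:

#include <linux/device.h>
#include <linux/err.h>

struct demo_dev;
struct demo_dev *demo_alloc_device(struct device *parent);     /* hypothetical, returns ERR_PTR() on failure */

static int demo_probe_alloc(struct device *parent, struct demo_dev **out)
{
        struct demo_dev *d;

        d = demo_alloc_device(parent);
        if (IS_ERR(d))
                return PTR_ERR(d);      /* propagate the real errno, not a blanket -ENOMEM */

        *out = d;
        return 0;
}
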
index dcee603..e59135f 100644 (file)
@@ -512,11 +512,6 @@ out:
        mutex_unlock(&mr->mkey_mtx);
 }
 
-static bool map_empty(struct vhost_iotlb *iotlb)
-{
-       return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
-}
-
 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
                             bool *change_map)
 {
@@ -524,10 +519,6 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
        int err = 0;
 
        *change_map = false;
-       if (map_empty(iotlb)) {
-               mlx5_vdpa_destroy_mr(mvdev);
-               return 0;
-       }
        mutex_lock(&mr->mkey_mtx);
        if (mr->initialized) {
                mlx5_vdpa_info(mvdev, "memory map update\n");
index 2a31467..3cc12fc 100644 (file)
@@ -526,7 +526,6 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
        void __iomem *uar_page = ndev->mvdev.res.uar->map;
        u32 out[MLX5_ST_SZ_DW(create_cq_out)];
        struct mlx5_vdpa_cq *vcq = &mvq->cq;
-       unsigned int irqn;
        __be64 *pas;
        int inlen;
        void *cqc;
@@ -566,7 +565,7 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
        /* Use vector 0 by default. Consider adding code to choose least used
         * vector.
         */
-       err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
+       err = mlx5_vector2eqn(mdev, 0, &eqn);
        if (err)
                goto err_vec;
 
@@ -753,12 +752,12 @@ static int get_queue_type(struct mlx5_vdpa_net *ndev)
        type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
 
        /* prefer split queue */
-       if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED)
-               return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
+       if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
+               return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
 
-       WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT));
+       WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));
 
-       return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
+       return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
 }
 
 static bool vq_is_tx(u16 idx)
@@ -2030,6 +2029,12 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
                return -ENOSPC;
 
        mdev = mgtdev->madev->mdev;
+       if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) &
+           MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) {
+               dev_warn(mdev->device, "missing support for split virtqueues\n");
+               return -EOPNOTSUPP;
+       }
+
        /* we save one virtqueue for control virtqueue should we require it */
        max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
        max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
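
The added check above fails device creation early with -EOPNOTSUPP when the
split virtqueue capability is absent, instead of tripping over the missing
feature later during queue setup. A hedged sketch of that capability-gating
pattern, with illustrative bit names only:

    #include <linux/bits.h>
    #include <linux/errno.h>

    #define CAP_VQ_SPLIT    BIT(0)          /* illustrative capability bits */
    #define CAP_VQ_PACKED   BIT(1)

    /* Refuse to instantiate the device unless the required capability is set. */
    static int check_required_caps(unsigned int cap_mask)
    {
            if (!(cap_mask & CAP_VQ_SPLIT))
                    return -EOPNOTSUPP;
            return 0;
    }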
index 14e024d..c621cf7 100644 (file)
@@ -251,8 +251,10 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
 
        vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
                                    dev_attr->name);
-       if (!vdpasim)
+       if (IS_ERR(vdpasim)) {
+               ret = PTR_ERR(vdpasim);
                goto err_alloc;
+       }
 
        vdpasim->dev_attr = *dev_attr;
        INIT_WORK(&vdpasim->work, dev_attr->work_fn);
index 7b4a639..fe05273 100644 (file)
@@ -436,9 +436,9 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
                                    dev, &vp_vdpa_ops, NULL);
-       if (vp_vdpa == NULL) {
+       if (IS_ERR(vp_vdpa)) {
                dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
-               return -ENOMEM;
+               return PTR_ERR(vp_vdpa);
        }
 
        mdev = &vp_vdpa->mdev;
index 210ab35..9479f7f 100644 (file)
@@ -614,7 +614,8 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
        long pinned;
        int ret = 0;
 
-       if (msg->iova < v->range.first ||
+       if (msg->iova < v->range.first || !msg->size ||
+           msg->iova > U64_MAX - msg->size + 1 ||
            msg->iova + msg->size - 1 > v->range.last)
                return -EINVAL;
 
index b9e853e..59edb5a 100644 (file)
@@ -735,10 +735,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 }
 
+/* Make sure 64 bit math will not overflow. */
 static bool vhost_overflow(u64 uaddr, u64 size)
 {
-       /* Make sure 64 bit math will not overflow. */
-       return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
+       if (uaddr > ULONG_MAX || size > ULONG_MAX)
+               return true;
+
+       if (!size)
+               return false;
+
+       return uaddr > ULONG_MAX - size + 1;
 }
 
 /* Caller should have vq mutex and device mutex. */
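
Both this vhost hunk and the vhost-vdpa hunk above harden the same
arithmetic: a naive addr + size comparison can wrap around, so the check is
rewritten as addr > MAX - size + 1, with size == 0 handled separately because
MAX - size + 1 would itself overflow. A compact sketch of the idiom:

    #include <linux/limits.h>
    #include <linux/types.h>

    /* True if [addr, addr + size) does not fit into the u64 address space. */
    static bool range_wraps(u64 addr, u64 size)
    {
            if (!size)
                    return false;           /* an empty range never wraps */

            /* same as "addr + size - 1 > U64_MAX", without the risky addition */
            return addr > U64_MAX - size + 1;
    }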
index 4af8fa2..14e2043 100644 (file)
@@ -359,7 +359,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
                        iov = wiov;
                else {
                        iov = riov;
-                       if (unlikely(wiov && wiov->i)) {
+                       if (unlikely(wiov && wiov->used)) {
                                vringh_bad("Readable desc %p after writable",
                                           &descs[i]);
                                err = -EINVAL;
index 0d002a3..fbc9f10 100644 (file)
@@ -64,6 +64,14 @@ int acrn_vm_destroy(struct acrn_vm *vm)
            test_and_set_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags))
                return 0;
 
+       ret = hcall_destroy_vm(vm->vmid);
+       if (ret < 0) {
+               dev_err(acrn_dev.this_device,
+                       "Failed to destroy VM %u\n", vm->vmid);
+               clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
+               return ret;
+       }
+
        /* Remove from global VM list */
        write_lock_bh(&acrn_vm_list_lock);
        list_del_init(&vm->list);
@@ -78,14 +86,6 @@ int acrn_vm_destroy(struct acrn_vm *vm)
                vm->monitor_page = NULL;
        }
 
-       ret = hcall_destroy_vm(vm->vmid);
-       if (ret < 0) {
-               dev_err(acrn_dev.this_device,
-                       "Failed to destroy VM %u\n", vm->vmid);
-               clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
-               return ret;
-       }
-
        acrn_vm_all_ram_unmap(vm);
 
        dev_dbg(acrn_dev.this_device, "VM %u destroyed.\n", vm->vmid);
index 4b15c00..49984d2 100644 (file)
@@ -355,6 +355,7 @@ int register_virtio_device(struct virtio_device *dev)
        virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 
        INIT_LIST_HEAD(&dev->vqs);
+       spin_lock_init(&dev->vqs_list_lock);
 
        /*
         * device_add() causes the bus infrastructure to look for a matching
index 09ed55d..b91bc81 100644 (file)
@@ -1242,12 +1242,19 @@ static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
                        do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
                                    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
                }
+
+               /*
+                * virtio_mem_set_fake_offline() might sleep; we don't need
+                * the device anymore. See virtio_mem_remove() for how races
+                * between memory onlining and device removal are handled.
+                */
+               rcu_read_unlock();
+
                if (do_online)
                        generic_online_page(page, order);
                else
                        virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
                                                    false);
-               rcu_read_unlock();
                return;
        }
        rcu_read_unlock();
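
The virtio-mem hunk above moves rcu_read_unlock() ahead of the online/offline
calls because virtio_mem_set_fake_offline() may sleep, and sleeping inside an
RCU read-side critical section is not allowed. A reduced sketch of the shape
of the fix; both helpers are hypothetical stand-ins, not virtio-mem symbols:

    #include <linux/memory_hotplug.h>
    #include <linux/mm.h>
    #include <linux/rcupdate.h>

    bool decide_under_rcu(struct page *page);       /* hypothetical, must not sleep */
    void fake_offline_may_sleep(struct page *page, unsigned int order); /* hypothetical */

    static void online_page_sketch(struct page *page, unsigned int order)
    {
            bool do_online;

            rcu_read_lock();
            do_online = decide_under_rcu(page);
            rcu_read_unlock();      /* drop the read lock before possibly sleeping */

            if (do_online)
                    generic_online_page(page, order);
            else
                    fake_offline_may_sleep(page, order);
    }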
index 222d630..b35bb2d 100644 (file)
@@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct device *dev = get_device(&vp_dev->vdev.dev);
 
+       /*
+        * Device is marked broken on surprise removal so that virtio upper
+        * layers can abort any ongoing operation.
+        */
+       if (!pci_device_is_present(pci_dev))
+               virtio_break_device(&vp_dev->vdev);
+
        pci_disable_sriov(pci_dev);
 
        unregister_virtio_device(&vp_dev->vdev);
index 89bfe46..dd95dfd 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/hrtimer.h>
 #include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
 #include <xen/xen.h>
 
 #ifdef DEBUG
@@ -1755,7 +1756,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
                        cpu_to_le16(vq->packed.event_flags_shadow);
        }
 
+       spin_lock(&vdev->vqs_list_lock);
        list_add_tail(&vq->vq.list, &vdev->vqs);
+       spin_unlock(&vdev->vqs_list_lock);
        return &vq->vq;
 
 err_desc_extra:
@@ -2229,7 +2232,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
        memset(vq->split.desc_state, 0, vring.num *
                        sizeof(struct vring_desc_state_split));
 
+       spin_lock(&vdev->vqs_list_lock);
        list_add_tail(&vq->vq.list, &vdev->vqs);
+       spin_unlock(&vdev->vqs_list_lock);
        return &vq->vq;
 
 err_extra:
@@ -2291,6 +2296,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
+       spin_lock(&vq->vq.vdev->vqs_list_lock);
+       list_del(&_vq->list);
+       spin_unlock(&vq->vq.vdev->vqs_list_lock);
+
        if (vq->we_own_ring) {
                if (vq->packed_ring) {
                        vring_free_queue(vq->vq.vdev,
@@ -2321,7 +2330,6 @@ void vring_del_virtqueue(struct virtqueue *_vq)
                kfree(vq->split.desc_state);
                kfree(vq->split.desc_extra);
        }
-       list_del(&_vq->list);
        kfree(vq);
 }
 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2373,7 +2381,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
-       return vq->broken;
+       return READ_ONCE(vq->broken);
 }
 EXPORT_SYMBOL_GPL(virtqueue_is_broken);
 
@@ -2385,10 +2393,14 @@ void virtio_break_device(struct virtio_device *dev)
 {
        struct virtqueue *_vq;
 
+       spin_lock(&dev->vqs_list_lock);
        list_for_each_entry(_vq, &dev->vqs, list) {
                struct vring_virtqueue *vq = to_vvq(_vq);
-               vq->broken = true;
+
+               /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+               WRITE_ONCE(vq->broken, true);
        }
+       spin_unlock(&dev->vqs_list_lock);
 }
 EXPORT_SYMBOL_GPL(virtio_break_device);
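
Together with the register_virtio_device() and vring_del_virtqueue() hunks
above, this completes two small patterns: every addition to and removal from
the device's virtqueue list happens under the new vqs_list_lock, and the
broken flag is written with WRITE_ONCE() so the lockless READ_ONCE() in
virtqueue_is_broken() always reads a consistent value. A reduced sketch with
generic names rather than the virtio structures:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct dev_ctx {
            spinlock_t list_lock;
            struct list_head queues;
    };

    struct queue_ctx {
            struct list_head node;
            bool broken;
    };

    static void queue_register(struct dev_ctx *d, struct queue_ctx *q)
    {
            spin_lock(&d->list_lock);
            list_add_tail(&q->node, &d->queues);
            spin_unlock(&d->list_lock);
    }

    static void dev_break_all(struct dev_ctx *d)
    {
            struct queue_ctx *q;

            spin_lock(&d->list_lock);
            list_for_each_entry(q, &d->queues, node)
                    WRITE_ONCE(q->broken, true);    /* pairs with READ_ONCE() below */
            spin_unlock(&d->list_lock);
    }

    static bool queue_is_broken(struct queue_ctx *q)
    {
            return READ_ONCE(q->broken);            /* may be called without the lock */
    }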
 
index e1a1411..72eaef2 100644 (file)
@@ -151,6 +151,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
        if (!name)
                return NULL;
 
+       if (index >= vdpa->nvqs)
+               return ERR_PTR(-ENOENT);
+
        /* Queue shouldn't already be set up. */
        if (ops->get_vq_ready(vdpa, index))
                return ERR_PTR(-ENOENT);
index d7e361f..a78704a 100644 (file)
@@ -198,12 +198,12 @@ static void disable_dynirq(struct irq_data *data);
 
 static DEFINE_PER_CPU(unsigned int, irq_epoch);
 
-static void clear_evtchn_to_irq_row(unsigned row)
+static void clear_evtchn_to_irq_row(int *evtchn_row)
 {
        unsigned col;
 
        for (col = 0; col < EVTCHN_PER_ROW; col++)
-               WRITE_ONCE(evtchn_to_irq[row][col], -1);
+               WRITE_ONCE(evtchn_row[col], -1);
 }
 
 static void clear_evtchn_to_irq_all(void)
@@ -213,7 +213,7 @@ static void clear_evtchn_to_irq_all(void)
        for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
                if (evtchn_to_irq[row] == NULL)
                        continue;
-               clear_evtchn_to_irq_row(row);
+               clear_evtchn_to_irq_row(evtchn_to_irq[row]);
        }
 }
 
@@ -221,6 +221,7 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
 {
        unsigned row;
        unsigned col;
+       int *evtchn_row;
 
        if (evtchn >= xen_evtchn_max_channels())
                return -EINVAL;
@@ -233,11 +234,18 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
                if (irq == -1)
                        return 0;
 
-               evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
-               if (evtchn_to_irq[row] == NULL)
+               evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
+               if (evtchn_row == NULL)
                        return -ENOMEM;
 
-               clear_evtchn_to_irq_row(row);
+               clear_evtchn_to_irq_row(evtchn_row);
+
+               /*
+                * We've prepared an empty row for the mapping. If a different
+                * thread was faster inserting it, we can drop ours.
+                */
+               if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
+                       free_page((unsigned long) evtchn_row);
        }
 
        WRITE_ONCE(evtchn_to_irq[row][col], irq);
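
The cmpxchg() above turns the lazy row allocation into an allocate,
try-to-publish, free-on-loss sequence: whichever CPU installs its freshly
cleared page first wins, and a loser simply frees its copy instead of
overwriting the winner's mapping. A hedged sketch of the same idiom against a
generic pointer slot:

    #include <linux/atomic.h>
    #include <linux/gfp.h>

    /* Lazily populate a shared pointer slot that several CPUs may race on. */
    static int *lazy_get_row(int **slot)
    {
            int *row = READ_ONCE(*slot);

            if (row)
                    return row;

            row = (int *)get_zeroed_page(GFP_KERNEL);
            if (!row)
                    return NULL;

            /* Publish our page only if nobody beat us to it. */
            if (cmpxchg(slot, NULL, row) != NULL) {
                    free_page((unsigned long)row);  /* lost the race, drop ours */
                    row = READ_ONCE(*slot);
            }
            return row;
    }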
@@ -1009,7 +1017,7 @@ static void __unbind_from_irq(unsigned int irq)
 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                             unsigned pirq, int shareable, char *name)
 {
-       int irq = -1;
+       int irq;
        struct physdev_irq irq_op;
        int ret;
 
index 59c32c9..aab5e65 100644 (file)
@@ -121,10 +121,6 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
 
        p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
 
-       /* No mandatory locks */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
-
        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
@@ -312,10 +308,6 @@ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
                 filp, cmd, fl, filp);
 
-       /* No mandatory locks */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               goto out_err;
-
        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
@@ -327,7 +319,6 @@ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
                ret = v9fs_file_getlock(filp, fl);
        else
                ret = -EINVAL;
-out_err:
        return ret;
 }
 
@@ -348,10 +339,6 @@ static int v9fs_file_flock_dotl(struct file *filp, int cmd,
        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
                 filp, cmd, fl, filp);
 
-       /* No mandatory locks */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               goto out_err;
-
        if (!(fl->fl_flags & FL_FLOCK))
                goto out_err;
 
@@ -625,12 +612,7 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
        p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
 
        inode = file_inode(vma->vm_file);
-
-       if (!mapping_can_writeback(inode->i_mapping))
-               wbc.nr_to_write = 0;
-
-       might_sleep();
-       sync_inode(inode, &wbc);
+       filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
 }
 
 
index a7749c1..b11bd4b 100644 (file)
@@ -101,16 +101,6 @@ config FILE_LOCKING
           for filesystems like NFS and for the flock() system
           call. Disabling this option saves about 11k.
 
-config MANDATORY_FILE_LOCKING
-       bool "Enable Mandatory file locking"
-       depends on FILE_LOCKING
-       default y
-       help
-         This option enables files appropriately marked files on appropriely
-         mounted filesystems to support mandatory locking.
-
-         To the best of my knowledge this is dead code that no one cares about.
-
 source "fs/crypto/Kconfig"
 
 source "fs/verity/Kconfig"
@@ -358,7 +348,15 @@ config NFS_V4_2_SSC_HELPER
 
 source "net/sunrpc/Kconfig"
 source "fs/ceph/Kconfig"
+
 source "fs/cifs/Kconfig"
+source "fs/ksmbd/Kconfig"
+
+config CIFS_COMMON
+       tristate
+       default y if CIFS=y
+       default m if CIFS=m
+
 source "fs/coda/Kconfig"
 source "fs/afs/Kconfig"
 source "fs/9p/Kconfig"
index f98f3e6..354e2ba 100644 (file)
@@ -96,7 +96,9 @@ obj-$(CONFIG_LOCKD)           += lockd/
 obj-$(CONFIG_NLS)              += nls/
 obj-$(CONFIG_UNICODE)          += unicode/
 obj-$(CONFIG_SYSV_FS)          += sysv/
+obj-$(CONFIG_CIFS_COMMON)      += cifs_common/
 obj-$(CONFIG_CIFS)             += cifs/
+obj-$(CONFIG_SMB_SERVER)       += ksmbd/
 obj-$(CONFIG_HPFS_FS)          += hpfs/
 obj-$(CONFIG_NTFS_FS)          += ntfs/
 obj-$(CONFIG_UFS_FS)           += ufs/
index cb3054c..c4210a3 100644 (file)
@@ -772,10 +772,6 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
               fl->fl_type, fl->fl_flags,
               (long long) fl->fl_start, (long long) fl->fl_end);
 
-       /* AFS doesn't support mandatory locks */
-       if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
-
        if (IS_GETLK(cmd))
                return afs_do_getlk(file, fl);
 
index 76ce0cc..51b08ab 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1695,7 +1695,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                list_del(&iocb->ki_list);
                iocb->ki_res.res = mangle_poll(mask);
                req->done = true;
-               if (iocb->ki_eventfd && eventfd_signal_count()) {
+               if (iocb->ki_eventfd && eventfd_signal_allowed()) {
                        iocb = NULL;
                        INIT_WORK(&req->work, aio_poll_put_work);
                        schedule_work(&req->work);
index 9ef4f1f..45df6cb 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/uaccess.h>
 #include <linux/suspend.h>
 #include "internal.h"
+#include "../block/blk.h"
 
 struct bdev_inode {
        struct block_device bdev;
@@ -385,7 +386,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;
 
-       bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
+       bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
 
        dio = container_of(bio, struct blkdev_dio, bio);
        dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -513,7 +514,9 @@ blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 
 static __init int blkdev_init(void)
 {
-       return bioset_init(&blkdev_dio_pool, 4, offsetof(struct blkdev_dio, bio), BIOSET_NEED_BVECS);
+       return bioset_init(&blkdev_dio_pool, 4,
+                               offsetof(struct blkdev_dio, bio),
+                               BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
 }
 module_init(blkdev_init);
 
@@ -686,7 +689,8 @@ static loff_t block_llseek(struct file *file, loff_t offset, int whence)
        return retval;
 }
        
-int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
+static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
+               int datasync)
 {
        struct inode *bd_inode = bdev_file_inode(filp);
        struct block_device *bdev = I_BDEV(bd_inode);
@@ -707,7 +711,6 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 
        return error;
 }
-EXPORT_SYMBOL(blkdev_fsync);
 
 /**
  * bdev_read_page() - Start reading a page from a block device
@@ -801,7 +804,6 @@ static struct inode *bdev_alloc_inode(struct super_block *sb)
        if (!ei)
                return NULL;
        memset(&ei->bdev, 0, sizeof(ei->bdev));
-       ei->bdev.bd_bdi = &noop_backing_dev_info;
        return &ei->vfs_inode;
 }
 
@@ -812,8 +814,15 @@ static void bdev_free_inode(struct inode *inode)
        free_percpu(bdev->bd_stats);
        kfree(bdev->bd_meta_info);
 
-       if (!bdev_is_partition(bdev))
+       if (!bdev_is_partition(bdev)) {
+               if (bdev->bd_disk && bdev->bd_disk->bdi)
+                       bdi_put(bdev->bd_disk->bdi);
                kfree(bdev->bd_disk);
+       }
+
+       if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
+               blk_free_ext_minor(MINOR(bdev->bd_dev));
+
        kmem_cache_free(bdev_cachep, BDEV_I(inode));
 }
 
@@ -826,16 +835,9 @@ static void init_once(void *data)
 
 static void bdev_evict_inode(struct inode *inode)
 {
-       struct block_device *bdev = &BDEV_I(inode)->bdev;
        truncate_inode_pages_final(&inode->i_data);
        invalidate_inode_buffers(inode); /* is it needed here? */
        clear_inode(inode);
-       /* Detach inode from wb early as bdi_put() may free bdi->wb */
-       inode_detach_wb(inode);
-       if (bdev->bd_bdi != &noop_backing_dev_info) {
-               bdi_put(bdev->bd_bdi);
-               bdev->bd_bdi = &noop_backing_dev_info;
-       }
 }
 
 static const struct super_operations bdev_sops = {
@@ -902,9 +904,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
        bdev->bd_disk = disk;
        bdev->bd_partno = partno;
        bdev->bd_inode = inode;
-#ifdef CONFIG_SYSFS
-       INIT_LIST_HEAD(&bdev->bd_holder_disks);
-#endif
        bdev->bd_stats = alloc_percpu(struct disk_stats);
        if (!bdev->bd_stats) {
                iput(inode);
@@ -921,31 +920,6 @@ void bdev_add(struct block_device *bdev, dev_t dev)
        insert_inode_hash(bdev->bd_inode);
 }
 
-static struct block_device *bdget(dev_t dev)
-{
-       struct inode *inode;
-
-       inode = ilookup(blockdev_superblock, dev);
-       if (!inode)
-               return NULL;
-       return &BDEV_I(inode)->bdev;
-}
-
-/**
- * bdgrab -- Grab a reference to an already referenced block device
- * @bdev:      Block device to grab a reference to.
- *
- * Returns the block_device with an additional reference when successful,
- * or NULL if the inode is already beeing freed.
- */
-struct block_device *bdgrab(struct block_device *bdev)
-{
-       if (!igrab(bdev->bd_inode))
-               return NULL;
-       return bdev;
-}
-EXPORT_SYMBOL(bdgrab);
-
 long nr_blockdev_pages(void)
 {
        struct inode *inode;
@@ -959,12 +933,6 @@ long nr_blockdev_pages(void)
        return ret;
 }
 
-void bdput(struct block_device *bdev)
-{
-       iput(bdev->bd_inode);
-}
-EXPORT_SYMBOL(bdput);
 /**
  * bd_may_claim - test whether a block device can be claimed
  * @bdev: block device of interest
@@ -1094,148 +1062,6 @@ void bd_abort_claiming(struct block_device *bdev, void *holder)
 }
 EXPORT_SYMBOL(bd_abort_claiming);
 
-#ifdef CONFIG_SYSFS
-struct bd_holder_disk {
-       struct list_head        list;
-       struct gendisk          *disk;
-       int                     refcnt;
-};
-
-static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
-                                                 struct gendisk *disk)
-{
-       struct bd_holder_disk *holder;
-
-       list_for_each_entry(holder, &bdev->bd_holder_disks, list)
-               if (holder->disk == disk)
-                       return holder;
-       return NULL;
-}
-
-static int add_symlink(struct kobject *from, struct kobject *to)
-{
-       return sysfs_create_link(from, to, kobject_name(to));
-}
-
-static void del_symlink(struct kobject *from, struct kobject *to)
-{
-       sysfs_remove_link(from, kobject_name(to));
-}
-
-/**
- * bd_link_disk_holder - create symlinks between holding disk and slave bdev
- * @bdev: the claimed slave bdev
- * @disk: the holding disk
- *
- * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
- *
- * This functions creates the following sysfs symlinks.
- *
- * - from "slaves" directory of the holder @disk to the claimed @bdev
- * - from "holders" directory of the @bdev to the holder @disk
- *
- * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
- * passed to bd_link_disk_holder(), then:
- *
- *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
- *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
- *
- * The caller must have claimed @bdev before calling this function and
- * ensure that both @bdev and @disk are valid during the creation and
- * lifetime of these symlinks.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
-{
-       struct bd_holder_disk *holder;
-       int ret = 0;
-
-       mutex_lock(&bdev->bd_disk->open_mutex);
-
-       WARN_ON_ONCE(!bdev->bd_holder);
-
-       /* FIXME: remove the following once add_disk() handles errors */
-       if (WARN_ON(!disk->slave_dir || !bdev->bd_holder_dir))
-               goto out_unlock;
-
-       holder = bd_find_holder_disk(bdev, disk);
-       if (holder) {
-               holder->refcnt++;
-               goto out_unlock;
-       }
-
-       holder = kzalloc(sizeof(*holder), GFP_KERNEL);
-       if (!holder) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
-
-       INIT_LIST_HEAD(&holder->list);
-       holder->disk = disk;
-       holder->refcnt = 1;
-
-       ret = add_symlink(disk->slave_dir, bdev_kobj(bdev));
-       if (ret)
-               goto out_free;
-
-       ret = add_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
-       if (ret)
-               goto out_del;
-       /*
-        * bdev could be deleted beneath us which would implicitly destroy
-        * the holder directory.  Hold on to it.
-        */
-       kobject_get(bdev->bd_holder_dir);
-
-       list_add(&holder->list, &bdev->bd_holder_disks);
-       goto out_unlock;
-
-out_del:
-       del_symlink(disk->slave_dir, bdev_kobj(bdev));
-out_free:
-       kfree(holder);
-out_unlock:
-       mutex_unlock(&bdev->bd_disk->open_mutex);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(bd_link_disk_holder);
-
-/**
- * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
- * @bdev: the calimed slave bdev
- * @disk: the holding disk
- *
- * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
- *
- * CONTEXT:
- * Might sleep.
- */
-void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
-{
-       struct bd_holder_disk *holder;
-
-       mutex_lock(&bdev->bd_disk->open_mutex);
-
-       holder = bd_find_holder_disk(bdev, disk);
-
-       if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
-               del_symlink(disk->slave_dir, bdev_kobj(bdev));
-               del_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
-               kobject_put(bdev->bd_holder_dir);
-               list_del_init(&holder->list);
-               kfree(holder);
-       }
-
-       mutex_unlock(&bdev->bd_disk->open_mutex);
-}
-EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
-#endif
-
 static void blkdev_flush_mapping(struct block_device *bdev)
 {
        WARN_ON_ONCE(bdev->bd_holders);
@@ -1260,11 +1086,8 @@ static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
                }
        }
 
-       if (!bdev->bd_openers) {
+       if (!bdev->bd_openers)
                set_init_blocksize(bdev);
-               if (bdev->bd_bdi == &noop_backing_dev_info)
-                       bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
-       }
        if (test_bit(GD_NEED_PART_SCAN, &disk->state))
                bdev_disk_changed(disk, false);
        bdev->bd_openers++;
@@ -1282,16 +1105,14 @@ static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
 static int blkdev_get_part(struct block_device *part, fmode_t mode)
 {
        struct gendisk *disk = part->bd_disk;
-       struct block_device *whole;
        int ret;
 
        if (part->bd_openers)
                goto done;
 
-       whole = bdgrab(disk->part0);
-       ret = blkdev_get_whole(whole, mode);
+       ret = blkdev_get_whole(bdev_whole(part), mode);
        if (ret)
-               goto out_put_whole;
+               return ret;
 
        ret = -ENXIO;
        if (!bdev_nr_sectors(part))
@@ -1299,16 +1120,12 @@ static int blkdev_get_part(struct block_device *part, fmode_t mode)
 
        disk->open_partitions++;
        set_init_blocksize(part);
-       if (part->bd_bdi == &noop_backing_dev_info)
-               part->bd_bdi = bdi_get(disk->queue->backing_dev_info);
 done:
        part->bd_openers++;
        return 0;
 
 out_blkdev_put:
-       blkdev_put_whole(whole, mode);
-out_put_whole:
-       bdput(whole);
+       blkdev_put_whole(bdev_whole(part), mode);
        return ret;
 }
 
@@ -1321,42 +1138,42 @@ static void blkdev_put_part(struct block_device *part, fmode_t mode)
        blkdev_flush_mapping(part);
        whole->bd_disk->open_partitions--;
        blkdev_put_whole(whole, mode);
-       bdput(whole);
 }
 
 struct block_device *blkdev_get_no_open(dev_t dev)
 {
        struct block_device *bdev;
-       struct gendisk *disk;
+       struct inode *inode;
 
-       bdev = bdget(dev);
-       if (!bdev) {
+       inode = ilookup(blockdev_superblock, dev);
+       if (!inode) {
                blk_request_module(dev);
-               bdev = bdget(dev);
-               if (!bdev)
+               inode = ilookup(blockdev_superblock, dev);
+               if (!inode)
                        return NULL;
        }
 
-       disk = bdev->bd_disk;
-       if (!kobject_get_unless_zero(&disk_to_dev(disk)->kobj))
-               goto bdput;
-       if ((disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
-               goto put_disk;
-       if (!try_module_get(bdev->bd_disk->fops->owner))
-               goto put_disk;
+       /* switch from the inode reference to a device mode one: */
+       bdev = &BDEV_I(inode)->bdev;
+       if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
+               bdev = NULL;
+       iput(inode);
+
+       if (!bdev)
+               return NULL;
+       if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
+           !try_module_get(bdev->bd_disk->fops->owner)) {
+               put_device(&bdev->bd_device);
+               return NULL;
+       }
+
        return bdev;
-put_disk:
-       put_disk(disk);
-bdput:
-       bdput(bdev);
-       return NULL;
 }
 
 void blkdev_put_no_open(struct block_device *bdev)
 {
        module_put(bdev->bd_disk->fops->owner);
-       put_disk(bdev->bd_disk);
-       bdput(bdev);
+       put_device(&bdev->bd_device);
 }
 
 /**
@@ -1409,7 +1226,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
 
        mutex_lock(&disk->open_mutex);
        ret = -ENXIO;
-       if (!(disk->flags & GENHD_FL_UP))
+       if (!disk_live(disk))
                goto abort_claiming;
        if (bdev_is_partition(bdev))
                ret = blkdev_get_part(bdev, mode);
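
The rewritten blkdev_get_no_open() above drops the bdget()/bdgrab()/bdput()
inode juggling in favour of one step: look the inode up, try to convert it
into a device reference with kobject_get_unless_zero(), which succeeds only
while the device is still alive, and then drop the inode reference. A generic
sketch of that try-to-upgrade pattern using a plain kref:

    #include <linux/kref.h>

    struct obj {
            struct kref ref;
    };

    /* Take a reference only if the object has not already begun dying. */
    static struct obj *obj_tryget(struct obj *o)
    {
            if (!o || !kref_get_unless_zero(&o->ref))
                    return NULL;    /* refcount already hit zero: do not resurrect */
            return o;
    }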
index cec88a6..3dcf9bc 100644 (file)
@@ -36,6 +36,7 @@ btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
 btrfs-$(CONFIG_BTRFS_FS_REF_VERIFY) += ref-verify.o
 btrfs-$(CONFIG_BLK_DEV_ZONED) += zoned.o
+btrfs-$(CONFIG_FS_VERITY) += verity.o
 
 btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
        tests/extent-buffer-tests.o tests/btrfs-tests.o \
index d95eb5c..c9f9789 100644 (file)
@@ -53,7 +53,8 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 }
 
 static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
-                        struct inode *inode, struct posix_acl *acl, int type)
+                          struct user_namespace *mnt_userns,
+                          struct inode *inode, struct posix_acl *acl, int type)
 {
        int ret, size = 0;
        const char *name;
@@ -114,12 +115,12 @@ int btrfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
        umode_t old_mode = inode->i_mode;
 
        if (type == ACL_TYPE_ACCESS && acl) {
-               ret = posix_acl_update_mode(&init_user_ns, inode,
+               ret = posix_acl_update_mode(mnt_userns, inode,
                                            &inode->i_mode, &acl);
                if (ret)
                        return ret;
        }
-       ret = __btrfs_set_acl(NULL, inode, acl, type);
+       ret = __btrfs_set_acl(NULL, mnt_userns, inode, acl, type);
        if (ret)
                inode->i_mode = old_mode;
        return ret;
@@ -140,14 +141,14 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
                return ret;
 
        if (default_acl) {
-               ret = __btrfs_set_acl(trans, inode, default_acl,
+               ret = __btrfs_set_acl(trans, &init_user_ns, inode, default_acl,
                                      ACL_TYPE_DEFAULT);
                posix_acl_release(default_acl);
        }
 
        if (acl) {
                if (!ret)
-                       ret = __btrfs_set_acl(trans, inode, acl,
+                       ret = __btrfs_set_acl(trans, &init_user_ns, inode, acl,
                                              ACL_TYPE_ACCESS);
                posix_acl_release(acl);
        }
index 78b202d..f735b87 100644 (file)
@@ -1211,7 +1211,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 again:
        head = NULL;
 
-       ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
+       ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        BUG_ON(ret == 0);
@@ -1488,14 +1488,14 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                         struct btrfs_fs_info *fs_info, u64 bytenr,
                         u64 time_seq, struct ulist **roots,
-                        bool ignore_offset, bool skip_commit_root_sem)
+                        bool skip_commit_root_sem)
 {
        int ret;
 
        if (!trans && !skip_commit_root_sem)
                down_read(&fs_info->commit_root_sem);
        ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
-                                       time_seq, roots, ignore_offset);
+                                       time_seq, roots, false);
        if (!trans && !skip_commit_root_sem)
                up_read(&fs_info->commit_root_sem);
        return ret;
index ff5f07f..ba45403 100644 (file)
@@ -47,7 +47,7 @@ int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
                         const u64 *extent_item_pos, bool ignore_offset);
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                         struct btrfs_fs_info *fs_info, u64 bytenr,
-                        u64 time_seq, struct ulist **roots, bool ignore_offset,
+                        u64 time_seq, struct ulist **roots,
                         bool skip_commit_root_sem);
 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
                        u32 name_len, unsigned long name_off,
index 9e7d9d0..a3b830b 100644 (file)
@@ -1561,7 +1561,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
                                div64_u64(zone_unusable * 100, bg->length));
                trace_btrfs_reclaim_block_group(bg);
                ret = btrfs_relocate_chunk(fs_info, bg->start);
-               if (ret)
+               if (ret && ret != -EAGAIN)
                        btrfs_err(fs_info, "error relocating chunk %llu",
                                  bg->start);
 
@@ -2105,11 +2105,22 @@ static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
                bg->used = em->len;
                bg->flags = map->type;
                ret = btrfs_add_block_group_cache(fs_info, bg);
+               /*
+                * We may have some valid block group cache added already, in
+                * that case we skip to the next one.
+                */
+               if (ret == -EEXIST) {
+                       ret = 0;
+                       btrfs_put_block_group(bg);
+                       continue;
+               }
+
                if (ret) {
                        btrfs_remove_free_space_cache(bg);
                        btrfs_put_block_group(bg);
                        break;
                }
+
                btrfs_update_space_info(fs_info, bg->flags, em->len, em->len,
                                        0, 0, &space_info);
                bg->space_info = space_info;
@@ -2212,6 +2223,14 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
        ret = check_chunk_block_group_mappings(info);
 error:
        btrfs_free_path(path);
+       /*
+        * We've hit some error while reading the extent tree, and the
+        * rescue=ibadroots mount option is set.
+        * Try to fill the tree using dummy block groups so that the user can
+        * continue to mount and grab their data.
+        */
+       if (ret && btrfs_test_opt(info, IGNOREBADROOTS))
+               ret = fill_dummy_bgs(info);
        return ret;
 }
 
@@ -2244,6 +2263,95 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans,
        return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
 }
 
+static int insert_dev_extent(struct btrfs_trans_handle *trans,
+                           struct btrfs_device *device, u64 chunk_offset,
+                           u64 start, u64 num_bytes)
+{
+       struct btrfs_fs_info *fs_info = device->fs_info;
+       struct btrfs_root *root = fs_info->dev_root;
+       struct btrfs_path *path;
+       struct btrfs_dev_extent *extent;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+       int ret;
+
+       WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
+       WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       key.objectid = device->devid;
+       key.type = BTRFS_DEV_EXTENT_KEY;
+       key.offset = start;
+       ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent));
+       if (ret)
+               goto out;
+
+       leaf = path->nodes[0];
+       extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
+       btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID);
+       btrfs_set_dev_extent_chunk_objectid(leaf, extent,
+                                           BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+       btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
+
+       btrfs_set_dev_extent_length(leaf, extent, num_bytes);
+       btrfs_mark_buffer_dirty(leaf);
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+/*
+ * This function belongs to phase 2.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
+ */
+static int insert_dev_extents(struct btrfs_trans_handle *trans,
+                                  u64 chunk_offset, u64 chunk_size)
+{
+       struct btrfs_fs_info *fs_info = trans->fs_info;
+       struct btrfs_device *device;
+       struct extent_map *em;
+       struct map_lookup *map;
+       u64 dev_offset;
+       u64 stripe_size;
+       int i;
+       int ret = 0;
+
+       em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
+       if (IS_ERR(em))
+               return PTR_ERR(em);
+
+       map = em->map_lookup;
+       stripe_size = em->orig_block_len;
+
+       /*
+        * Take the device list mutex to prevent races with the final phase of
+        * a device replace operation that replaces the device object associated
+        * with the map's stripes, because the device object's id can change
+        * at any time during that final phase of the device replace operation
+        * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+        * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
+        * resulting in persisting a device extent item with such ID.
+        */
+       mutex_lock(&fs_info->fs_devices->device_list_mutex);
+       for (i = 0; i < map->num_stripes; i++) {
+               device = map->stripes[i].dev;
+               dev_offset = map->stripes[i].physical;
+
+               ret = insert_dev_extent(trans, device, chunk_offset, dev_offset,
+                                      stripe_size);
+               if (ret)
+                       break;
+       }
+       mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+
+       free_extent_map(em);
+       return ret;
+}
+
 /*
  * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of
  * chunk allocation.
@@ -2278,8 +2386,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
                        if (ret)
                                btrfs_abort_transaction(trans, ret);
                }
-               ret = btrfs_finish_chunk_alloc(trans, block_group->start,
-                                       block_group->length);
+               ret = insert_dev_extents(trans, block_group->start,
+                                        block_group->length);
                if (ret)
                        btrfs_abort_transaction(trans, ret);
                add_block_group_free_space(trans, block_group);
index c652e19..76ee145 100644 (file)
@@ -51,6 +51,13 @@ enum {
         * the file range, inode's io_tree).
         */
        BTRFS_INODE_NO_DELALLOC_FLUSH,
+       /*
+        * Set when we are working on enabling verity for a file. Computing and
+        * writing the whole Merkle tree can take a while so we want to prevent
+        * races where two separate tasks attempt to simultaneously start verity
+        * on the same file.
+        */
+       BTRFS_INODE_VERITY_IN_PROGRESS,
 };
 
 /* in memory btrfs inode */
@@ -189,8 +196,10 @@ struct btrfs_inode {
         */
        u64 csum_bytes;
 
-       /* flags field from the on disk inode */
+       /* Backwards incompatible flags, lower half of inode_item::flags  */
        u32 flags;
+       /* Read-only compatibility flags, upper half of inode_item::flags */
+       u32 ro_flags;
 
        /*
         * Counters to keep track of the number of extent item's we may use due
@@ -348,6 +357,22 @@ struct btrfs_dio_private {
        u8 csums[];
 };
 
+/*
+ * btrfs_inode_item stores flags in a u64, btrfs_inode stores them in two
+ * separate u32s. These two functions convert between the two representations.
+ */
+static inline u64 btrfs_inode_combine_flags(u32 flags, u32 ro_flags)
+{
+       return (flags | ((u64)ro_flags << 32));
+}
+
+static inline void btrfs_inode_split_flags(u64 inode_item_flags,
+                                          u32 *flags, u32 *ro_flags)
+{
+       *flags = (u32)inode_item_flags;
+       *ro_flags = (u32)(inode_item_flags >> 32);
+}
+
 /* Array of bytes with variable length, hexadecimal format 0x1234 */
 #define CSUM_FMT                               "0x%*phN"
 #define CSUM_FMT_VALUE(size, bytes)            size, bytes
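
The two helpers above pack the backwards-incompatible flags into the low 32
bits and the read-only compatibility flags into the high 32 bits of the
on-disk u64. A tiny round-trip sketch with invented values:

    #include <linux/types.h>

    static void flags_roundtrip_sketch(void)
    {
            u32 flags = 0x1, ro_flags = 0x2;                /* invented values */
            u64 on_disk = ((u64)ro_flags << 32) | flags;    /* combine: 0x0000000200000001 */
            u32 lo = (u32)on_disk;                          /* split: lo == flags    */
            u32 hi = (u32)(on_disk >> 32);                  /*        hi == ro_flags */

            (void)lo;
            (void)hi;
    }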
index 1695086..8681608 100644 (file)
@@ -243,47 +243,6 @@ struct btrfsic_state {
        u32 datablock_size;
 };
 
-static void btrfsic_block_init(struct btrfsic_block *b);
-static struct btrfsic_block *btrfsic_block_alloc(void);
-static void btrfsic_block_free(struct btrfsic_block *b);
-static void btrfsic_block_link_init(struct btrfsic_block_link *n);
-static struct btrfsic_block_link *btrfsic_block_link_alloc(void);
-static void btrfsic_block_link_free(struct btrfsic_block_link *n);
-static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds);
-static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void);
-static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds);
-static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h);
-static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
-                                       struct btrfsic_block_hashtable *h);
-static void btrfsic_block_hashtable_remove(struct btrfsic_block *b);
-static struct btrfsic_block *btrfsic_block_hashtable_lookup(
-               struct block_device *bdev,
-               u64 dev_bytenr,
-               struct btrfsic_block_hashtable *h);
-static void btrfsic_block_link_hashtable_init(
-               struct btrfsic_block_link_hashtable *h);
-static void btrfsic_block_link_hashtable_add(
-               struct btrfsic_block_link *l,
-               struct btrfsic_block_link_hashtable *h);
-static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l);
-static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
-               struct block_device *bdev_ref_to,
-               u64 dev_bytenr_ref_to,
-               struct block_device *bdev_ref_from,
-               u64 dev_bytenr_ref_from,
-               struct btrfsic_block_link_hashtable *h);
-static void btrfsic_dev_state_hashtable_init(
-               struct btrfsic_dev_state_hashtable *h);
-static void btrfsic_dev_state_hashtable_add(
-               struct btrfsic_dev_state *ds,
-               struct btrfsic_dev_state_hashtable *h);
-static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds);
-static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev,
-               struct btrfsic_dev_state_hashtable *h);
-static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void);
-static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf);
-static int btrfsic_process_superblock(struct btrfsic_state *state,
-                                     struct btrfs_fs_devices *fs_devices);
 static int btrfsic_process_metablock(struct btrfsic_state *state,
                                     struct btrfsic_block *block,
                                     struct btrfsic_block_data_ctx *block_ctx,
@@ -313,14 +272,6 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
 static int btrfsic_read_block(struct btrfsic_state *state,
                              struct btrfsic_block_data_ctx *block_ctx);
-static void btrfsic_dump_database(struct btrfsic_state *state);
-static int btrfsic_test_for_metadata(struct btrfsic_state *state,
-                                    char **datav, unsigned int num_pages);
-static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
-                                         u64 dev_bytenr, char **mapped_datav,
-                                         unsigned int num_pages,
-                                         struct bio *bio, int *bio_is_patched,
-                                         int submit_bio_bh_rw);
 static int btrfsic_process_written_superblock(
                struct btrfsic_state *state,
                struct btrfsic_block *const block,
@@ -1558,10 +1509,8 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
                /* Pages must be unmapped in reverse order */
                while (num_pages > 0) {
                        num_pages--;
-                       if (block_ctx->datav[num_pages]) {
-                               kunmap_local(block_ctx->datav[num_pages]);
+                       if (block_ctx->datav[num_pages])
                                block_ctx->datav[num_pages] = NULL;
-                       }
                        if (block_ctx->pagev[num_pages]) {
                                __free_page(block_ctx->pagev[num_pages]);
                                block_ctx->pagev[num_pages] = NULL;
@@ -1638,7 +1587,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
                i = j;
        }
        for (i = 0; i < num_pages; i++)
-               block_ctx->datav[i] = kmap_local_page(block_ctx->pagev[i]);
+               block_ctx->datav[i] = page_address(block_ctx->pagev[i]);
 
        return block_ctx->len;
 }
@@ -2703,7 +2652,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
 
                bio_for_each_segment(bvec, bio, iter) {
                        BUG_ON(bvec.bv_len != PAGE_SIZE);
-                       mapped_datav[i] = kmap_local_page(bvec.bv_page);
+                       mapped_datav[i] = page_address(bvec.bv_page);
                        i++;
 
                        if (dev_state->state->print_mask &
@@ -2716,9 +2665,6 @@ static void __btrfsic_submit_bio(struct bio *bio)
                                              mapped_datav, segs,
                                              bio, &bio_is_patched,
                                              bio->bi_opf);
-               /* Unmap in reverse order */
-               for (--i; i >= 0; i--)
-                       kunmap_local(mapped_datav[i]);
                kfree(mapped_datav);
        } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
                if (dev_state->state->print_mask &
index 30d82cd..7869ad1 100644 (file)
@@ -172,10 +172,9 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
                /* Hash through the page sector by sector */
                for (pg_offset = 0; pg_offset < bytes_left;
                     pg_offset += sectorsize) {
-                       kaddr = kmap_atomic(page);
+                       kaddr = page_address(page);
                        crypto_shash_digest(shash, kaddr + pg_offset,
                                            sectorsize, csum);
-                       kunmap_atomic(kaddr);
 
                        if (memcmp(&csum, cb_sum, csum_size) != 0) {
                                btrfs_print_data_csum_error(inode, disk_start,
@@ -565,6 +564,16 @@ static noinline int add_ra_bio_pages(struct inode *inode,
        if (isize == 0)
                return 0;
 
+       /*
+        * For current subpage support, we only support 64K page size,
+        * which means maximum compressed extent size (128K) is just 2x page
+        * size.
+        * This makes readahead less effective, so here disable readahead for
+        * subpage for now, until full compressed write is supported.
+        */
+       if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
+               return 0;
+
        end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
        while (last_offset < compressed_end) {
@@ -673,6 +682,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        struct page *page;
        struct bio *comp_bio;
        u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
+       u64 file_offset;
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
@@ -682,15 +692,17 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
        em_tree = &BTRFS_I(inode)->extent_tree;
 
+       file_offset = bio_first_bvec_all(bio)->bv_offset +
+                     page_offset(bio_first_page_all(bio));
+
        /* we need the actual starting offset of this extent in the file */
        read_lock(&em_tree->lock);
-       em = lookup_extent_mapping(em_tree,
-                                  page_offset(bio_first_page_all(bio)),
-                                  fs_info->sectorsize);
+       em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
        read_unlock(&em_tree->lock);
        if (!em)
                return BLK_STS_IOERR;
 
+       ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
        compressed_len = em->block_len;
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
@@ -721,8 +733,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                goto fail1;
 
        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
-               cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
-                                                             __GFP_HIGHMEM);
+               cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS);
                if (!cb->compressed_pages[pg_index]) {
                        faili = pg_index - 1;
                        ret = BLK_STS_RESOURCE;
@@ -1261,96 +1272,82 @@ void __cold btrfs_exit_compress(void)
 }
 
 /*
- * Copy uncompressed data from working buffer to pages.
+ * Copy decompressed data from working buffer to pages.
+ *
+ * @buf:               The decompressed data buffer
+ * @buf_len:           The decompressed data length
+ * @decompressed:      Number of bytes that are already decompressed inside the
+ *                     compressed extent
+ * @cb:                        The compressed extent descriptor
+ * @orig_bio:          The original bio that the caller wants to read for
+ *
+ * An easier to understand graph is like below:
+ *
+ *             |<- orig_bio ->|     |<- orig_bio->|
+ *     |<-------      full decompressed extent      ----->|
+ *     |<-----------    @cb range   ---->|
+ *     |                       |<-- @buf_len -->|
+ *     |<--- @decompressed --->|
+ *
+ * Note that @cb can be a subpage of the full decompressed extent, but
+ * @cb->start is always the same as the orig_file_offset of the full
+ * decompressed extent.
  *
- * buf_start is the byte offset we're of the start of our workspace buffer.
+ * When reading a compressed extent, we have to read the full compressed
+ * extent, while @orig_bio may only want part of the range.
+ * Thus this function ensures that only the data covered by @orig_bio is
+ * copied to its pages.
  *
- * total_out is the last byte of the buffer
+ * Return 0 if we have copied all needed contents for @orig_bio.
+ * Return >0 if we need to continue decompressing.
  */
-int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
-                             unsigned long total_out, u64 disk_start,
-                             struct bio *bio)
+int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
+                             struct compressed_bio *cb, u32 decompressed)
 {
-       unsigned long buf_offset;
-       unsigned long current_buf_start;
-       unsigned long start_byte;
-       unsigned long prev_start_byte;
-       unsigned long working_bytes = total_out - buf_start;
-       unsigned long bytes;
-       struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
-
-       /*
-        * start byte is the first byte of the page we're currently
-        * copying into relative to the start of the compressed data.
-        */
-       start_byte = page_offset(bvec.bv_page) - disk_start;
-
-       /* we haven't yet hit data corresponding to this page */
-       if (total_out <= start_byte)
-               return 1;
-
-       /*
-        * the start of the data we care about is offset into
-        * the middle of our working buffer
-        */
-       if (total_out > start_byte && buf_start < start_byte) {
-               buf_offset = start_byte - buf_start;
-               working_bytes -= buf_offset;
-       } else {
-               buf_offset = 0;
-       }
-       current_buf_start = buf_start;
-
-       /* copy bytes from the working buffer into the pages */
-       while (working_bytes > 0) {
-               bytes = min_t(unsigned long, bvec.bv_len,
-                               PAGE_SIZE - (buf_offset % PAGE_SIZE));
-               bytes = min(bytes, working_bytes);
-
-               memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
-                              bytes);
-               flush_dcache_page(bvec.bv_page);
+       struct bio *orig_bio = cb->orig_bio;
+       /* Offset inside the full decompressed extent */
+       u32 cur_offset;
+
+       cur_offset = decompressed;
+       /* The main loop to do the copy */
+       while (cur_offset < decompressed + buf_len) {
+               struct bio_vec bvec;
+               size_t copy_len;
+               u32 copy_start;
+               /* Offset inside the full decompressed extent */
+               u32 bvec_offset;
+
+               bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
+               /*
+                * cb->start may underflow, but subtracting that value can still
+                * give us correct offset inside the full decompressed extent.
+                */
+               bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;
 
-               buf_offset += bytes;
-               working_bytes -= bytes;
-               current_buf_start += bytes;
+               /* Haven't reached the bvec range, exit */
+               if (decompressed + buf_len <= bvec_offset)
+                       return 1;
 
-               /* check if we need to pick another page */
-               bio_advance(bio, bytes);
-               if (!bio->bi_iter.bi_size)
-                       return 0;
-               bvec = bio_iter_iovec(bio, bio->bi_iter);
-               prev_start_byte = start_byte;
-               start_byte = page_offset(bvec.bv_page) - disk_start;
+               copy_start = max(cur_offset, bvec_offset);
+               copy_len = min(bvec_offset + bvec.bv_len,
+                              decompressed + buf_len) - copy_start;
+               ASSERT(copy_len);
 
                /*
-                * We need to make sure we're only adjusting
-                * our offset into compression working buffer when
-                * we're switching pages.  Otherwise we can incorrectly
-                * keep copying when we were actually done.
+                * Extra range check to ensure we didn't go beyond
+                * @buf + @buf_len.
                 */
-               if (start_byte != prev_start_byte) {
-                       /*
-                        * make sure our new page is covered by this
-                        * working buffer
-                        */
-                       if (total_out <= start_byte)
-                               return 1;
+               ASSERT(copy_start - decompressed < buf_len);
+               memcpy_to_page(bvec.bv_page, bvec.bv_offset,
+                              buf + copy_start - decompressed, copy_len);
+               flush_dcache_page(bvec.bv_page);
+               cur_offset += copy_len;
 
-                       /*
-                        * the next page in the biovec might not be adjacent
-                        * to the last page, but it might still be found
-                        * inside this working buffer. bump our offset pointer
-                        */
-                       if (total_out > start_byte &&
-                           current_buf_start < start_byte) {
-                               buf_offset = start_byte - buf_start;
-                               working_bytes = total_out - start_byte;
-                               current_buf_start = buf_start + buf_offset;
-                       }
-               }
+               bio_advance(orig_bio, copy_len);
+               /* Finished the bio */
+               if (!orig_bio->bi_iter.bi_size)
+                       return 0;
        }
-
        return 1;
 }
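
The reworked copy loop above walks the original bio and, for each bvec, clamps the copy to the overlap between the freshly decompressed window [decompressed, decompressed + buf_len) and the bvec's position inside the full decompressed extent. A rough standalone sketch of that overlap arithmetic, using plain buffers instead of bios and pages (all names here are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Copy the part of @buf (which holds decompressed bytes
 * [@decompressed, @decompressed + @buf_len)) that overlaps the
 * destination window [@dst_off, @dst_off + @dst_len).
 */
static size_t copy_overlap(char *dst, uint32_t dst_off, uint32_t dst_len,
                           const char *buf, uint32_t decompressed,
                           uint32_t buf_len)
{
        uint32_t copy_start, copy_end;

        /* No overlap between the source window and the destination window */
        if (decompressed + buf_len <= dst_off ||
            dst_off + dst_len <= decompressed)
                return 0;

        copy_start = decompressed > dst_off ? decompressed : dst_off;
        copy_end = decompressed + buf_len < dst_off + dst_len ?
                   decompressed + buf_len : dst_off + dst_len;

        memcpy(dst + (copy_start - dst_off),
               buf + (copy_start - decompressed),
               copy_end - copy_start);
        return copy_end - copy_start;
}

int main(void)
{
        char extent[] = "abcdefghijklmnopqrstuvwxyz012345";
        char window[8];

        /*
         * Destination window covers offsets 10..17 of the extent;
         * the decompressed chunk covers offsets 8..23.
         */
        size_t n = copy_overlap(window, 10, sizeof(window),
                                extent + 8, 8, 16);

        printf("copied %zu bytes: %.8s\n", n, window);
        return 0;
}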
 
index c359f20..399be0b 100644 (file)
@@ -86,9 +86,8 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
                         unsigned long *total_out);
 int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
                     unsigned long start_byte, size_t srclen, size_t destlen);
-int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
-                             unsigned long total_out, u64 disk_start,
-                             struct bio *bio);
+int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
+                             struct compressed_bio *cb, u32 decompressed);
 
 blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                                  unsigned int len, u64 disk_start,
index c5c08c8..84627cb 100644 (file)
@@ -726,21 +726,21 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 
 /*
  * search for key in the extent_buffer.  The items start at offset p,
- * and they are item_size apart.  There are 'max' items in p.
+ * and they are item_size apart.
  *
  * the slot in the array is returned via slot, and it points to
  * the place where you would insert key if it is not found in
  * the array.
  *
- * slot may point to max if the key is bigger than all of the keys
+ * Slot may point to total number of items if the key is bigger than
+ * all of the keys
  */
 static noinline int generic_bin_search(struct extent_buffer *eb,
                                       unsigned long p, int item_size,
-                                      const struct btrfs_key *key,
-                                      int max, int *slot)
+                                      const struct btrfs_key *key, int *slot)
 {
        int low = 0;
-       int high = max;
+       int high = btrfs_header_nritems(eb);
        int ret;
        const int key_size = sizeof(struct btrfs_disk_key);
 
@@ -799,15 +799,11 @@ int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
        if (btrfs_header_level(eb) == 0)
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_leaf, items),
-                                         sizeof(struct btrfs_item),
-                                         key, btrfs_header_nritems(eb),
-                                         slot);
+                                         sizeof(struct btrfs_item), key, slot);
        else
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_node, ptrs),
-                                         sizeof(struct btrfs_key_ptr),
-                                         key, btrfs_header_nritems(eb),
-                                         slot);
+                                         sizeof(struct btrfs_key_ptr), key, slot);
 }
 
 static void root_add_used(struct btrfs_root *root, u32 size)
@@ -1237,7 +1233,6 @@ static void reada_for_search(struct btrfs_fs_info *fs_info,
        u64 target;
        u64 nread = 0;
        u64 nread_max;
-       struct extent_buffer *eb;
        u32 nr;
        u32 blocksize;
        u32 nscan = 0;
@@ -1266,10 +1261,14 @@ static void reada_for_search(struct btrfs_fs_info *fs_info,
 
        search = btrfs_node_blockptr(node, slot);
        blocksize = fs_info->nodesize;
-       eb = find_extent_buffer(fs_info, search);
-       if (eb) {
-               free_extent_buffer(eb);
-               return;
+       if (path->reada != READA_FORWARD_ALWAYS) {
+               struct extent_buffer *eb;
+
+               eb = find_extent_buffer(fs_info, search);
+               if (eb) {
+                       free_extent_buffer(eb);
+                       return;
+               }
        }
 
        target = search;
@@ -2102,6 +2101,27 @@ again:
        return 0;
 }
 
+/*
+ * Execute search and call btrfs_previous_item to traverse backwards if the item
+ * was not found.
+ *
+ * Return 0 if found, 1 if not found and < 0 if error.
+ */
+int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
+                          struct btrfs_path *path)
+{
+       int ret;
+
+       ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+       if (ret > 0)
+               ret = btrfs_previous_item(root, path, key->objectid, key->type);
+
+       if (ret == 0)
+               btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
+
+       return ret;
+}
+
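
The new btrfs_search_backwards() helper above encodes a common pattern: look up a key, and if there is no exact match, step back to the closest preceding item and report the key that was actually found. A minimal userspace sketch of the same "exact hit or previous entry" idea over a sorted array (illustrative names, not the btrfs API):

#include <stdio.h>

/*
 * Find @key in the sorted array, or the greatest entry smaller than @key
 * if there is no exact match (the "search backwards" pattern).
 * Returns 0 if an entry was found (stored in *found), 1 if every entry is
 * greater than @key, mirroring the 0/1 convention of the helper.
 */
static int search_backwards(const int *keys, int nr, int key, int *found)
{
        int low = 0, high = nr;

        /* lower_bound: first slot with keys[slot] >= key */
        while (low < high) {
                int mid = low + (high - low) / 2;

                if (keys[mid] < key)
                        low = mid + 1;
                else
                        high = mid;
        }

        if (low < nr && keys[low] == key) {
                *found = keys[low];             /* exact hit */
                return 0;
        }
        if (low == 0)
                return 1;                       /* nothing smaller exists */

        *found = keys[low - 1];                 /* previous item */
        return 0;
}

int main(void)
{
        const int keys[] = { 5, 10, 20, 40 };
        int found;

        if (search_backwards(keys, 4, 25, &found) == 0)
                printf("found %d\n", found);    /* prints "found 20" */
        return 0;
}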
 /*
  * adjust the pointers going up the tree, starting at level
  * making sure the right key of each node points to 'key'.
@@ -4358,16 +4378,6 @@ next:
        return 1;
 }
 
-/*
- * search the tree again to find a leaf with greater keys
- * returns 0 if it found something or 1 if there are no greater leaves.
- * returns < 0 on io errors.
- */
-int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
-{
-       return btrfs_next_old_leaf(root, path, 0);
-}
-
 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
                        u64 time_seq)
 {
index e5e53e5..f07c82f 100644 (file)
@@ -281,7 +281,8 @@ struct btrfs_super_block {
 
 #define BTRFS_FEATURE_COMPAT_RO_SUPP                   \
        (BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE |      \
-        BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID)
+        BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
+        BTRFS_FEATURE_COMPAT_RO_VERITY)
 
 #define BTRFS_FEATURE_COMPAT_RO_SAFE_SET       0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR     0ULL
@@ -1012,8 +1013,6 @@ struct btrfs_fs_info {
                u64 zoned;
        };
 
-       /* Max size to emit ZONE_APPEND write command */
-       u64 max_zone_append_size;
        struct mutex zoned_meta_io_lock;
        spinlock_t treelog_bg_lock;
        u64 treelog_bg;
@@ -1484,20 +1483,20 @@ do {                                                                   \
 /*
  * Inode flags
  */
-#define BTRFS_INODE_NODATASUM          (1 << 0)
-#define BTRFS_INODE_NODATACOW          (1 << 1)
-#define BTRFS_INODE_READONLY           (1 << 2)
-#define BTRFS_INODE_NOCOMPRESS         (1 << 3)
-#define BTRFS_INODE_PREALLOC           (1 << 4)
-#define BTRFS_INODE_SYNC               (1 << 5)
-#define BTRFS_INODE_IMMUTABLE          (1 << 6)
-#define BTRFS_INODE_APPEND             (1 << 7)
-#define BTRFS_INODE_NODUMP             (1 << 8)
-#define BTRFS_INODE_NOATIME            (1 << 9)
-#define BTRFS_INODE_DIRSYNC            (1 << 10)
-#define BTRFS_INODE_COMPRESS           (1 << 11)
-
-#define BTRFS_INODE_ROOT_ITEM_INIT     (1 << 31)
+#define BTRFS_INODE_NODATASUM          (1U << 0)
+#define BTRFS_INODE_NODATACOW          (1U << 1)
+#define BTRFS_INODE_READONLY           (1U << 2)
+#define BTRFS_INODE_NOCOMPRESS         (1U << 3)
+#define BTRFS_INODE_PREALLOC           (1U << 4)
+#define BTRFS_INODE_SYNC               (1U << 5)
+#define BTRFS_INODE_IMMUTABLE          (1U << 6)
+#define BTRFS_INODE_APPEND             (1U << 7)
+#define BTRFS_INODE_NODUMP             (1U << 8)
+#define BTRFS_INODE_NOATIME            (1U << 9)
+#define BTRFS_INODE_DIRSYNC            (1U << 10)
+#define BTRFS_INODE_COMPRESS           (1U << 11)
+
+#define BTRFS_INODE_ROOT_ITEM_INIT     (1U << 31)
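
The inode flag definitions above switch from 1 << n to 1U << n. With a plain int literal, shifting into bit 31 yields a negative value (and is formally undefined behaviour), which sign-extends when the 32-bit flags are later widened into a 64-bit on-disk value; the unsigned literal keeps the top bit a well-defined mask. A tiny illustration of the difference (not kernel code):

#include <limits.h>
#include <stdio.h>

#define FLAG_TOP_U      (1U << 31)      /* well-defined unsigned mask */

int main(void)
{
        unsigned long long from_unsigned = FLAG_TOP_U;
        /*
         * What a signed (1 << 31) evaluates to on common compilers: INT_MIN.
         * Widening it sign-extends and smears into the upper 32 bits.
         */
        unsigned long long from_signed = (long long)INT_MIN;

        printf("unsigned literal: 0x%llx\n", from_unsigned); /* 0x80000000 */
        printf("signed literal:   0x%llx\n", from_signed);   /* 0xffffffff80000000 */
        return 0;
}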
 
 #define BTRFS_INODE_FLAG_MASK                                          \
        (BTRFS_INODE_NODATASUM |                                        \
@@ -1514,6 +1513,10 @@ do {                                                                   \
         BTRFS_INODE_COMPRESS |                                         \
         BTRFS_INODE_ROOT_ITEM_INIT)
 
+#define BTRFS_INODE_RO_VERITY          (1U << 0)
+
+#define BTRFS_INODE_RO_FLAG_MASK       (BTRFS_INODE_RO_VERITY)
+
 struct btrfs_map_token {
        struct extent_buffer *eb;
        char *kaddr;
@@ -2781,10 +2784,11 @@ enum btrfs_flush_state {
        FLUSH_DELAYED_REFS      =       4,
        FLUSH_DELALLOC          =       5,
        FLUSH_DELALLOC_WAIT     =       6,
-       ALLOC_CHUNK             =       7,
-       ALLOC_CHUNK_FORCE       =       8,
-       RUN_DELAYED_IPUTS       =       9,
-       COMMIT_TRANS            =       10,
+       FLUSH_DELALLOC_FULL     =       7,
+       ALLOC_CHUNK             =       8,
+       ALLOC_CHUNK_FORCE       =       9,
+       RUN_DELAYED_IPUTS       =       10,
+       COMMIT_TRANS            =       11,
 };
 
 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
@@ -2901,10 +2905,13 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
        return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1);
 }
 
-int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
                        u64 time_seq);
+
+int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
+                          struct btrfs_path *path);
+
 static inline int btrfs_next_old_item(struct btrfs_root *root,
                                      struct btrfs_path *p, u64 time_seq)
 {
@@ -2913,6 +2920,18 @@ static inline int btrfs_next_old_item(struct btrfs_root *root,
                return btrfs_next_old_leaf(root, p, time_seq);
        return 0;
 }
+
+/*
+ * Search the tree again to find a leaf with greater keys.
+ *
+ * Returns 0 if it found something or 1 if there are no greater leaves.
+ * Returns < 0 on error.
+ */
+static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
+{
+       return btrfs_next_old_leaf(root, path, 0);
+}
+
 static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
 {
        return btrfs_next_old_item(root, p, 0);
@@ -3145,7 +3164,8 @@ int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
                              struct extent_state **cached_state);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
                             struct btrfs_root *new_root,
-                            struct btrfs_root *parent_root);
+                            struct btrfs_root *parent_root,
+                            struct user_namespace *mnt_userns);
  void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
                               unsigned *bits);
 void btrfs_clear_delalloc_extent(struct inode *inode,
@@ -3194,10 +3214,10 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
                u64 start, u64 end, int *page_started, unsigned long *nr_written,
                struct writeback_control *wbc);
-int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end);
+int btrfs_writepage_cow_fixup(struct page *page);
 void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
                                          struct page *page, u64 start,
-                                         u64 end, int uptodate);
+                                         u64 end, bool uptodate);
 extern const struct dentry_operations btrfs_dentry_operations;
 extern const struct iomap_ops btrfs_dio_iomap_ops;
 extern const struct iomap_dio_ops btrfs_dio_ops;
@@ -3779,6 +3799,30 @@ static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
        return signal_pending(current);
 }
 
+/* verity.c */
+#ifdef CONFIG_FS_VERITY
+
+extern const struct fsverity_operations btrfs_verityops;
+int btrfs_drop_verity_items(struct btrfs_inode *inode);
+
+BTRFS_SETGET_FUNCS(verity_descriptor_encryption, struct btrfs_verity_descriptor_item,
+                  encryption, 8);
+BTRFS_SETGET_FUNCS(verity_descriptor_size, struct btrfs_verity_descriptor_item,
+                  size, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_verity_descriptor_encryption,
+                        struct btrfs_verity_descriptor_item, encryption, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_verity_descriptor_size,
+                        struct btrfs_verity_descriptor_item, size, 64);
+
+#else
+
+static inline int btrfs_drop_verity_items(struct btrfs_inode *inode)
+{
+       return 0;
+}
+
+#endif
+
 /* Sanity test specific functions */
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 void btrfs_test_destroy_inode(struct inode *inode);
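
The verity declarations above follow the usual pattern for optional features: the real prototypes live under CONFIG_FS_VERITY, and a no-op static inline stands in otherwise, so callers compile unchanged either way. A self-contained illustration of that pattern (names are illustrative, not the kernel's):

#include <stdio.h>

/* Define this to emulate a build with the feature compiled in. */
/* #define CONFIG_FS_VERITY 1 */

#ifdef CONFIG_FS_VERITY
static int drop_verity_items(void)
{
        /* stand-in for the real work that would live in verity.c */
        printf("dropping verity items\n");
        return 0;
}
#else
static inline int drop_verity_items(void)
{
        return 0;       /* feature disabled: nothing to drop, succeed */
}
#endif

int main(void)
{
        /* The caller looks the same in both configurations. */
        return drop_verity_items();
}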
index 257c1e1..1e08eb2 100644 (file)
@@ -6,7 +6,6 @@
 
 #include <linux/slab.h>
 #include <linux/iversion.h>
-#include <linux/sched/mm.h>
 #include "misc.h"
 #include "delayed-inode.h"
 #include "disk-io.h"
@@ -672,176 +671,119 @@ static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
 }
 
 /*
- * This helper will insert some continuous items into the same leaf according
- * to the free space of the leaf.
+ * Insert a single delayed item or a batch of delayed items that have consecutive
+ * keys if they exist.
  */
-static int btrfs_batch_insert_items(struct btrfs_root *root,
-                                   struct btrfs_path *path,
-                                   struct btrfs_delayed_item *item)
+static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
+                                    struct btrfs_root *root,
+                                    struct btrfs_path *path,
+                                    struct btrfs_delayed_item *first_item)
 {
-       struct btrfs_delayed_item *curr, *next;
-       int free_space;
-       int total_size = 0;
-       struct extent_buffer *leaf;
-       char *data_ptr;
-       struct btrfs_key *keys;
-       u32 *data_size;
-       struct list_head head;
-       int slot;
+       LIST_HEAD(batch);
+       struct btrfs_delayed_item *curr;
+       struct btrfs_delayed_item *next;
+       const int max_size = BTRFS_LEAF_DATA_SIZE(root->fs_info);
+       int total_size;
        int nitems;
-       int i;
-       int ret = 0;
-
-       BUG_ON(!path->nodes[0]);
+       char *ins_data = NULL;
+       struct btrfs_key *ins_keys;
+       u32 *ins_sizes;
+       int ret;
 
-       leaf = path->nodes[0];
-       free_space = btrfs_leaf_free_space(leaf);
-       INIT_LIST_HEAD(&head);
+       list_add_tail(&first_item->tree_list, &batch);
+       nitems = 1;
+       total_size = first_item->data_len + sizeof(struct btrfs_item);
+       curr = first_item;
 
-       next = item;
-       nitems = 0;
+       while (true) {
+               int next_size;
 
-       /*
-        * count the number of the continuous items that we can insert in batch
-        */
-       while (total_size + next->data_len + sizeof(struct btrfs_item) <=
-              free_space) {
-               total_size += next->data_len + sizeof(struct btrfs_item);
-               list_add_tail(&next->tree_list, &head);
-               nitems++;
-
-               curr = next;
                next = __btrfs_next_delayed_item(curr);
-               if (!next)
+               if (!next || !btrfs_is_continuous_delayed_item(curr, next))
                        break;
 
-               if (!btrfs_is_continuous_delayed_item(curr, next))
+               next_size = next->data_len + sizeof(struct btrfs_item);
+               if (total_size + next_size > max_size)
                        break;
-       }
 
-       if (!nitems) {
-               ret = 0;
-               goto out;
+               list_add_tail(&next->tree_list, &batch);
+               nitems++;
+               total_size += next_size;
+               curr = next;
        }
 
-       keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
-       if (!keys) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       if (nitems == 1) {
+               ins_keys = &first_item->key;
+               ins_sizes = &first_item->data_len;
+       } else {
+               int i = 0;
 
-       data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
-       if (!data_size) {
-               ret = -ENOMEM;
-               goto error;
+               ins_data = kmalloc(nitems * sizeof(u32) +
+                                  nitems * sizeof(struct btrfs_key), GFP_NOFS);
+               if (!ins_data) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               ins_sizes = (u32 *)ins_data;
+               ins_keys = (struct btrfs_key *)(ins_data + nitems * sizeof(u32));
+               list_for_each_entry(curr, &batch, tree_list) {
+                       ins_keys[i] = curr->key;
+                       ins_sizes[i] = curr->data_len;
+                       i++;
+               }
        }
 
-       /* get keys of all the delayed items */
-       i = 0;
-       list_for_each_entry(next, &head, tree_list) {
-               keys[i] = next->key;
-               data_size[i] = next->data_len;
-               i++;
-       }
+       ret = btrfs_insert_empty_items(trans, root, path, ins_keys, ins_sizes,
+                                      nitems);
+       if (ret)
+               goto out;
 
-       /* insert the keys of the items */
-       setup_items_for_insert(root, path, keys, data_size, nitems);
+       list_for_each_entry(curr, &batch, tree_list) {
+               char *data_ptr;
 
-       /* insert the dir index items */
-       slot = path->slots[0];
-       list_for_each_entry_safe(curr, next, &head, tree_list) {
-               data_ptr = btrfs_item_ptr(leaf, slot, char);
-               write_extent_buffer(leaf, &curr->data,
-                                   (unsigned long)data_ptr,
-                                   curr->data_len);
-               slot++;
+               data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
+               write_extent_buffer(path->nodes[0], &curr->data,
+                                   (unsigned long)data_ptr, curr->data_len);
+               path->slots[0]++;
+       }
 
-               btrfs_delayed_item_release_metadata(root, curr);
+       /*
+        * Now release our path before releasing the delayed items and their
+        * metadata reservations, so that we don't block other tasks for more
+        * time than needed.
+        */
+       btrfs_release_path(path);
 
+       list_for_each_entry_safe(curr, next, &batch, tree_list) {
                list_del(&curr->tree_list);
+               btrfs_delayed_item_release_metadata(root, curr);
                btrfs_release_delayed_item(curr);
        }
-
-error:
-       kfree(data_size);
-       kfree(keys);
 out:
+       kfree(ins_data);
        return ret;
 }
 
-/*
- * This helper can just do simple insertion that needn't extend item for new
- * data, such as directory name index insertion, inode insertion.
- */
-static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
-                                    struct btrfs_root *root,
-                                    struct btrfs_path *path,
-                                    struct btrfs_delayed_item *delayed_item)
-{
-       struct extent_buffer *leaf;
-       unsigned int nofs_flag;
-       char *ptr;
-       int ret;
-
-       nofs_flag = memalloc_nofs_save();
-       ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
-                                     delayed_item->data_len);
-       memalloc_nofs_restore(nofs_flag);
-       if (ret < 0 && ret != -EEXIST)
-               return ret;
-
-       leaf = path->nodes[0];
-
-       ptr = btrfs_item_ptr(leaf, path->slots[0], char);
-
-       write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
-                           delayed_item->data_len);
-       btrfs_mark_buffer_dirty(leaf);
-
-       btrfs_delayed_item_release_metadata(root, delayed_item);
-       return 0;
-}
-
-/*
- * we insert an item first, then if there are some continuous items, we try
- * to insert those items into the same leaf.
- */
 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
 {
-       struct btrfs_delayed_item *curr, *prev;
        int ret = 0;
 
-do_again:
-       mutex_lock(&node->mutex);
-       curr = __btrfs_first_delayed_insertion_item(node);
-       if (!curr)
-               goto insert_end;
-
-       ret = btrfs_insert_delayed_item(trans, root, path, curr);
-       if (ret < 0) {
-               btrfs_release_path(path);
-               goto insert_end;
-       }
+       while (ret == 0) {
+               struct btrfs_delayed_item *curr;
 
-       prev = curr;
-       curr = __btrfs_next_delayed_item(prev);
-       if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
-               /* insert the continuous items into the same leaf */
-               path->slots[0]++;
-               btrfs_batch_insert_items(root, path, curr);
+               mutex_lock(&node->mutex);
+               curr = __btrfs_first_delayed_insertion_item(node);
+               if (!curr) {
+                       mutex_unlock(&node->mutex);
+                       break;
+               }
+               ret = btrfs_insert_delayed_item(trans, root, path, curr);
+               mutex_unlock(&node->mutex);
        }
-       btrfs_release_delayed_item(prev);
-       btrfs_mark_buffer_dirty(path->nodes[0]);
 
-       btrfs_release_path(path);
-       mutex_unlock(&node->mutex);
-       goto do_again;
-
-insert_end:
-       mutex_unlock(&node->mutex);
        return ret;
 }
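
The rewritten insertion path above batches consecutive delayed items as long as their data plus one item header each still fits in a leaf, then inserts the whole batch with a single tree operation. A userspace sketch of that size-limited batching loop, using an array of item sizes and illustrative constants (the key-consecutiveness check is omitted):

#include <stdio.h>

#define ITEM_HEADER_SIZE        25      /* illustrative per-item header size */
#define LEAF_DATA_SIZE          16283   /* illustrative leaf capacity */

/*
 * Starting at @first, count how many consecutive items fit into one leaf.
 * Mirrors the "total_size + next_size > max_size -> stop" loop above.
 */
static int batch_items(const int *data_len, int nr, int first)
{
        int total_size = data_len[first] + ITEM_HEADER_SIZE;
        int nitems = 1;
        int i;

        for (i = first + 1; i < nr; i++) {
                int next_size = data_len[i] + ITEM_HEADER_SIZE;

                if (total_size + next_size > LEAF_DATA_SIZE)
                        break;
                total_size += next_size;
                nitems++;
        }
        return nitems;
}

int main(void)
{
        const int data_len[] = { 4000, 4000, 4000, 4000, 4000 };

        /* Four items of ~4025 bytes stay under 16283, the fifth does not. */
        printf("batched %d items\n", batch_items(data_len, 5, 0));
        return 0;
}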
 
@@ -914,7 +856,6 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_delayed_node *node)
 {
        struct btrfs_delayed_item *curr, *prev;
-       unsigned int nofs_flag;
        int ret = 0;
 
 do_again:
@@ -923,9 +864,7 @@ do_again:
        if (!curr)
                goto delete_fail;
 
-       nofs_flag = memalloc_nofs_save();
        ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
-       memalloc_nofs_restore(nofs_flag);
        if (ret < 0)
                goto delete_fail;
        else if (ret > 0) {
@@ -994,7 +933,6 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
        struct btrfs_key key;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
-       unsigned int nofs_flag;
        int mod;
        int ret;
 
@@ -1007,9 +945,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
        else
                mod = 1;
 
-       nofs_flag = memalloc_nofs_save();
        ret = btrfs_lookup_inode(trans, root, path, &key, mod);
-       memalloc_nofs_restore(nofs_flag);
        if (ret > 0)
                ret = -ENOENT;
        if (ret < 0)
@@ -1066,9 +1002,7 @@ search:
        key.type = BTRFS_INODE_EXTREF_KEY;
        key.offset = -1;
 
-       nofs_flag = memalloc_nofs_save();
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
-       memalloc_nofs_restore(nofs_flag);
        if (ret < 0)
                goto err_out;
        ASSERT(ret);
@@ -1711,6 +1645,8 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
                                  struct btrfs_inode_item *inode_item,
                                  struct inode *inode)
 {
+       u64 flags;
+
        btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
        btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
@@ -1723,7 +1659,9 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
                                       inode_peek_iversion(inode));
        btrfs_set_stack_inode_transid(inode_item, trans->transid);
        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
-       btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
+       flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
+                                         BTRFS_I(inode)->ro_flags);
+       btrfs_set_stack_inode_flags(inode_item, flags);
        btrfs_set_stack_inode_block_group(inode_item, 0);
 
        btrfs_set_stack_timespec_sec(&inode_item->atime,
@@ -1781,7 +1719,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
                                   btrfs_stack_inode_sequence(inode_item));
        inode->i_rdev = 0;
        *rdev = btrfs_stack_inode_rdev(inode_item);
-       BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
+       btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
+                               &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
 
        inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
        inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
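
fill_stack_inode_item() and btrfs_fill_inode() above now go through btrfs_inode_combine_flags()/btrfs_inode_split_flags() so the new read-only flags share the on-disk flags field with the existing inode flags. Those helpers are not shown in this hunk; a plausible sketch under the assumption that the ro_flags simply occupy the upper 32 bits:

#include <inttypes.h>
#include <stdio.h>

/* Assumed layout: low 32 bits = flags, high 32 bits = ro_flags. */
static uint64_t combine_flags(uint32_t flags, uint32_t ro_flags)
{
        return flags | ((uint64_t)ro_flags << 32);
}

static void split_flags(uint64_t inode_item_flags,
                        uint32_t *flags, uint32_t *ro_flags)
{
        *flags = (uint32_t)inode_item_flags;
        *ro_flags = (uint32_t)(inode_item_flags >> 32);
}

int main(void)
{
        uint32_t flags = 0x800;         /* e.g. a compress-style bit */
        uint32_t ro_flags = 0x1;        /* e.g. a verity-style bit */
        uint32_t f, ro;

        uint64_t packed = combine_flags(flags, ro_flags);

        split_flags(packed, &f, &ro);
        printf("packed=0x%" PRIx64 " flags=0x%x ro=0x%x\n", packed, f, ro);
        return 0;
}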
index 98b63eb..f1274d5 100644 (file)
@@ -170,6 +170,25 @@ out_free:
        return 0;
 }
 
+static struct btrfs_dir_item *btrfs_lookup_match_dir(
+                       struct btrfs_trans_handle *trans,
+                       struct btrfs_root *root, struct btrfs_path *path,
+                       struct btrfs_key *key, const char *name,
+                       int name_len, int mod)
+{
+       const int ins_len = (mod < 0 ? -1 : 0);
+       const int cow = (mod != 0);
+       int ret;
+
+       ret = btrfs_search_slot(trans, root, key, path, ins_len, cow);
+       if (ret < 0)
+               return ERR_PTR(ret);
+       if (ret > 0)
+               return ERR_PTR(-ENOENT);
+
+       return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
+}
+
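
btrfs_lookup_match_dir() above reports a missing key as ERR_PTR(-ENOENT), and the callers that keep their historical "NULL means not found" contract translate exactly that error back to NULL while letting real failures propagate. A compact userspace illustration of the convention, with simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers from <linux/err.h>:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's error-pointer helpers. */
static void *err_ptr(long err)          { return (void *)(intptr_t)err; }
static int is_err(const void *p)        { return (uintptr_t)p >= (uintptr_t)-4095; }
static long ptr_err(const void *p)      { return (long)(intptr_t)p; }

static int item = 42;

/* Lookup that reports "not found" as an error pointer. */
static int *lookup_match(int key)
{
        if (key == 1)
                return &item;
        if (key == 2)
                return err_ptr(-EIO);   /* a "real" failure */
        return err_ptr(-ENOENT);
}

/* Caller keeping the old "NULL means not found" contract. */
static int *lookup(int key)
{
        int *di = lookup_match(key);

        if (is_err(di) && ptr_err(di) == -ENOENT)
                return NULL;
        return di;
}

int main(void)
{
        printf("key 1 -> %p\n", (void *)lookup(1));     /* valid pointer */
        printf("key 3 -> %p\n", (void *)lookup(3));     /* NULL, not found */
        printf("key 2 is error: %d\n", is_err(lookup(2)));
        return 0;
}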
 /*
  * lookup a directory item based on name.  'dir' is the objectid
  * we're searching in, and 'mod' tells us if you plan on deleting the
@@ -181,23 +200,18 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
                                             const char *name, int name_len,
                                             int mod)
 {
-       int ret;
        struct btrfs_key key;
-       int ins_len = mod < 0 ? -1 : 0;
-       int cow = mod != 0;
+       struct btrfs_dir_item *di;
 
        key.objectid = dir;
        key.type = BTRFS_DIR_ITEM_KEY;
-
        key.offset = btrfs_name_hash(name, name_len);
 
-       ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
-       if (ret < 0)
-               return ERR_PTR(ret);
-       if (ret > 0)
+       di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+       if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
                return NULL;
 
-       return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
+       return di;
 }
 
 int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
@@ -211,7 +225,6 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
        int slot;
        struct btrfs_path *path;
 
-
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
@@ -220,20 +233,20 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
        key.type = BTRFS_DIR_ITEM_KEY;
        key.offset = btrfs_name_hash(name, name_len);
 
-       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-
-       /* return back any errors */
-       if (ret < 0)
-               goto out;
+       di = btrfs_lookup_match_dir(NULL, root, path, &key, name, name_len, 0);
+       if (IS_ERR(di)) {
+               ret = PTR_ERR(di);
+               /* Nothing found, we're safe */
+               if (ret == -ENOENT) {
+                       ret = 0;
+                       goto out;
+               }
 
-       /* nothing found, we're safe */
-       if (ret > 0) {
-               ret = 0;
-               goto out;
+               if (ret < 0)
+                       goto out;
        }
 
        /* we found an item, look for our name in the item */
-       di = btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
        if (di) {
                /* our exact name was found */
                ret = -EEXIST;
@@ -274,21 +287,13 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
                            u64 objectid, const char *name, int name_len,
                            int mod)
 {
-       int ret;
        struct btrfs_key key;
-       int ins_len = mod < 0 ? -1 : 0;
-       int cow = mod != 0;
 
        key.objectid = dir;
        key.type = BTRFS_DIR_INDEX_KEY;
        key.offset = objectid;
 
-       ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
-       if (ret < 0)
-               return ERR_PTR(ret);
-       if (ret > 0)
-               return ERR_PTR(-ENOENT);
-       return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
+       return btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
 }
 
 struct btrfs_dir_item *
@@ -345,21 +350,18 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
                                          const char *name, u16 name_len,
                                          int mod)
 {
-       int ret;
        struct btrfs_key key;
-       int ins_len = mod < 0 ? -1 : 0;
-       int cow = mod != 0;
+       struct btrfs_dir_item *di;
 
        key.objectid = dir;
        key.type = BTRFS_XATTR_ITEM_KEY;
        key.offset = btrfs_name_hash(name, name_len);
-       ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
-       if (ret < 0)
-               return ERR_PTR(ret);
-       if (ret > 0)
+
+       di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+       if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
                return NULL;
 
-       return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
+       return di;
 }
 
 /*
index a59ab7b..2f9515d 100644 (file)
@@ -3392,11 +3392,16 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
                goto fail_alloc;
        }
 
-       /* For 4K sector size support, it's only read-only */
-       if (PAGE_SIZE == SZ_64K && sectorsize == SZ_4K) {
-               if (!sb_rdonly(sb) || btrfs_super_log_root(disk_super)) {
+       if (sectorsize != PAGE_SIZE) {
+               btrfs_warn(fs_info,
+               "read-write for sector size %u with page size %lu is experimental",
+                          sectorsize, PAGE_SIZE);
+       }
+       if (sectorsize != PAGE_SIZE) {
+               if (btrfs_super_incompat_flags(fs_info->super_copy) &
+                       BTRFS_FEATURE_INCOMPAT_RAID56) {
                        btrfs_err(fs_info,
-       "subpage sectorsize %u only supported read-only for page size %lu",
+               "RAID56 is not yet supported for sector size %u with page size %lu",
                                sectorsize, PAGE_SIZE);
                        err = -EINVAL;
                        goto fail_alloc;
index 268ce58..fc3da75 100644 (file)
@@ -153,7 +153,7 @@ search_again:
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;
 
-       ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
+       ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out_free;
 
@@ -5950,9 +5950,9 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
  */
 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 {
+       struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct btrfs_block_group *cache = NULL;
        struct btrfs_device *device;
-       struct list_head *devices;
        u64 group_trimmed;
        u64 range_end = U64_MAX;
        u64 start;
@@ -6016,9 +6016,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
                btrfs_warn(fs_info,
                        "failed to trim %llu block group(s), last error %d",
                        bg_failed, bg_ret);
-       mutex_lock(&fs_info->fs_devices->device_list_mutex);
-       devices = &fs_info->fs_devices->devices;
-       list_for_each_entry(device, devices, dev_list) {
+
+       mutex_lock(&fs_devices->device_list_mutex);
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
                if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
                        continue;
 
@@ -6031,7 +6031,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 
                trimmed += group_trimmed;
        }
-       mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+       mutex_unlock(&fs_devices->device_list_mutex);
 
        if (dev_failed)
                btrfs_warn(fs_info,
index 9e81d25..aaddd72 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/pagevec.h>
 #include <linux/prefetch.h>
 #include <linux/cleancache.h>
+#include <linux/fsverity.h>
 #include "misc.h"
 #include "extent_io.h"
 #include "extent-io-tree.h"
@@ -172,6 +173,8 @@ int __must_check submit_one_bio(struct bio *bio, int mirror_num,
 
        bio->bi_private = NULL;
 
+       /* Caller should ensure the bio has at least some range added */
+       ASSERT(bio->bi_iter.bi_size);
        if (is_data_inode(tree->private_data))
                ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
                                            bio_flags);
@@ -2245,18 +2248,6 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
        return bitset;
 }
 
-/*
- * helper function to set a given page up to date if all the
- * extents in the tree for that page are up to date
- */
-static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
-{
-       u64 start = page_offset(page);
-       u64 end = start + PAGE_SIZE - 1;
-       if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
-               SetPageUptodate(page);
-}
-
 int free_io_failure(struct extent_io_tree *failure_tree,
                    struct extent_io_tree *io_tree,
                    struct io_failure_record *rec)
@@ -2688,7 +2679,15 @@ static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
               start + len <= page_offset(page) + PAGE_SIZE);
 
        if (uptodate) {
-               btrfs_page_set_uptodate(fs_info, page, start, len);
+               if (fsverity_active(page->mapping->host) &&
+                   !PageError(page) &&
+                   !PageUptodate(page) &&
+                   start < i_size_read(page->mapping->host) &&
+                   !fsverity_verify_page(page)) {
+                       btrfs_page_set_error(fs_info, page, start, len);
+               } else {
+                       btrfs_page_set_uptodate(fs_info, page, start, len);
+               }
        } else {
                btrfs_page_clear_uptodate(fs_info, page, start, len);
                btrfs_page_set_error(fs_info, page, start, len);
@@ -2779,7 +2778,7 @@ next:
 void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
 {
        struct btrfs_inode *inode;
-       int uptodate = (err == 0);
+       const bool uptodate = (err == 0);
        int ret = 0;
 
        ASSERT(page && page->mapping);
@@ -2787,8 +2786,14 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
        btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
 
        if (!uptodate) {
-               ClearPageUptodate(page);
-               SetPageError(page);
+               const struct btrfs_fs_info *fs_info = inode->root->fs_info;
+               u32 len;
+
+               ASSERT(end + 1 - start <= U32_MAX);
+               len = end + 1 - start;
+
+               btrfs_page_clear_uptodate(fs_info, page, start, len);
+               btrfs_page_set_error(fs_info, page, start, len);
                ret = err < 0 ? err : -EIO;
                mapping_set_error(page->mapping, ret);
        }
@@ -3097,7 +3102,7 @@ readpage_ok:
                /* Update page status and unlock */
                end_page_read(page, uptodate, start, len);
                endio_readpage_release_extent(&processed, BTRFS_I(inode),
-                                             start, end, uptodate);
+                                             start, end, PageUptodate(page));
        }
        /* Release the last extent */
        endio_readpage_release_extent(&processed, NULL, 0, 0, false);
@@ -3153,11 +3158,13 @@ struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
        return bio;
 }
 
-struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
+struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
 {
        struct bio *bio;
        struct btrfs_io_bio *btrfs_bio;
 
+       ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
+
        /* this will never fail when it's backed by a bioset */
        bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
        ASSERT(bio);
@@ -3181,20 +3188,22 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
  * @size:      portion of page that we want to write
  * @prev_bio_flags:  flags of previous bio to see if we can merge the current one
  * @bio_flags: flags of the current bio to see if we can merge them
- * @return:    true if page was added, false otherwise
  *
  * Attempt to add a page to bio considering stripe alignment etc.
  *
- * Return true if successfully page added. Otherwise, return false.
+ * Return >= 0 for the number of bytes added to the bio.
+ * Can return 0 if the current bio is already at stripe/zone boundary.
+ * Return <0 for error.
  */
-static bool btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
-                              struct page *page,
-                              u64 disk_bytenr, unsigned int size,
-                              unsigned int pg_offset,
-                              unsigned long bio_flags)
+static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
+                             struct page *page,
+                             u64 disk_bytenr, unsigned int size,
+                             unsigned int pg_offset,
+                             unsigned long bio_flags)
 {
        struct bio *bio = bio_ctrl->bio;
        u32 bio_size = bio->bi_iter.bi_size;
+       u32 real_size;
        const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
        bool contig;
        int ret;
@@ -3203,29 +3212,36 @@ static bool btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
        /* The limit should be calculated when bio_ctrl->bio is allocated */
        ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
        if (bio_ctrl->bio_flags != bio_flags)
-               return false;
+               return 0;
 
        if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED)
                contig = bio->bi_iter.bi_sector == sector;
        else
                contig = bio_end_sector(bio) == sector;
        if (!contig)
-               return false;
+               return 0;
 
-       if (bio_size + size > bio_ctrl->len_to_oe_boundary ||
-           bio_size + size > bio_ctrl->len_to_stripe_boundary)
-               return false;
+       real_size = min(bio_ctrl->len_to_oe_boundary,
+                       bio_ctrl->len_to_stripe_boundary) - bio_size;
+       real_size = min(real_size, size);
+
+       /*
+        * If real_size is 0, never call bio_add_*_page(): even with a size of
+        * 0, the bio will still execute its endio function on the page!
+        */
+       if (real_size == 0)
+               return 0;
 
        if (bio_op(bio) == REQ_OP_ZONE_APPEND)
-               ret = bio_add_zone_append_page(bio, page, size, pg_offset);
+               ret = bio_add_zone_append_page(bio, page, real_size, pg_offset);
        else
-               ret = bio_add_page(bio, page, size, pg_offset);
+               ret = bio_add_page(bio, page, real_size, pg_offset);
 
-       return ret == size;
+       return ret;
 }
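
Rather than rejecting a page that would cross a stripe or ordered-extent boundary, btrfs_bio_add_page() above now clamps the added length to whatever room remains before the nearer boundary and lets the caller submit and retry with the rest. The clamping arithmetic as a small standalone function (names and numbers are illustrative):

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

/*
 * How many of @size bytes can still be added to a bio that already holds
 * @bio_size bytes, without crossing either boundary?
 */
static unsigned int clamp_to_boundary(unsigned int bio_size,
                                      unsigned int size,
                                      unsigned int len_to_oe_boundary,
                                      unsigned int len_to_stripe_boundary)
{
        unsigned int real_size;

        real_size = min_u32(len_to_oe_boundary, len_to_stripe_boundary) -
                    bio_size;
        return min_u32(real_size, size);
}

int main(void)
{
        /*
         * 60 KiB already in the bio, 16 KiB more wanted, stripe ends at
         * 64 KiB: only 4 KiB may be added now, the rest goes into a new bio.
         */
        printf("%u\n", clamp_to_boundary(60 * 1024, 16 * 1024,
                                         128 * 1024, 64 * 1024));
        return 0;
}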
 
 static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
-                              struct btrfs_inode *inode)
+                              struct btrfs_inode *inode, u64 file_offset)
 {
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_io_geometry geom;
@@ -3266,9 +3282,8 @@ static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
                return 0;
        }
 
-       ASSERT(fs_info->max_zone_append_size > 0);
        /* Ordered extent not yet created, so we're good */
-       ordered = btrfs_lookup_ordered_extent(inode, logical);
+       ordered = btrfs_lookup_ordered_extent(inode, file_offset);
        if (!ordered) {
                bio_ctrl->len_to_oe_boundary = U32_MAX;
                return 0;
@@ -3280,6 +3295,62 @@ static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
        return 0;
 }
 
+static int alloc_new_bio(struct btrfs_inode *inode,
+                        struct btrfs_bio_ctrl *bio_ctrl,
+                        struct writeback_control *wbc,
+                        unsigned int opf,
+                        bio_end_io_t end_io_func,
+                        u64 disk_bytenr, u32 offset, u64 file_offset,
+                        unsigned long bio_flags)
+{
+       struct btrfs_fs_info *fs_info = inode->root->fs_info;
+       struct bio *bio;
+       int ret;
+
+       /*
+        * For compressed page range, its disk_bytenr is always @disk_bytenr
+        * passed in, no matter whether any range was added to the previous bio.
+        */
+       if (bio_flags & EXTENT_BIO_COMPRESSED)
+               bio = btrfs_bio_alloc(disk_bytenr);
+       else
+               bio = btrfs_bio_alloc(disk_bytenr + offset);
+       bio_ctrl->bio = bio;
+       bio_ctrl->bio_flags = bio_flags;
+       bio->bi_end_io = end_io_func;
+       bio->bi_private = &inode->io_tree;
+       bio->bi_write_hint = inode->vfs_inode.i_write_hint;
+       bio->bi_opf = opf;
+       ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
+       if (ret < 0)
+               goto error;
+       if (wbc) {
+               struct block_device *bdev;
+
+               bdev = fs_info->fs_devices->latest_bdev;
+               bio_set_dev(bio, bdev);
+               wbc_init_bio(wbc, bio);
+       }
+       if (btrfs_is_zoned(fs_info) && bio_op(bio) == REQ_OP_ZONE_APPEND) {
+               struct btrfs_device *device;
+
+               device = btrfs_zoned_get_device(fs_info, disk_bytenr,
+                                               fs_info->sectorsize);
+               if (IS_ERR(device)) {
+                       ret = PTR_ERR(device);
+                       goto error;
+               }
+
+               btrfs_io_bio(bio)->device = device;
+       }
+       return 0;
+error:
+       bio_ctrl->bio = NULL;
+       bio->bi_status = errno_to_blk_status(ret);
+       bio_endio(bio);
+       return ret;
+}
+
 /*
  * @opf:       bio REQ_OP_* and REQ_* flags as one value
  * @wbc:       optional writeback control for io accounting
@@ -3305,61 +3376,67 @@ static int submit_extent_page(unsigned int opf,
                              bool force_bio_submit)
 {
        int ret = 0;
-       struct bio *bio;
-       size_t io_size = min_t(size_t, size, PAGE_SIZE);
        struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
-       struct extent_io_tree *tree = &inode->io_tree;
-       struct btrfs_fs_info *fs_info = inode->root->fs_info;
+       unsigned int cur = pg_offset;
 
        ASSERT(bio_ctrl);
 
        ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
               pg_offset + size <= PAGE_SIZE);
-       if (bio_ctrl->bio) {
-               bio = bio_ctrl->bio;
-               if (force_bio_submit ||
-                   !btrfs_bio_add_page(bio_ctrl, page, disk_bytenr, io_size,
-                                       pg_offset, bio_flags)) {
-                       ret = submit_one_bio(bio, mirror_num, bio_ctrl->bio_flags);
+       if (force_bio_submit && bio_ctrl->bio) {
+               ret = submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->bio_flags);
+               bio_ctrl->bio = NULL;
+               if (ret < 0)
+                       return ret;
+       }
+
+       while (cur < pg_offset + size) {
+               u32 offset = cur - pg_offset;
+               int added;
+
+               /* Allocate new bio if needed */
+               if (!bio_ctrl->bio) {
+                       ret = alloc_new_bio(inode, bio_ctrl, wbc, opf,
+                                           end_io_func, disk_bytenr, offset,
+                                           page_offset(page) + cur,
+                                           bio_flags);
+                       if (ret < 0)
+                               return ret;
+               }
+               /*
+                * We must go through btrfs_bio_add_page() to ensure each
+                * page range won't cross various boundaries.
+                */
+               if (bio_flags & EXTENT_BIO_COMPRESSED)
+                       added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr,
+                                       size - offset, pg_offset + offset,
+                                       bio_flags);
+               else
+                       added = btrfs_bio_add_page(bio_ctrl, page,
+                                       disk_bytenr + offset, size - offset,
+                                       pg_offset + offset, bio_flags);
+
+               /* Metadata page range should never be split */
+               if (!is_data_inode(&inode->vfs_inode))
+                       ASSERT(added == 0 || added == size - offset);
+
+               /* We added at least one page, update the accounting */
+               if (wbc && added)
+                       wbc_account_cgroup_owner(wbc, page, added);
+
+               /* We have reached the boundary, submit right now */
+               if (added < size - offset) {
+                       /* The bio should contain some page(s) */
+                       ASSERT(bio_ctrl->bio->bi_iter.bi_size);
+                       ret = submit_one_bio(bio_ctrl->bio, mirror_num,
+                                       bio_ctrl->bio_flags);
                        bio_ctrl->bio = NULL;
                        if (ret < 0)
                                return ret;
-               } else {
-                       if (wbc)
-                               wbc_account_cgroup_owner(wbc, page, io_size);
-                       return 0;
                }
+               cur += added;
        }
-
-       bio = btrfs_bio_alloc(disk_bytenr);
-       bio_add_page(bio, page, io_size, pg_offset);
-       bio->bi_end_io = end_io_func;
-       bio->bi_private = tree;
-       bio->bi_write_hint = page->mapping->host->i_write_hint;
-       bio->bi_opf = opf;
-       if (wbc) {
-               struct block_device *bdev;
-
-               bdev = fs_info->fs_devices->latest_bdev;
-               bio_set_dev(bio, bdev);
-               wbc_init_bio(wbc, bio);
-               wbc_account_cgroup_owner(wbc, page, io_size);
-       }
-       if (btrfs_is_zoned(fs_info) && bio_op(bio) == REQ_OP_ZONE_APPEND) {
-               struct btrfs_device *device;
-
-               device = btrfs_zoned_get_device(fs_info, disk_bytenr, io_size);
-               if (IS_ERR(device))
-                       return PTR_ERR(device);
-
-               btrfs_io_bio(bio)->device = device;
-       }
-
-       bio_ctrl->bio = bio;
-       bio_ctrl->bio_flags = bio_flags;
-       ret = calc_bio_boundaries(bio_ctrl, inode);
-
-       return ret;
+       return 0;
 }
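
submit_extent_page() above is now a loop: open a bio if none is pending, add as much of the remaining range as the boundaries allow, and submit whenever the add comes up short. A simplified userspace model of that control flow, with a fake bio capped at a fixed size (all names and the capacity are illustrative):

#include <stdio.h>

#define BIO_CAPACITY    8192    /* illustrative per-bio limit */

struct fake_bio {
        unsigned int size;      /* bytes currently in the bio */
        int open;
};

static unsigned int bio_add(struct fake_bio *bio, unsigned int len)
{
        unsigned int room = BIO_CAPACITY - bio->size;
        unsigned int added = len < room ? len : room;

        bio->size += added;
        return added;
}

static void bio_submit(struct fake_bio *bio, int *submitted)
{
        printf("submit bio of %u bytes\n", bio->size);
        bio->size = 0;
        bio->open = 0;
        (*submitted)++;
}

static int submit_range(unsigned int size)
{
        struct fake_bio bio = { 0, 0 };
        unsigned int cur = 0;
        int submitted = 0;

        while (cur < size) {
                unsigned int added;

                if (!bio.open)          /* allocate a new bio if needed */
                        bio.open = 1;

                added = bio_add(&bio, size - cur);

                /* Came up short: the bio hit its boundary, submit it. */
                if (added < size - cur)
                        bio_submit(&bio, &submitted);

                cur += added;
        }
        if (bio.open && bio.size)
                bio_submit(&bio, &submitted);
        return submitted;
}

int main(void)
{
        /* 20 KiB across 8 KiB bios -> three submissions. */
        printf("total submissions: %d\n", submit_range(20 * 1024));
        return 0;
}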
 
 static int attach_extent_buffer_page(struct extent_buffer *eb,
@@ -3488,7 +3565,6 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
        size_t pg_offset = 0;
        size_t iosize;
        size_t blocksize = inode->i_sb->s_blocksize;
-       unsigned long this_bio_flag = 0;
        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 
        ret = set_page_extent_mapped(page);
@@ -3519,6 +3595,7 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
        }
        begin_page_read(fs_info, page);
        while (cur <= end) {
+               unsigned long this_bio_flag = 0;
                bool force_bio_submit = false;
                u64 disk_bytenr;
 
@@ -3627,7 +3704,6 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
                /* the get_extent function already copied into the page */
                if (test_range_bit(tree, cur, cur_end,
                                   EXTENT_UPTODATE, 1, NULL)) {
-                       check_page_uptodate(tree, page);
                        unlock_extent(tree, cur, cur + iosize - 1);
                        end_page_read(page, true, cur, iosize);
                        cur = cur + iosize;
@@ -3722,14 +3798,9 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
                ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
                                delalloc_end, &page_started, nr_written, wbc);
                if (ret) {
-                       SetPageError(page);
-                       /*
-                        * btrfs_run_delalloc_range should return < 0 for error
-                        * but just in case, we use > 0 here meaning the IO is
-                        * started, so we don't want to return > 0 unless
-                        * things are going well.
-                        */
-                       return ret < 0 ? ret : -EIO;
+                       btrfs_page_set_error(inode->root->fs_info, page,
+                                            page_offset(page), PAGE_SIZE);
+                       return ret;
                }
                /*
                 * delalloc_end is already one less than the total length, so
@@ -3829,9 +3900,8 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
                                 int *nr_ret)
 {
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
-       u64 start = page_offset(page);
-       u64 end = start + PAGE_SIZE - 1;
-       u64 cur = start;
+       u64 cur = page_offset(page);
+       u64 end = cur + PAGE_SIZE - 1;
        u64 extent_offset;
        u64 block_start;
        struct extent_map *em;
@@ -3841,7 +3911,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
        const unsigned int write_flags = wbc_to_write_flags(wbc);
        bool compressed;
 
-       ret = btrfs_writepage_cow_fixup(page, start, end);
+       ret = btrfs_writepage_cow_fixup(page);
        if (ret) {
                /* Fixup worker will requeue */
                redirty_page_for_writepage(wbc, page);
@@ -3865,7 +3935,16 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 
                if (cur >= i_size) {
                        btrfs_writepage_endio_finish_ordered(inode, page, cur,
-                                                            end, 1);
+                                                            end, true);
+                       /*
+                        * This range is beyond i_size, thus we don't need to
+                        * bother writing back.
+                        * But we still need to clear the dirty subpage bit, or
+                        * the next time the page gets dirtied, we will try to
+                        * write back the sectors with subpage dirty bits,
+                        * causing writeback without an ordered extent.
+                        */
+                       btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
                        break;
                }
 
@@ -3915,7 +3994,8 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
                                nr++;
                        else
                                btrfs_writepage_endio_finish_ordered(inode,
-                                               page, cur, cur + iosize - 1, 1);
+                                               page, cur, cur + iosize - 1, true);
+                       btrfs_page_clear_dirty(fs_info, page, cur, iosize);
                        cur += iosize;
                        continue;
                }
@@ -3951,6 +4031,12 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
                cur += iosize;
                nr++;
        }
+       /*
+        * If we finish without a problem, we should not only clear the page
+        * dirty flag, but also empty the subpage dirty bits.
+        */
+       if (!ret)
+               btrfs_page_assert_not_dirty(fs_info, page);
        *nr_ret = nr;
        return ret;
 }
@@ -3981,7 +4067,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
        WARN_ON(!PageLocked(page));
 
-       ClearPageError(page);
+       btrfs_page_clear_error(btrfs_sb(inode->i_sb), page,
+                              page_offset(page), PAGE_SIZE);
 
        pg_offset = offset_in_page(i_size);
        if (page->index > end_index ||
@@ -4022,10 +4109,39 @@ done:
                set_page_writeback(page);
                end_page_writeback(page);
        }
-       if (PageError(page)) {
-               ret = ret < 0 ? ret : -EIO;
+       /*
+        * Here we used to have a check for PageError() and then set @ret and
+        * call end_extent_writepage().
+        *
+        * But in fact setting @ret here will cause different error paths
+        * between subpage and regular sectorsize.
+        *
+        * For regular page size, we never submit the current page, but only
+        * add the current page to the current bio.
+        * The bio submission can only happen when the next page is processed.
+        * Thus if we hit the PageError() branch, @ret is already set to a
+        * non-zero value and will not get updated for regular sectorsize.
+        *
+        * But for the subpage case, it's possible we submit part of the
+        * current page, thus PageError() can be set by a submitted bio of the
+        * same page, while our @ret is still 0.
+        *
+        * So here we unify the behavior and don't set @ret.
+        * The error can still be properly passed to the higher layer as the
+        * page will have its error bit set; here we just don't handle the IO
+        * failure.
+        *
+        * NOTE: This is just a hotfix for subpage.
+        * The root fix will be to properly end the ordered extent when we hit
+        * an error during writeback.
+        *
+        * But that needs a bigger refactoring, as we not only need to grab the
+        * submitted OE, but also need to know exactly at which bytenr we hit
+        * the error.
+        * Currently the full page based __extent_writepage_io() is not
+        * capable of that.
+        */
+       if (PageError(page))
                end_extent_writepage(page, ret, start, page_end);
-       }
        unlock_page(page);
        ASSERT(ret <= 0);
        return ret;
@@ -4984,7 +5100,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
                        ret = __extent_writepage(page, &wbc_writepages, &epd);
                else {
                        btrfs_writepage_endio_finish_ordered(BTRFS_I(inode),
-                                       page, start, start + PAGE_SIZE - 1, 1);
+                                       page, start, start + PAGE_SIZE - 1, true);
                        unlock_page(page);
                }
                put_page(page);
index 62027f5..53abdc2 100644 (file)
@@ -280,7 +280,7 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 struct bio *btrfs_bio_alloc(u64 first_byte);
 struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
 struct bio *btrfs_bio_clone(struct bio *bio);
-struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);
+struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size);
 
 int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
                      u64 length, u64 logical, struct page *page,
index df6631e..2673c6b 100644 (file)
@@ -233,7 +233,6 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_path *path, u64 objectid,
                             u64 offset, int mod)
 {
-       int ret;
        struct btrfs_key file_key;
        int ins_len = mod < 0 ? -1 : 0;
        int cow = mod != 0;
@@ -241,8 +240,8 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
        file_key.objectid = objectid;
        file_key.offset = offset;
        file_key.type = BTRFS_EXTENT_DATA_KEY;
-       ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
-       return ret;
+
+       return btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
 }
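
Several of the lookup helpers in this series (the dir items above and the file extents here) share the same convention for the mod argument: a negative mod means the caller intends to delete the item (ins_len = -1), and any non-zero mod means the path must be COWed. A tiny illustration of that mapping (not kernel code):

#include <stdio.h>

static void decode_mod(int mod, int *ins_len, int *cow)
{
        *ins_len = (mod < 0) ? -1 : 0;  /* reserve room for a deletion */
        *cow = (mod != 0);              /* any modification needs COW */
}

int main(void)
{
        const int mods[] = { -1, 0, 1 };

        for (int i = 0; i < 3; i++) {
                int ins_len, cow;

                decode_mod(mods[i], &ins_len, &cow);
                printf("mod=%2d -> ins_len=%2d cow=%d\n",
                       mods[i], ins_len, cow);
        }
        return 0;
}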
 
 /*
index ee34497..7ff5770 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/btrfs.h>
 #include <linux/uio.h>
 #include <linux/iversion.h>
+#include <linux/fsverity.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -1340,7 +1341,18 @@ static int prepare_uptodate_page(struct inode *inode,
                        unlock_page(page);
                        return -EIO;
                }
-               if (page->mapping != inode->i_mapping) {
+
+               /*
+                * Since btrfs_readpage() will unlock the page before it
+                * returns, there is a window where btrfs_releasepage() can be
+                * called to release the page.  Here we check both inode
+                * mapping and PagePrivate() to make sure the page was not
+                * released.
+                *
+                * The private flag check is essential for subpage as we need
+                * to store an extra bitmap in page->private.
+                */
+               if (page->mapping != inode->i_mapping || !PagePrivate(page)) {
                        unlock_page(page);
                        return -EAGAIN;
                }
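/*
 * [Editorial sketch, not part of the patch.]  The check above is an instance
 * of a generic "revalidate after relocking" idiom: btrfs_readpage() drops the
 * page lock, so once it has been re-taken both page->mapping and
 * PagePrivate() must be rechecked before the page is trusted again.  The
 * stand-alone example below shows the same idiom with a pthread mutex
 * protecting a hypothetical cache entry; all names are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>

struct cache_entry {
        pthread_mutex_t lock;
        void *owner;            /* NULL once the entry has been released */
};

static bool use_entry(struct cache_entry *e, void *expected_owner,
                      void (*slow_work)(void))
{
        pthread_mutex_lock(&e->lock);
        /* Decide that slow work is needed; it cannot run under the lock. */
        pthread_mutex_unlock(&e->lock);

        slow_work();            /* may sleep; the entry can be reclaimed */

        pthread_mutex_lock(&e->lock);
        if (e->owner != expected_owner) {
                /* The entry was released or reused while unlocked: retry. */
                pthread_mutex_unlock(&e->lock);
                return false;
        }
        /* Safe to use the entry: revalidation ran under the lock. */
        pthread_mutex_unlock(&e->lock);
        return true;
}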
@@ -3604,7 +3616,13 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
 
 static int btrfs_file_open(struct inode *inode, struct file *filp)
 {
+       int ret;
+
        filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
+
+       ret = fsverity_file_open(inode, filp);
+       if (ret)
+               return ret;
        return generic_file_open(inode, filp);
 }
 
@@ -3633,6 +3651,9 @@ static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;
 
+       if (fsverity_active(inode))
+               return 0;
+
        if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
                return 0;
 
index 2131ae5..da0eee7 100644 (file)
@@ -344,19 +344,13 @@ fail:
 
 static void readahead_cache(struct inode *inode)
 {
-       struct file_ra_state *ra;
+       struct file_ra_state ra;
        unsigned long last_index;
 
-       ra = kzalloc(sizeof(*ra), GFP_NOFS);
-       if (!ra)
-               return;
-
-       file_ra_state_init(ra, inode->i_mapping);
+       file_ra_state_init(&ra, inode->i_mapping);
        last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
-       page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
-
-       kfree(ra);
+       page_cache_sync_readahead(inode->i_mapping, &ra, NULL, 0, last_index);
 }
 
 static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
@@ -2544,6 +2538,7 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        u64 offset = bytenr - block_group->start;
        u64 to_free, to_unusable;
+       const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold);
 
        spin_lock(&ctl->tree_lock);
        if (!used)
@@ -2573,9 +2568,9 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
        /* All the region is now unusable. Mark it as unused and reclaim */
        if (block_group->zone_unusable == block_group->length) {
                btrfs_mark_bg_unused(block_group);
-       } else if (block_group->zone_unusable >=
-                  div_factor_fine(block_group->length,
-                                  fs_info->bg_reclaim_threshold)) {
+       } else if (bg_reclaim_threshold &&
+                  block_group->zone_unusable >=
+                  div_factor_fine(block_group->length, bg_reclaim_threshold)) {
                btrfs_mark_bg_to_reclaim(block_group);
        }
 
@@ -2652,8 +2647,11 @@ int btrfs_remove_free_space(struct btrfs_block_group *block_group,
                 * btrfs_pin_extent_for_log_replay() when replaying the log.
                 * Advance the pointer not to overwrite the tree-log nodes.
                 */
-               if (block_group->alloc_offset < offset + bytes)
-                       block_group->alloc_offset = offset + bytes;
+               if (block_group->start + block_group->alloc_offset <
+                   offset + bytes) {
+                       block_group->alloc_offset =
+                               offset + bytes - block_group->start;
+               }
                return 0;
        }
 
index 0117d86..487533c 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/sched/mm.h>
 #include <linux/iomap.h>
 #include <asm/unaligned.h>
+#include <linux/fsverity.h>
 #include "misc.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -286,9 +287,8 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
                        cur_size = min_t(unsigned long, compressed_size,
                                       PAGE_SIZE);
 
-                       kaddr = kmap_atomic(cpage);
+                       kaddr = page_address(cpage);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
-                       kunmap_atomic(kaddr);
 
                        i++;
                        ptr += cur_size;
@@ -490,6 +490,9 @@ static noinline int add_async_extent(struct async_chunk *cow,
  */
 static inline bool inode_can_compress(struct btrfs_inode *inode)
 {
+       /* Subpage doesn't support compression yet */
+       if (inode->root->fs_info->sectorsize < PAGE_SIZE)
+               return false;
        if (inode->flags & BTRFS_INODE_NODATACOW ||
            inode->flags & BTRFS_INODE_NODATASUM)
                return false;
@@ -629,7 +632,7 @@ again:
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
-       if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) {
+       if (inode_need_compress(BTRFS_I(inode), start, end)) {
                WARN_ON(pages);
                pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
                if (!pages) {
@@ -682,7 +685,11 @@ again:
                }
        }
 cont:
-       if (start == 0) {
+       /*
+        * See cow_file_range() for why we don't even try to create an inline
+        * extent for the subpage case.
+        */
+       if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
                /* lets try to make an inline extent */
                if (ret || total_in < actual_end) {
                        /* we didn't compress the entire range, try
@@ -973,7 +980,7 @@ retry:
 
                        p->mapping = inode->vfs_inode.i_mapping;
                        btrfs_writepage_endio_finish_ordered(inode, p, start,
-                                                            end, 0);
+                                                            end, false);
 
                        p->mapping = NULL;
                        extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
@@ -1080,7 +1087,17 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 
        inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
 
-       if (start == 0) {
+       /*
+        * Due to the page size limit, for subpage we can only trigger
+        * writeback for the dirty sectors of a page, which means data
+        * writeback can write back more than what we want.
+        *
+        * This is especially unexpected for some call sites like fallocate,
+        * where we only increase i_size after everything is done.
+        * This means we can end up creating an inline extent even if we
+        * didn't want to, so skip inline extent creation here completely.
+        */
+       if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
                /* lets try to make an inline extent */
                ret = cow_file_range_inline(inode, start, end, 0,
                                            BTRFS_COMPRESS_NONE, NULL);
@@ -1290,11 +1307,6 @@ static noinline void async_cow_submit(struct btrfs_work *work)
        nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
                PAGE_SHIFT;
 
-       /* atomic_sub_return implies a barrier */
-       if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
-           5 * SZ_1M)
-               cond_wake_up_nomb(&fs_info->async_submit_wait);
-
        /*
         * ->inode could be NULL if async_chunk_start has failed to compress,
         * in which case we don't have anything to submit, yet we need to
@@ -1303,6 +1315,11 @@ static noinline void async_cow_submit(struct btrfs_work *work)
         */
        if (async_chunk->inode)
                submit_compressed_extents(async_chunk);
+
+       /* atomic_sub_return implies a barrier */
+       if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
+           5 * SZ_1M)
+               cond_wake_up_nomb(&fs_info->async_submit_wait);
 }
 
 static noinline void async_cow_free(struct btrfs_work *work)
@@ -1946,6 +1963,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
                ret = cow_file_range_async(inode, wbc, locked_page, start, end,
                                           page_started, nr_written);
        }
+       ASSERT(ret <= 0);
        if (ret)
                btrfs_cleanup_ordered_extents(inode, locked_page, start,
                                              end - start + 1);
@@ -2285,7 +2303,6 @@ static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
        struct extent_map *split_mid = NULL;
        struct extent_map *split_post = NULL;
        int ret = 0;
-       int modified;
        unsigned long flags;
 
        /* Sanity check */
@@ -2315,11 +2332,12 @@ static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
        ASSERT(em->len == len);
        ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
        ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
+       ASSERT(test_bit(EXTENT_FLAG_PINNED, &em->flags));
+       ASSERT(!test_bit(EXTENT_FLAG_LOGGING, &em->flags));
+       ASSERT(!list_empty(&em->list));
 
        flags = em->flags;
        clear_bit(EXTENT_FLAG_PINNED, &em->flags);
-       clear_bit(EXTENT_FLAG_LOGGING, &flags);
-       modified = !list_empty(&em->list);
 
        /* First, replace the em with a new extent_map starting from em->start */
        split_pre->start = em->start;
@@ -2333,7 +2351,7 @@ static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
        split_pre->compress_type = em->compress_type;
        split_pre->generation = em->generation;
 
-       replace_extent_mapping(em_tree, em, split_pre, modified);
+       replace_extent_mapping(em_tree, em, split_pre, 1);
 
        /*
         * Now we only have an extent_map at:
@@ -2353,7 +2371,7 @@ static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
                split_mid->flags = flags;
                split_mid->compress_type = em->compress_type;
                split_mid->generation = em->generation;
-               add_extent_mapping(em_tree, split_mid, modified);
+               add_extent_mapping(em_tree, split_mid, 1);
        }
 
        if (post) {
@@ -2367,7 +2385,7 @@ static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
                split_post->flags = flags;
                split_post->compress_type = em->compress_type;
                split_post->generation = em->generation;
-               add_extent_mapping(em_tree, split_post, modified);
+               add_extent_mapping(em_tree, split_post, 1);
        }
 
        /* Once for us */
@@ -2770,7 +2788,7 @@ out_page:
  * to fix it up.  The async helper will wait for ordered extents, set
  * the delalloc bit and make it safe to write the page.
  */
-int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
+int btrfs_writepage_cow_fixup(struct page *page)
 {
        struct inode *inode = page->mapping->host;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -3171,7 +3189,7 @@ static void finish_ordered_fn(struct btrfs_work *work)
 
 void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
                                          struct page *page, u64 start,
-                                         u64 end, int uptodate)
+                                         u64 end, bool uptodate)
 {
        trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);
 
@@ -3257,25 +3275,44 @@ unsigned int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
                return 0;
        }
 
-       if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
+       /*
+        * For the subpage case, the above PageChecked() test is not safe, as
+        * it is not subpage compatible.
+        * But for now only cow fixup and compressed read use the PageChecked
+        * flag, and in this context io_bio->csum is enough to determine
+        * whether we really need to do csum verification.
+        *
+        * So for now, just exit if io_bio->csum is NULL, as that means it's a
+        * compressed read and its compressed data csum has already been
+        * verified.
+        */
+       if (io_bio->csum == NULL)
                return 0;
 
-       if (!root->fs_info->csum_root)
+       if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
                return 0;
 
-       if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
-           test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
-               clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
+       if (!root->fs_info->csum_root)
                return 0;
-       }
 
        ASSERT(page_offset(page) <= start &&
               end <= page_offset(page) + PAGE_SIZE - 1);
        for (pg_off = offset_in_page(start);
             pg_off < offset_in_page(end);
             pg_off += sectorsize, bio_offset += sectorsize) {
+               u64 file_offset = pg_off + page_offset(page);
                int ret;
 
+               if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
+                   test_range_bit(io_tree, file_offset,
+                                  file_offset + sectorsize - 1,
+                                  EXTENT_NODATASUM, 1, NULL)) {
+                       /* Skip the range without csum for data reloc inode */
+                       clear_extent_bits(io_tree, file_offset,
+                                         file_offset + sectorsize - 1,
+                                         EXTENT_NODATASUM);
+                       continue;
+               }
                ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off,
                                      page_offset(page) + pg_off);
                if (ret < 0) {
@@ -3520,7 +3557,14 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 
                /*
                 * If we have an inode with links, there are a couple of
-                * possibilities. Old kernels (before v3.12) used to create an
+                * possibilities:
+                *
+                * 1. We were halfway through creating fsverity metadata for the
+                * file. In that case, the orphan item represents incomplete
+                * fsverity metadata which must be cleaned up by calling
+                * btrfs_drop_verity_items() and deleting the orphan item.
+                *
+                * 2. Old kernels (before v3.12) used to create an
                 * orphan item for truncate indicating that there were possibly
                 * extent items past i_size that needed to be deleted. In v3.12,
                 * truncate was changed to update i_size in sync with the extent
@@ -3538,8 +3582,12 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                 * but either way, we can delete the orphan item.
                 */
                if (ret == -ENOENT || inode->i_nlink) {
-                       if (!ret)
+                       if (!ret) {
+                               ret = btrfs_drop_verity_items(BTRFS_I(inode));
                                iput(inode);
+                               if (ret)
+                                       goto out;
+                       }
                        trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans)) {
                                ret = PTR_ERR(trans);
@@ -3728,7 +3776,8 @@ static int btrfs_read_locked_inode(struct inode *inode,
        rdev = btrfs_inode_rdev(leaf, inode_item);
 
        BTRFS_I(inode)->index_cnt = (u64)-1;
-       BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
+       btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
+                               &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
 
 cache_index:
        /*
@@ -3859,6 +3908,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
                            struct inode *inode)
 {
        struct btrfs_map_token token;
+       u64 flags;
 
        btrfs_init_map_token(&token, leaf);
 
@@ -3894,7 +3944,9 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
        btrfs_set_token_inode_transid(&token, item, trans->transid);
        btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
-       btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags);
+       flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
+                                         BTRFS_I(inode)->ro_flags);
+       btrfs_set_token_inode_flags(&token, item, flags);
        btrfs_set_token_inode_block_group(&token, item, 0);
 }
 
@@ -5088,15 +5140,13 @@ static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
        int ret;
 
        /*
-        * Still need to make sure the inode looks like it's been updated so
-        * that any holes get logged if we fsync.
+        * If NO_HOLES is enabled, we don't need to do anything.
+        * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
+        * or btrfs_update_inode() will be called, which guarantees that the next
+        * fsync will know this inode was changed and needs to be logged.
         */
-       if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
-               inode->last_trans = fs_info->generation;
-               inode->last_sub_trans = root->log_transid;
-               inode->last_log_commit = root->last_log_commit;
+       if (btrfs_fs_incompat(fs_info, NO_HOLES))
                return 0;
-       }
 
        /*
         * 1 - for the one we're dropping
@@ -5342,7 +5392,7 @@ static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentr
        if (btrfs_root_readonly(root))
                return -EROFS;
 
-       err = setattr_prepare(&init_user_ns, dentry, attr);
+       err = setattr_prepare(mnt_userns, dentry, attr);
        if (err)
                return err;
 
@@ -5353,13 +5403,12 @@ static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentr
        }
 
        if (attr->ia_valid) {
-               setattr_copy(&init_user_ns, inode, attr);
+               setattr_copy(mnt_userns, inode, attr);
                inode_inc_iversion(inode);
                err = btrfs_dirty_inode(inode);
 
                if (!err && attr->ia_valid & ATTR_MODE)
-                       err = posix_acl_chmod(&init_user_ns, inode,
-                                             inode->i_mode);
+                       err = posix_acl_chmod(mnt_userns, inode, inode->i_mode);
        }
 
        return err;
@@ -5522,6 +5571,7 @@ void btrfs_evict_inode(struct inode *inode)
        trace_btrfs_inode_evict(inode);
 
        if (!root) {
+               fsverity_cleanup_inode(inode);
                clear_inode(inode);
                return;
        }
@@ -5604,6 +5654,7 @@ no_delete:
         * to retry these periodically in the future.
         */
        btrfs_remove_delayed_node(BTRFS_I(inode));
+       fsverity_cleanup_inode(inode);
        clear_inode(inode);
 }
 
@@ -6370,6 +6421,7 @@ static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
 
 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
+                                    struct user_namespace *mnt_userns,
                                     struct inode *dir,
                                     const char *name, int name_len,
                                     u64 ref_objectid, u64 objectid,
@@ -6479,7 +6531,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        if (ret != 0)
                goto fail_unlock;
 
-       inode_init_owner(&init_user_ns, inode, dir, mode);
+       inode_init_owner(mnt_userns, inode, dir, mode);
        inode_set_bytes(inode, 0);
 
        inode->i_mtime = current_time(inode);
@@ -6664,9 +6716,9 @@ static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
        if (err)
                goto out_unlock;
 
-       inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                       dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
-                       mode, &index);
+       inode = btrfs_new_inode(trans, root, mnt_userns, dir,
+                       dentry->d_name.name, dentry->d_name.len,
+                       btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                inode = NULL;
@@ -6728,9 +6780,9 @@ static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir,
        if (err)
                goto out_unlock;
 
-       inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                       dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
-                       mode, &index);
+       inode = btrfs_new_inode(trans, root, mnt_userns, dir,
+                       dentry->d_name.name, dentry->d_name.len,
+                       btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                inode = NULL;
@@ -6873,8 +6925,9 @@ static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
        if (err)
                goto out_fail;
 
-       inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                       dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
+       inode = btrfs_new_inode(trans, root, mnt_userns, dir,
+                       dentry->d_name.name, dentry->d_name.len,
+                       btrfs_ino(BTRFS_I(dir)), objectid,
                        S_IFDIR | mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
@@ -8194,9 +8247,10 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
        return dip;
 }
 
-static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
+static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter,
                struct bio *dio_bio, loff_t file_offset)
 {
+       struct inode *inode = iter->inode;
        const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
@@ -8206,13 +8260,13 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
        u64 start_sector;
        int async_submit = 0;
        u64 submit_len;
-       int clone_offset = 0;
-       int clone_len;
+       u64 clone_offset = 0;
+       u64 clone_len;
        u64 logical;
        int ret;
        blk_status_t status;
        struct btrfs_io_geometry geom;
-       struct btrfs_dio_data *dio_data = iomap->private;
+       struct btrfs_dio_data *dio_data = iter->iomap.private;
        struct extent_map *em = NULL;
 
        dip = btrfs_create_dio_private(dio_bio, inode, file_offset);
@@ -8255,9 +8309,9 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
                        status = errno_to_blk_status(ret);
                        goto out_err_em;
                }
-               ASSERT(geom.len <= INT_MAX);
 
-               clone_len = min_t(int, submit_len, geom.len);
+               clone_len = min(submit_len, geom.len);
+               ASSERT(clone_len <= UINT_MAX);
 
                /*
                 * This will never fail as it's passing GFP_NOFS and
@@ -8401,11 +8455,47 @@ static void btrfs_readahead(struct readahead_control *rac)
        extent_readahead(rac);
 }
 
+/*
+ * For releasepage() and invalidatepage() we have a race window where
+ * end_page_writeback() is called but the subpage spinlock is not yet released.
+ * If we continue to release/invalidate the page, we could cause a
+ * use-after-free of the subpage spinlock.  So this function waits until the
+ * subpage spinlock has been released.
+ */
+static void wait_subpage_spinlock(struct page *page)
+{
+       struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
+       struct btrfs_subpage *subpage;
+
+       if (fs_info->sectorsize == PAGE_SIZE)
+               return;
+
+       ASSERT(PagePrivate(page) && page->private);
+       subpage = (struct btrfs_subpage *)page->private;
+
+       /*
+        * This may look odd, as we just acquire the spinlock and release it
+        * without doing anything, but we only want to make sure no one is
+        * still holding the subpage spinlock.
+        * And since the page is neither dirty nor under writeback, and we
+        * hold the page lock, the only possible holder of the spinlock is the
+        * endio function clearing page writeback.
+        *
+        * Acquiring the spinlock guarantees that all existing holders have
+        * exited, so we're safe to release/invalidate the page.
+        */
+       spin_lock_irq(&subpage->lock);
+       spin_unlock_irq(&subpage->lock);
+}
+
 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
 {
        int ret = try_release_extent_mapping(page, gfp_flags);
-       if (ret == 1)
+
+       if (ret == 1) {
+               wait_subpage_spinlock(page);
                clear_page_extent_mapped(page);
+       }
        return ret;
 }
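/*
 * [Editorial sketch, not part of the patch.]  wait_subpage_spinlock() above
 * uses the "acquire then immediately release" idiom: the acquisition cannot
 * succeed until every current holder has dropped the lock, so an empty
 * critical section works as a wait-for-holders barrier.  The stand-alone
 * example below shows the same idiom with a pthread mutex; all names are
 * illustrative.
 */
#include <pthread.h>

/*
 * Returns only after every thread that held @lock when this was called has
 * released it.  No data is touched; the acquisition itself is the wait.
 */
static void wait_for_current_holders(pthread_mutex_t *lock)
{
        pthread_mutex_lock(lock);
        pthread_mutex_unlock(lock);
}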
 
@@ -8469,6 +8559,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
         * do double ordered extent accounting on the same page.
         */
        wait_on_page_writeback(page);
+       wait_subpage_spinlock(page);
 
        /*
         * For subpage case, we have call sites like
@@ -8557,7 +8648,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
                spin_unlock_irq(&inode->ordered_tree.lock);
 
                if (btrfs_dec_test_ordered_pending(inode, &ordered,
-                                       cur, range_end + 1 - cur, 1)) {
+                                                  cur, range_end + 1 - cur)) {
                        btrfs_finish_ordered_io(ordered);
                        /*
                         * The ordered extent has finished, now we're again
@@ -8938,7 +9029,8 @@ out:
  */
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
                             struct btrfs_root *new_root,
-                            struct btrfs_root *parent_root)
+                            struct btrfs_root *parent_root,
+                            struct user_namespace *mnt_userns)
 {
        struct inode *inode;
        int err;
@@ -8949,7 +9041,8 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
        if (err < 0)
                return err;
 
-       inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, ino, ino,
+       inode = btrfs_new_inode(trans, new_root, mnt_userns, NULL, "..", 2,
+                               ino, ino,
                                S_IFDIR | (~current_umask() & S_IRWXUGO),
                                &index);
        if (IS_ERR(inode))
@@ -8993,6 +9086,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        ei->defrag_bytes = 0;
        ei->disk_i_size = 0;
        ei->flags = 0;
+       ei->ro_flags = 0;
        ei->csum_bytes = 0;
        ei->index_cnt = (u64)-1;
        ei->dir_index = 0;
@@ -9174,6 +9268,7 @@ static int btrfs_getattr(struct user_namespace *mnt_userns,
        struct inode *inode = d_inode(path->dentry);
        u32 blocksize = inode->i_sb->s_blocksize;
        u32 bi_flags = BTRFS_I(inode)->flags;
+       u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
 
        stat->result_mask |= STATX_BTIME;
        stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
@@ -9186,13 +9281,15 @@ static int btrfs_getattr(struct user_namespace *mnt_userns,
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        if (bi_flags & BTRFS_INODE_NODUMP)
                stat->attributes |= STATX_ATTR_NODUMP;
+       if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
+               stat->attributes |= STATX_ATTR_VERITY;
 
        stat->attributes_mask |= (STATX_ATTR_APPEND |
                                  STATX_ATTR_COMPRESSED |
                                  STATX_ATTR_IMMUTABLE |
                                  STATX_ATTR_NODUMP);
 
-       generic_fillattr(&init_user_ns, inode, stat);
+       generic_fillattr(mnt_userns, inode, stat);
        stat->dev = BTRFS_I(inode)->root->anon_dev;
 
        spin_lock(&BTRFS_I(inode)->lock);
@@ -9226,8 +9323,14 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        bool dest_log_pinned = false;
        bool need_abort = false;
 
-       /* we only allow rename subvolume link between subvolumes */
-       if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+       /*
+        * For non-subvolumes allow exchange only within one subvolume, in the
+        * same inode namespace. Two subvolumes (represented as directories)
+        * can be exchanged as they're logical links and have fixed inode
+        * numbers.
+        */
+       if (root != dest &&
+           (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
+            new_ino != BTRFS_FIRST_FREE_OBJECTID))
                return -EXDEV;
 
        /* close the race window with snapshot create/destroy ioctl */
@@ -9274,8 +9377,6 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                /* force full log commit if subvolume involved. */
                btrfs_set_log_full_commit(trans);
        } else {
-               btrfs_pin_log_trans(root);
-               root_log_pinned = true;
                ret = btrfs_insert_inode_ref(trans, dest,
                                             new_dentry->d_name.name,
                                             new_dentry->d_name.len,
@@ -9292,8 +9393,6 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                /* force full log commit if subvolume involved. */
                btrfs_set_log_full_commit(trans);
        } else {
-               btrfs_pin_log_trans(dest);
-               dest_log_pinned = true;
                ret = btrfs_insert_inode_ref(trans, root,
                                             old_dentry->d_name.name,
                                             old_dentry->d_name.len,
@@ -9324,6 +9423,29 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                                BTRFS_I(new_inode), 1);
        }
 
+       /*
+        * Now pin the logs of the roots. We do it to ensure that no other task
+        * can sync the logs while we are in progress with the rename, because
+        * that could result in an inconsistency in case any of the inodes that
+        * are part of this rename operation were logged before.
+        *
+        * We pin the logs even if at this precise moment none of the inodes was
+        * logged before. This is because right after we checked for that, some
+        * other task fsyncing some other inode not involved with this rename
+        * operation could log that one of our inodes exists.
+        *
+        * We don't need to pin the logs before the above calls to
+        * btrfs_insert_inode_ref(), since those don't ever need to change a log.
+        */
+       if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
+               btrfs_pin_log_trans(root);
+               root_log_pinned = true;
+       }
+       if (new_ino != BTRFS_FIRST_FREE_OBJECTID) {
+               btrfs_pin_log_trans(dest);
+               dest_log_pinned = true;
+       }
+
        /* src is a subvolume */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
                ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
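/*
 * [Editorial sketch, not part of the patch.]  The log-pinning comment in the
 * hunk above describes a general pattern: instead of checking whether a
 * concurrent log sync currently cares about these inodes (the answer can
 * change right after the check), pin the log for the whole multi-step rename
 * so the sync cannot observe a half-done state.  The stand-alone example
 * below models the pin with a pthread rwlock; all names are illustrative.
 */
#include <pthread.h>

static pthread_rwlock_t log_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Updater: hold the "pin" (shared lock) across every step of the update. */
static void multi_step_update(void (*step1)(void), void (*step2)(void))
{
        pthread_rwlock_rdlock(&log_lock);       /* many updaters may pin at once */
        step1();
        step2();                                /* the syncer cannot run in between */
        pthread_rwlock_unlock(&log_lock);
}

/* Syncer: waits for all current pins, then sees a consistent state. */
static void sync_log(void (*write_out)(void))
{
        pthread_rwlock_wrlock(&log_lock);
        write_out();
        pthread_rwlock_unlock(&log_lock);
}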
@@ -9405,8 +9527,7 @@ out_fail:
                if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
                    btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
                    btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
-                   (new_inode &&
-                    btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
+                   btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))
                        btrfs_set_log_full_commit(trans);
 
                if (root_log_pinned) {
@@ -9430,6 +9551,7 @@ out_notrans:
 
 static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
+                                    struct user_namespace *mnt_userns,
                                     struct inode *dir,
                                     struct dentry *dentry)
 {
@@ -9442,7 +9564,7 @@ static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
        if (ret)
                return ret;
 
-       inode = btrfs_new_inode(trans, root, dir,
+       inode = btrfs_new_inode(trans, root, mnt_userns, dir,
                                dentry->d_name.name,
                                dentry->d_name.len,
                                btrfs_ino(BTRFS_I(dir)),
@@ -9479,9 +9601,10 @@ out:
        return ret;
 }
 
-static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-                          struct inode *new_dir, struct dentry *new_dentry,
-                          unsigned int flags)
+static int btrfs_rename(struct user_namespace *mnt_userns,
+                       struct inode *old_dir, struct dentry *old_dentry,
+                       struct inode *new_dir, struct dentry *new_dentry,
+                       unsigned int flags)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
        struct btrfs_trans_handle *trans;
@@ -9576,8 +9699,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                /* force full log commit if subvolume involved. */
                btrfs_set_log_full_commit(trans);
        } else {
-               btrfs_pin_log_trans(root);
-               log_pinned = true;
                ret = btrfs_insert_inode_ref(trans, dest,
                                             new_dentry->d_name.name,
                                             new_dentry->d_name.len,
@@ -9601,6 +9722,25 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
                ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
        } else {
+               /*
+                * Now pin the log. We do it to ensure that no other task can
+                * sync the log while we are in progress with the rename, as
+                * that could result in an inconsistency in case any of the
+                * inodes that are part of this rename operation were logged
+                * before.
+                *
+                * We pin the log even if at this precise moment none of the
+                * inodes was logged before. This is because right after we
+                * checked for that, some other task fsyncing some other inode
+                * not involved with this rename operation could log that one of
+                * our inodes exists.
+                *
+                * We don't need to pin the logs before the above call to
+                * btrfs_insert_inode_ref(), since that does not need to change
+                * a log.
+                */
+               btrfs_pin_log_trans(root);
+               log_pinned = true;
                ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
                                        BTRFS_I(d_inode(old_dentry)),
                                        old_dentry->d_name.name,
@@ -9654,8 +9794,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        }
 
        if (flags & RENAME_WHITEOUT) {
-               ret = btrfs_whiteout_for_rename(trans, root, old_dir,
-                                               old_dentry);
+               ret = btrfs_whiteout_for_rename(trans, root, mnt_userns,
+                                               old_dir, old_dentry);
 
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
@@ -9705,7 +9845,8 @@ static int btrfs_rename2(struct user_namespace *mnt_userns, struct inode *old_di
                return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
                                          new_dentry);
 
-       return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
+       return btrfs_rename(mnt_userns, old_dir, old_dentry, new_dir,
+                           new_dentry, flags);
 }
 
 struct btrfs_delalloc_work {
@@ -9802,11 +9943,7 @@ static int start_delalloc_inodes(struct btrfs_root *root,
                        btrfs_queue_work(root->fs_info->flush_workers,
                                         &work->work);
                } else {
-                       ret = sync_inode(inode, wbc);
-                       if (!ret &&
-                           test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-                                    &BTRFS_I(inode)->runtime_flags))
-                               ret = sync_inode(inode, wbc);
+                       ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
                        btrfs_add_delayed_iput(inode);
                        if (ret || wbc->nr_to_write <= 0)
                                goto out;
@@ -9941,9 +10078,10 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
        if (err)
                goto out_unlock;
 
-       inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                               dentry->d_name.len, btrfs_ino(BTRFS_I(dir)),
-                               objectid, S_IFLNK|S_IRWXUGO, &index);
+       inode = btrfs_new_inode(trans, root, mnt_userns, dir,
+                               dentry->d_name.name, dentry->d_name.len,
+                               btrfs_ino(BTRFS_I(dir)), objectid,
+                               S_IFLNK | S_IRWXUGO, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                inode = NULL;
@@ -10267,7 +10405,7 @@ static int btrfs_permission(struct user_namespace *mnt_userns,
                if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
                        return -EACCES;
        }
-       return generic_permission(&init_user_ns, inode, mask);
+       return generic_permission(mnt_userns, inode, mask);
 }
 
 static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
@@ -10292,7 +10430,7 @@ static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
        if (ret)
                goto out;
 
-       inode = btrfs_new_inode(trans, root, dir, NULL, 0,
+       inode = btrfs_new_inode(trans, root, mnt_userns, dir, NULL, 0,
                        btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
index 0ba98e0..41524f9 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/iversion.h>
 #include <linux/fileattr.h>
+#include <linux/fsverity.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "export.h"
@@ -103,9 +104,11 @@ static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
  * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
  * ioctl.
  */
-static unsigned int btrfs_inode_flags_to_fsflags(unsigned int flags)
+static unsigned int btrfs_inode_flags_to_fsflags(struct btrfs_inode *binode)
 {
        unsigned int iflags = 0;
+       u32 flags = binode->flags;
+       u32 ro_flags = binode->ro_flags;
 
        if (flags & BTRFS_INODE_SYNC)
                iflags |= FS_SYNC_FL;
@@ -121,6 +124,8 @@ static unsigned int btrfs_inode_flags_to_fsflags(unsigned int flags)
                iflags |= FS_DIRSYNC_FL;
        if (flags & BTRFS_INODE_NODATACOW)
                iflags |= FS_NOCOW_FL;
+       if (ro_flags & BTRFS_INODE_RO_VERITY)
+               iflags |= FS_VERITY_FL;
 
        if (flags & BTRFS_INODE_NOCOMPRESS)
                iflags |= FS_NOCOMP_FL;
@@ -148,10 +153,12 @@ void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
                new_fl |= S_NOATIME;
        if (binode->flags & BTRFS_INODE_DIRSYNC)
                new_fl |= S_DIRSYNC;
+       if (binode->ro_flags & BTRFS_INODE_RO_VERITY)
+               new_fl |= S_VERITY;
 
        set_mask_bits(&inode->i_flags,
-                     S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
-                     new_fl);
+                     S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC |
+                     S_VERITY, new_fl);
 }
 
 /*
@@ -200,7 +207,7 @@ int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
 {
        struct btrfs_inode *binode = BTRFS_I(d_inode(dentry));
 
-       fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(binode->flags));
+       fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(binode));
        return 0;
 }
 
@@ -224,7 +231,7 @@ int btrfs_fileattr_set(struct user_namespace *mnt_userns,
                return -EOPNOTSUPP;
 
        fsflags = btrfs_mask_fsflags_for_type(inode, fa->flags);
-       old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
+       old_fsflags = btrfs_inode_flags_to_fsflags(binode);
        ret = check_fsflags(old_fsflags, fsflags);
        if (ret)
                return ret;
@@ -492,8 +499,8 @@ int __pure btrfs_is_empty_uuid(u8 *uuid)
        return 1;
 }
 
-static noinline int create_subvol(struct inode *dir,
-                                 struct dentry *dentry,
+static noinline int create_subvol(struct user_namespace *mnt_userns,
+                                 struct inode *dir, struct dentry *dentry,
                                  const char *name, int namelen,
                                  struct btrfs_qgroup_inherit *inherit)
 {
@@ -638,7 +645,7 @@ static noinline int create_subvol(struct inode *dir,
                goto fail;
        }
 
-       ret = btrfs_create_subvol_root(trans, new_root, root);
+       ret = btrfs_create_subvol_root(trans, new_root, root, mnt_userns);
        btrfs_put_root(new_root);
        if (ret) {
                /* We potentially lose an unused inode item here */
@@ -830,7 +837,8 @@ free_pending:
  *     nfs_async_unlink().
  */
 
-static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
+static int btrfs_may_delete(struct user_namespace *mnt_userns,
+                           struct inode *dir, struct dentry *victim, int isdir)
 {
        int error;
 
@@ -840,12 +848,12 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
        BUG_ON(d_inode(victim->d_parent) != dir);
        audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
 
-       error = inode_permission(&init_user_ns, dir, MAY_WRITE | MAY_EXEC);
+       error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
        if (error)
                return error;
        if (IS_APPEND(dir))
                return -EPERM;
-       if (check_sticky(&init_user_ns, dir, d_inode(victim)) ||
+       if (check_sticky(mnt_userns, dir, d_inode(victim)) ||
            IS_APPEND(d_inode(victim)) || IS_IMMUTABLE(d_inode(victim)) ||
            IS_SWAPFILE(d_inode(victim)))
                return -EPERM;
@@ -864,13 +872,16 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
 }
 
 /* copy of may_create in fs/namei.c() */
-static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
+static inline int btrfs_may_create(struct user_namespace *mnt_userns,
+                                  struct inode *dir, struct dentry *child)
 {
        if (d_really_is_positive(child))
                return -EEXIST;
        if (IS_DEADDIR(dir))
                return -ENOENT;
-       return inode_permission(&init_user_ns, dir, MAY_WRITE | MAY_EXEC);
+       if (!fsuidgid_has_mapping(dir->i_sb, mnt_userns))
+               return -EOVERFLOW;
+       return inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
 }
 
 /*
@@ -879,6 +890,7 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
  * inside this filesystem so it's quite a bit simpler.
  */
 static noinline int btrfs_mksubvol(const struct path *parent,
+                                  struct user_namespace *mnt_userns,
                                   const char *name, int namelen,
                                   struct btrfs_root *snap_src,
                                   bool readonly,
@@ -893,12 +905,12 @@ static noinline int btrfs_mksubvol(const struct path *parent,
        if (error == -EINTR)
                return error;
 
-       dentry = lookup_one_len(name, parent->dentry, namelen);
+       dentry = lookup_one(mnt_userns, name, parent->dentry, namelen);
        error = PTR_ERR(dentry);
        if (IS_ERR(dentry))
                goto out_unlock;
 
-       error = btrfs_may_create(dir, dentry);
+       error = btrfs_may_create(mnt_userns, dir, dentry);
        if (error)
                goto out_dput;
 
@@ -920,7 +932,7 @@ static noinline int btrfs_mksubvol(const struct path *parent,
        if (snap_src)
                error = create_snapshot(snap_src, dir, dentry, readonly, inherit);
        else
-               error = create_subvol(dir, dentry, name, namelen, inherit);
+               error = create_subvol(mnt_userns, dir, dentry, name, namelen, inherit);
 
        if (!error)
                fsnotify_mkdir(dir, dentry);
@@ -934,6 +946,7 @@ out_unlock:
 }
 
 static noinline int btrfs_mksnapshot(const struct path *parent,
+                                  struct user_namespace *mnt_userns,
                                   const char *name, int namelen,
                                   struct btrfs_root *root,
                                   bool readonly,
@@ -963,7 +976,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
 
        btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 
-       ret = btrfs_mksubvol(parent, name, namelen,
+       ret = btrfs_mksubvol(parent, mnt_userns, name, namelen,
                             root, readonly, inherit);
 out:
        if (snapshot_force_cow)
@@ -1792,6 +1805,7 @@ out_drop:
 }
 
 static noinline int __btrfs_ioctl_snap_create(struct file *file,
+                               struct user_namespace *mnt_userns,
                                const char *name, unsigned long fd, int subvol,
                                bool readonly,
                                struct btrfs_qgroup_inherit *inherit)
@@ -1819,8 +1833,8 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
        }
 
        if (subvol) {
-               ret = btrfs_mksubvol(&file->f_path, name, namelen,
-                                    NULL, readonly, inherit);
+               ret = btrfs_mksubvol(&file->f_path, mnt_userns, name,
+                                    namelen, NULL, readonly, inherit);
        } else {
                struct fd src = fdget(fd);
                struct inode *src_inode;
@@ -1834,16 +1848,17 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
                        btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
                                   "Snapshot src from another FS");
                        ret = -EXDEV;
-               } else if (!inode_owner_or_capable(&init_user_ns, src_inode)) {
+               } else if (!inode_owner_or_capable(mnt_userns, src_inode)) {
                        /*
                         * Subvolume creation is not restricted, but snapshots
                         * are limited to own subvolumes only
                         */
                        ret = -EPERM;
                } else {
-                       ret = btrfs_mksnapshot(&file->f_path, name, namelen,
-                                            BTRFS_I(src_inode)->root,
-                                            readonly, inherit);
+                       ret = btrfs_mksnapshot(&file->f_path, mnt_userns,
+                                              name, namelen,
+                                              BTRFS_I(src_inode)->root,
+                                              readonly, inherit);
                }
                fdput(src);
        }
@@ -1867,8 +1882,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
                return PTR_ERR(vol_args);
        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
 
-       ret = __btrfs_ioctl_snap_create(file, vol_args->name, vol_args->fd,
-                                       subvol, false, NULL);
+       ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
+                                       vol_args->name, vol_args->fd, subvol,
+                                       false, NULL);
 
        kfree(vol_args);
        return ret;
@@ -1926,8 +1942,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
                }
        }
 
-       ret = __btrfs_ioctl_snap_create(file, vol_args->name, vol_args->fd,
-                                       subvol, readonly, inherit);
+       ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
+                                       vol_args->name, vol_args->fd, subvol,
+                                       readonly, inherit);
        if (ret)
                goto free_inherit;
 free_inherit:
@@ -1971,7 +1988,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
        u64 flags;
        int ret = 0;
 
-       if (!inode_owner_or_capable(&init_user_ns, inode))
+       if (!inode_owner_or_capable(file_mnt_user_ns(file), inode))
                return -EPERM;
 
        ret = mnt_want_write_file(file);
@@ -2382,23 +2399,16 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
        key.offset = (u64)-1;
 
        while (1) {
-               ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+               ret = btrfs_search_backwards(root, &key, path);
                if (ret < 0)
                        goto out;
                else if (ret > 0) {
-                       ret = btrfs_previous_item(root, path, dirid,
-                                                 BTRFS_INODE_REF_KEY);
-                       if (ret < 0)
-                               goto out;
-                       else if (ret > 0) {
-                               ret = -ENOENT;
-                               goto out;
-                       }
+                       ret = -ENOENT;
+                       goto out;
                }
 
                l = path->nodes[0];
                slot = path->slots[0];
-               btrfs_item_key_to_cpu(l, &key, slot);
 
                iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
                len = btrfs_inode_ref_name_len(l, iref);
@@ -2429,7 +2439,8 @@ out:
        return ret;
 }
 
-static int btrfs_search_path_in_tree_user(struct inode *inode,
+static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
+                               struct inode *inode,
                                struct btrfs_ioctl_ino_lookup_user_args *args)
 {
        struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
@@ -2473,23 +2484,16 @@ static int btrfs_search_path_in_tree_user(struct inode *inode,
                key.type = BTRFS_INODE_REF_KEY;
                key.offset = (u64)-1;
                while (1) {
-                       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-                       if (ret < 0) {
+                       ret = btrfs_search_backwards(root, &key, path);
+                       if (ret < 0)
+                               goto out_put;
+                       else if (ret > 0) {
+                               ret = -ENOENT;
                                goto out_put;
-                       } else if (ret > 0) {
-                               ret = btrfs_previous_item(root, path, dirid,
-                                                         BTRFS_INODE_REF_KEY);
-                               if (ret < 0) {
-                                       goto out_put;
-                               } else if (ret > 0) {
-                                       ret = -ENOENT;
-                                       goto out_put;
-                               }
                        }
 
                        leaf = path->nodes[0];
                        slot = path->slots[0];
-                       btrfs_item_key_to_cpu(leaf, &key, slot);
 
                        iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref);
                        len = btrfs_inode_ref_name_len(leaf, iref);
@@ -2527,7 +2531,7 @@ static int btrfs_search_path_in_tree_user(struct inode *inode,
                                ret = PTR_ERR(temp_inode);
                                goto out_put;
                        }
-                       ret = inode_permission(&init_user_ns, temp_inode,
+                       ret = inode_permission(mnt_userns, temp_inode,
                                               MAY_READ | MAY_EXEC);
                        iput(temp_inode);
                        if (ret) {
@@ -2669,7 +2673,7 @@ static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
                return -EACCES;
        }
 
-       ret = btrfs_search_path_in_tree_user(inode, args);
+       ret = btrfs_search_path_in_tree_user(file_mnt_user_ns(file), inode, args);
 
        if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
                ret = -EFAULT;
@@ -2905,6 +2909,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
        struct btrfs_root *dest = NULL;
        struct btrfs_ioctl_vol_args *vol_args = NULL;
        struct btrfs_ioctl_vol_args_v2 *vol_args2 = NULL;
+       struct user_namespace *mnt_userns = file_mnt_user_ns(file);
        char *subvol_name, *subvol_name_ptr = NULL;
        int subvol_namelen;
        int err = 0;
@@ -2932,6 +2937,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                        if (err)
                                goto out;
                } else {
+                       struct inode *old_dir;
+
                        if (vol_args2->subvolid < BTRFS_FIRST_FREE_OBJECTID) {
                                err = -EINVAL;
                                goto out;
@@ -2968,6 +2975,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                                err = PTR_ERR(parent);
                                goto out_drop_write;
                        }
+                       old_dir = dir;
                        dir = d_inode(parent);
 
                        /*
@@ -2978,6 +2986,20 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                         */
                        destroy_parent = true;
 
+                       /*
+                        * On idmapped mounts, deletion via subvolid is
+                        * restricted to subvolumes that are immediate
+                        * ancestors of the inode referenced by the file
+                        * descriptor in the ioctl. Otherwise the idmapping
+                        * could potentially be abused to delete subvolumes
+                        * anywhere in the filesystem the user wouldn't be able
+                        * to delete without an idmapped mount.
+                        */
+                       if (old_dir != dir && mnt_userns != &init_user_ns) {
+                               err = -EOPNOTSUPP;
+                               goto free_parent;
+                       }
+
                        subvol_name_ptr = btrfs_get_subvol_name_from_objectid(
                                                fs_info, vol_args2->subvolid);
                        if (IS_ERR(subvol_name_ptr)) {
@@ -3016,7 +3038,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
        err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
        if (err == -EINTR)
                goto free_subvol_name;
-       dentry = lookup_one_len(subvol_name, parent, subvol_namelen);
+       dentry = lookup_one(mnt_userns, subvol_name, parent, subvol_namelen);
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out_unlock_dir;
@@ -3058,14 +3080,13 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                if (root == dest)
                        goto out_dput;
 
-               err = inode_permission(&init_user_ns, inode,
-                                      MAY_WRITE | MAY_EXEC);
+               err = inode_permission(mnt_userns, inode, MAY_WRITE | MAY_EXEC);
                if (err)
                        goto out_dput;
        }
 
        /* check if subvolume may be deleted by a user */
-       err = btrfs_may_delete(dir, dentry, 1);
+       err = btrfs_may_delete(mnt_userns, dir, dentry, 1);
        if (err)
                goto out_dput;
 
@@ -3103,7 +3124,7 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
 {
        struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       struct btrfs_ioctl_defrag_range_args *range;
+       struct btrfs_ioctl_defrag_range_args range = {0};
        int ret;
 
        ret = mnt_want_write_file(file);
@@ -3115,6 +3136,12 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
                goto out;
        }
 
+       /* Subpage defrag will be supported in later commits */
+       if (root->fs_info->sectorsize < PAGE_SIZE) {
+               ret = -ENOTTY;
+               goto out;
+       }
+
        switch (inode->i_mode & S_IFMT) {
        case S_IFDIR:
                if (!capable(CAP_SYS_ADMIN)) {
@@ -3135,33 +3162,24 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
                        goto out;
                }
 
-               range = kzalloc(sizeof(*range), GFP_KERNEL);
-               if (!range) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
                if (argp) {
-                       if (copy_from_user(range, argp,
-                                          sizeof(*range))) {
+                       if (copy_from_user(&range, argp, sizeof(range))) {
                                ret = -EFAULT;
-                               kfree(range);
                                goto out;
                        }
                        /* compression requires us to start the IO */
-                       if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
-                               range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
-                               range->extent_thresh = (u32)-1;
+                       if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
+                               range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
+                               range.extent_thresh = (u32)-1;
                        }
                } else {
                        /* the rest are all set to zero by kzalloc */
-                       range->len = (u64)-1;
+                       range.len = (u64)-1;
                }
                ret = btrfs_defrag_file(file_inode(file), file,
-                                       range, BTRFS_OLDEST_GENERATION, 0);
+                                       &range, BTRFS_OLDEST_GENERATION, 0);
                if (ret > 0)
                        ret = 0;
-               kfree(range);
                break;
        default:
                ret = -EINVAL;
@@ -4404,25 +4422,20 @@ drop_write:
 static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
                                                void __user *arg)
 {
-       struct btrfs_ioctl_quota_rescan_args *qsa;
+       struct btrfs_ioctl_quota_rescan_args qsa = {0};
        int ret = 0;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
-       if (!qsa)
-               return -ENOMEM;
-
        if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
-               qsa->flags = 1;
-               qsa->progress = fs_info->qgroup_rescan_progress.objectid;
+               qsa.flags = 1;
+               qsa.progress = fs_info->qgroup_rescan_progress.objectid;
        }
 
-       if (copy_to_user(arg, qsa, sizeof(*qsa)))
+       if (copy_to_user(arg, &qsa, sizeof(qsa)))
                ret = -EFAULT;
 
-       kfree(qsa);
        return ret;
 }
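
The two conversions above (the defrag range args and the quota rescan args) share one pattern: small, fixed-size ioctl argument structs are zero-initialized on the stack instead of being kzalloc'd and kfree'd, which removes an allocation failure path. A rough sketch of the pattern, with a hypothetical struct and handler name:

    static long example_ioctl(void __user *arg)
    {
            struct example_args {
                    u64 flags;
                    u64 progress;
            } args = {0};           /* zeroed on the stack, no kzalloc()/kfree() */

            args.flags = 1;
            if (copy_to_user(arg, &args, sizeof(args)))
                    return -EFAULT;
            return 0;
    }
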
 
@@ -4436,6 +4449,7 @@ static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
 }
 
 static long _btrfs_ioctl_set_received_subvol(struct file *file,
+                                           struct user_namespace *mnt_userns,
                                            struct btrfs_ioctl_received_subvol_args *sa)
 {
        struct inode *inode = file_inode(file);
@@ -4447,7 +4461,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
        int ret = 0;
        int received_uuid_changed;
 
-       if (!inode_owner_or_capable(&init_user_ns, inode))
+       if (!inode_owner_or_capable(mnt_userns, inode))
                return -EPERM;
 
        ret = mnt_want_write_file(file);
@@ -4552,7 +4566,7 @@ static long btrfs_ioctl_set_received_subvol_32(struct file *file,
        args64->rtime.nsec = args32->rtime.nsec;
        args64->flags = args32->flags;
 
-       ret = _btrfs_ioctl_set_received_subvol(file, args64);
+       ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), args64);
        if (ret)
                goto out;
 
@@ -4586,7 +4600,7 @@ static long btrfs_ioctl_set_received_subvol(struct file *file,
        if (IS_ERR(sa))
                return PTR_ERR(sa);
 
-       ret = _btrfs_ioctl_set_received_subvol(file, sa);
+       ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), sa);
 
        if (ret)
                goto out;
@@ -5013,6 +5027,10 @@ long btrfs_ioctl(struct file *file, unsigned int
                return btrfs_ioctl_get_subvol_rootref(file, argp);
        case BTRFS_IOC_INO_LOOKUP_USER:
                return btrfs_ioctl_ino_lookup_user(file, argp);
+       case FS_IOC_ENABLE_VERITY:
+               return fsverity_ioctl_enable(file, (const void __user *)argp);
+       case FS_IOC_MEASURE_VERITY:
+               return fsverity_ioctl_measure(file, argp);
        }
 
        return -ENOTTY;
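
With the two new cases above, the generic fs-verity ioctls become reachable on btrfs files. A minimal userspace sketch of how they could be exercised follows; the path is hypothetical and error handling is trimmed.

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fsverity.h>

    static int enable_verity_example(const char *path)
    {
            struct fsverity_enable_arg arg;
            int fd, ret;

            fd = open(path, O_RDONLY);
            if (fd < 0)
                    return -1;
            memset(&arg, 0, sizeof(arg));
            arg.version = 1;
            arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
            arg.block_size = 4096;
            /* Measuring afterwards would use FS_IOC_MEASURE_VERITY. */
            ret = ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
            close(fd);
            return ret;
    }
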
index cd042c7..c25dfd1 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/lzo.h>
 #include <linux/refcount.h>
 #include "compression.h"
+#include "ctree.h"
 
 #define LZO_LEN        4
 
@@ -140,18 +141,18 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
        *total_in = 0;
 
        in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-       data_in = kmap(in_page);
+       data_in = page_address(in_page);
 
        /*
         * store the size of all chunks of compressed data in
         * the first 4 bytes
         */
-       out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+       out_page = alloc_page(GFP_NOFS);
        if (out_page == NULL) {
                ret = -ENOMEM;
                goto out;
        }
-       cpage_out = kmap(out_page);
+       cpage_out = page_address(out_page);
        out_offset = LZO_LEN;
        tot_out = LZO_LEN;
        pages[0] = out_page;
@@ -209,19 +210,18 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                                if (out_len == 0 && tot_in >= len)
                                        break;
 
-                               kunmap(out_page);
                                if (nr_pages == nr_dest_pages) {
                                        out_page = NULL;
                                        ret = -E2BIG;
                                        goto out;
                                }
 
-                               out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+                               out_page = alloc_page(GFP_NOFS);
                                if (out_page == NULL) {
                                        ret = -ENOMEM;
                                        goto out;
                                }
-                               cpage_out = kmap(out_page);
+                               cpage_out = page_address(out_page);
                                pages[nr_pages++] = out_page;
 
                                pg_bytes_left = PAGE_SIZE;
@@ -243,12 +243,11 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                        break;
 
                bytes_left = len - tot_in;
-               kunmap(in_page);
                put_page(in_page);
 
                start += PAGE_SIZE;
                in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-               data_in = kmap(in_page);
+               data_in = page_address(in_page);
                in_len = min(bytes_left, PAGE_SIZE);
        }
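
A side note on the conversion in the hunks above: page_address() is only valid for pages that are permanently mapped, which is why __GFP_HIGHMEM is dropped from the allocations at the same time the kmap()/kunmap() pairs go away. A minimal sketch of the resulting pattern (hypothetical helper name):

    static int example_zero_lowmem_page(void)
    {
            struct page *page = alloc_page(GFP_NOFS);  /* no __GFP_HIGHMEM */
            char *kaddr;

            if (!page)
                    return -ENOMEM;
            kaddr = page_address(page);  /* safe: lowmem pages are always mapped */
            memset(kaddr, 0, PAGE_SIZE);
            __free_page(page);
            return 0;
    }
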
 
@@ -258,164 +257,130 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
        }
 
        /* store the size of all chunks of compressed data */
-       sizes_ptr = kmap_local_page(pages[0]);
+       sizes_ptr = page_address(pages[0]);
        write_compress_length(sizes_ptr, tot_out);
-       kunmap_local(sizes_ptr);
 
        ret = 0;
        *total_out = tot_out;
        *total_in = tot_in;
 out:
        *out_pages = nr_pages;
-       if (out_page)
-               kunmap(out_page);
 
-       if (in_page) {
-               kunmap(in_page);
+       if (in_page)
                put_page(in_page);
-       }
 
        return ret;
 }
 
+/*
+ * Copy the compressed segment payload into @dest.
+ *
+ * For the payload there is no padding; we just need to handle page switching.
+ */
+static void copy_compressed_segment(struct compressed_bio *cb,
+                                   char *dest, u32 len, u32 *cur_in)
+{
+       u32 orig_in = *cur_in;
+
+       while (*cur_in < orig_in + len) {
+               struct page *cur_page;
+               u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
+                                         orig_in + len - *cur_in);
+
+               ASSERT(copy_len);
+               cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];
+
+               memcpy(dest + *cur_in - orig_in,
+                       page_address(cur_page) + offset_in_page(*cur_in),
+                       copy_len);
+
+               *cur_in += copy_len;
+       }
+}
+
 int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 {
        struct workspace *workspace = list_entry(ws, struct workspace, list);
-       int ret = 0, ret2;
-       char *data_in;
-       unsigned long page_in_index = 0;
-       size_t srclen = cb->compressed_len;
-       unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
-       unsigned long buf_start;
-       unsigned long buf_offset = 0;
-       unsigned long bytes;
-       unsigned long working_bytes;
-       size_t in_len;
-       size_t out_len;
-       const size_t max_segment_len = lzo1x_worst_compress(PAGE_SIZE);
-       unsigned long in_offset;
-       unsigned long in_page_bytes_left;
-       unsigned long tot_in;
-       unsigned long tot_out;
-       unsigned long tot_len;
-       char *buf;
-       bool may_late_unmap, need_unmap;
-       struct page **pages_in = cb->compressed_pages;
-       u64 disk_start = cb->start;
-       struct bio *orig_bio = cb->orig_bio;
+       const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
+       const u32 sectorsize = fs_info->sectorsize;
+       int ret;
+       /* Compressed data length, can be unaligned */
+       u32 len_in;
+       /* Offset inside the compressed data */
+       u32 cur_in = 0;
+       /* Bytes decompressed so far */
+       u32 cur_out = 0;
+
+       len_in = read_compress_length(page_address(cb->compressed_pages[0]));
+       cur_in += LZO_LEN;
 
-       data_in = kmap(pages_in[0]);
-       tot_len = read_compress_length(data_in);
        /*
-        * Compressed data header check.
+        * LZO header length check
         *
-        * The real compressed size can't exceed the maximum extent length, and
-        * all pages should be used (whole unused page with just the segment
-        * header is not possible).  If this happens it means the compressed
-        * extent is corrupted.
+        * The total length should not exceed the maximum extent length,
+        * and all sectors should be used.
+        * If this happens, it means the compressed extent is corrupted.
         */
-       if (tot_len > min_t(size_t, BTRFS_MAX_COMPRESSED, srclen) ||
-           tot_len < srclen - PAGE_SIZE) {
-               ret = -EUCLEAN;
-               goto done;
+       if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
+           round_up(len_in, sectorsize) < cb->compressed_len) {
+               btrfs_err(fs_info,
+                       "invalid lzo header, lzo len %u compressed len %u",
+                       len_in, cb->compressed_len);
+               return -EUCLEAN;
        }
 
-       tot_in = LZO_LEN;
-       in_offset = LZO_LEN;
-       in_page_bytes_left = PAGE_SIZE - LZO_LEN;
-
-       tot_out = 0;
-
-       while (tot_in < tot_len) {
-               in_len = read_compress_length(data_in + in_offset);
-               in_page_bytes_left -= LZO_LEN;
-               in_offset += LZO_LEN;
-               tot_in += LZO_LEN;
+       /* Go through each lzo segment */
+       while (cur_in < len_in) {
+               struct page *cur_page;
+               /* Length of the compressed segment */
+               u32 seg_len;
+               u32 sector_bytes_left;
+               size_t out_len = lzo1x_worst_compress(sectorsize);
 
                /*
-                * Segment header check.
-                *
-                * The segment length must not exceed the maximum LZO
-                * compression size, nor the total compressed size.
+                * We should always have enough space for one segment header
+                * inside current sector.
                 */
-               if (in_len > max_segment_len || tot_in + in_len > tot_len) {
-                       ret = -EUCLEAN;
-                       goto done;
-               }
-
-               tot_in += in_len;
-               working_bytes = in_len;
-               may_late_unmap = need_unmap = false;
-
-               /* fast path: avoid using the working buffer */
-               if (in_page_bytes_left >= in_len) {
-                       buf = data_in + in_offset;
-                       bytes = in_len;
-                       may_late_unmap = true;
-                       goto cont;
-               }
-
-               /* copy bytes from the pages into the working buffer */
-               buf = workspace->cbuf;
-               buf_offset = 0;
-               while (working_bytes) {
-                       bytes = min(working_bytes, in_page_bytes_left);
-
-                       memcpy(buf + buf_offset, data_in + in_offset, bytes);
-                       buf_offset += bytes;
-cont:
-                       working_bytes -= bytes;
-                       in_page_bytes_left -= bytes;
-                       in_offset += bytes;
-
-                       /* check if we need to pick another page */
-                       if ((working_bytes == 0 && in_page_bytes_left < LZO_LEN)
-                           || in_page_bytes_left == 0) {
-                               tot_in += in_page_bytes_left;
-
-                               if (working_bytes == 0 && tot_in >= tot_len)
-                                       break;
-
-                               if (page_in_index + 1 >= total_pages_in) {
-                                       ret = -EIO;
-                                       goto done;
-                               }
-
-                               if (may_late_unmap)
-                                       need_unmap = true;
-                               else
-                                       kunmap(pages_in[page_in_index]);
-
-                               data_in = kmap(pages_in[++page_in_index]);
-
-                               in_page_bytes_left = PAGE_SIZE;
-                               in_offset = 0;
-                       }
-               }
-
-               out_len = max_segment_len;
-               ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
-                                           &out_len);
-               if (need_unmap)
-                       kunmap(pages_in[page_in_index - 1]);
+               ASSERT(cur_in / sectorsize ==
+                      (cur_in + LZO_LEN - 1) / sectorsize);
+               cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
+               ASSERT(cur_page);
+               seg_len = read_compress_length(page_address(cur_page) +
+                                              offset_in_page(cur_in));
+               cur_in += LZO_LEN;
+
+               /* Copy the compressed segment payload into workspace */
+               copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);
+
+               /* Decompress the data */
+               ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
+                                           workspace->buf, &out_len);
                if (ret != LZO_E_OK) {
-                       pr_warn("BTRFS: decompress failed\n");
+                       btrfs_err(fs_info, "failed to decompress");
                        ret = -EIO;
-                       break;
+                       goto out;
                }
 
-               buf_start = tot_out;
-               tot_out += out_len;
+               /* Copy the data into inode pages */
+               ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
+               cur_out += out_len;
 
-               ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
-                                                tot_out, disk_start, orig_bio);
-               if (ret2 == 0)
-                       break;
+               /* All data read, exit */
+               if (ret == 0)
+                       goto out;
+               ret = 0;
+
+               /* Check if the sector has enough space for a segment header */
+               sector_bytes_left = sectorsize - (cur_in % sectorsize);
+               if (sector_bytes_left >= LZO_LEN)
+                       continue;
+
+               /* Skip the padding zeros */
+               cur_in += sector_bytes_left;
        }
-done:
-       kunmap(pages_in[page_in_index]);
+out:
        if (!ret)
-               zero_fill_bio(orig_bio);
+               zero_fill_bio(cb->orig_bio);
        return ret;
 }
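
Roughly, the layout the rewritten decompression loop walks can be pictured as below. This is a sketch inferred from the checks in the code above (LZO_LEN headers, sector-aligned padding), with 4K used as an example sectorsize.

    /*
     * [4B total len][4B seg len][payload][4B seg len][payload] ... [padding]
     *
     * Segment headers never straddle a sector boundary; when fewer than
     * LZO_LEN (4) bytes remain in the current sector, they are zero padding
     * and cur_in is advanced to the start of the next sector.  Payloads, by
     * contrast, may cross page boundaries, which is what
     * copy_compressed_segment() handles.
     */
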
 
@@ -466,7 +431,7 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in,
        destlen = min_t(unsigned long, destlen, PAGE_SIZE);
        bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
-       kaddr = kmap_local_page(dest_page);
+       kaddr = page_address(dest_page);
        memcpy(kaddr, workspace->buf + start_byte, bytes);
 
        /*
@@ -476,7 +441,6 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in,
         */
        if (bytes < destlen)
                memset(kaddr+bytes, 0, destlen-bytes);
-       kunmap_local(kaddr);
 out:
        return ret;
 }
index 5c0f848..edb65ab 100644 (file)
@@ -446,7 +446,6 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
  *              Will be also used to store the finished ordered extent.
  * @file_offset: File offset for the finished IO
  * @io_size:    Length of the finish IO range
- * @uptodate:   If the IO finishes without problem
  *
  * Return true if the ordered extent is finished in the range, and update
  * @cached.
@@ -457,7 +456,7 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
  */
 bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
                                    struct btrfs_ordered_extent **cached,
-                                   u64 file_offset, u64 io_size, int uptodate)
+                                   u64 file_offset, u64 io_size)
 {
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
@@ -486,8 +485,6 @@ have_entry:
                       entry->bytes_left, io_size);
 
        entry->bytes_left -= io_size;
-       if (!uptodate)
-               set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 
        if (entry->bytes_left == 0) {
                /*
index b2d88ab..4194e96 100644 (file)
@@ -177,7 +177,7 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
                                bool uptodate);
 bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
                                    struct btrfs_ordered_extent **cached,
-                                   u64 file_offset, u64 io_size, int uptodate);
+                                   u64 file_offset, u64 io_size);
 int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
                             u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
                             int type);
index 0fa1211..db680f5 100644 (file)
@@ -1733,7 +1733,7 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
        ASSERT(trans != NULL);
 
        ret = btrfs_find_all_roots(NULL, trans->fs_info, bytenr, 0, &old_root,
-                                  false, true);
+                                  true);
        if (ret < 0) {
                trans->fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                btrfs_warn(trans->fs_info,
@@ -2651,7 +2651,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
                                /* Search commit root to find old_roots */
                                ret = btrfs_find_all_roots(NULL, fs_info,
                                                record->bytenr, 0,
-                                               &record->old_roots, false, false);
+                                               &record->old_roots, false);
                                if (ret < 0)
                                        goto cleanup;
                        }
@@ -2667,7 +2667,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
                         * current root. It's safe inside commit_transaction().
                         */
                        ret = btrfs_find_all_roots(trans, fs_info,
-                          record->bytenr, BTRFS_SEQ_LAST, &new_roots, false, false);
+                          record->bytenr, BTRFS_SEQ_LAST, &new_roots, false);
                        if (ret < 0)
                                goto cleanup;
                        if (qgroup_to_skip) {
@@ -3201,7 +3201,7 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
                        num_bytes = found.offset;
 
                ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
-                                          &roots, false, false);
+                                          &roots, false);
                if (ret < 0)
                        goto out;
                /* For rescan, just pass old_roots as NULL */
index 244d499..d8d268c 100644 (file)
@@ -1035,7 +1035,7 @@ static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
        for (i = 0; i < rbio->nr_pages; i++) {
                if (rbio->stripe_pages[i])
                        continue;
-               page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+               page = alloc_page(GFP_NOFS);
                if (!page)
                        return -ENOMEM;
                rbio->stripe_pages[i] = page;
@@ -1054,7 +1054,7 @@ static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
        for (; i < rbio->nr_pages; i++) {
                if (rbio->stripe_pages[i])
                        continue;
-               page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+               page = alloc_page(GFP_NOFS);
                if (!page)
                        return -ENOMEM;
                rbio->stripe_pages[i] = page;
@@ -1636,10 +1636,10 @@ struct btrfs_plug_cb {
 static int plug_cmp(void *priv, const struct list_head *a,
                    const struct list_head *b)
 {
-       struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
-                                                plug_list);
-       struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
-                                                plug_list);
+       const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
+                                                      plug_list);
+       const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
+                                                      plug_list);
        u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
        u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
 
@@ -2300,7 +2300,7 @@ static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
                        if (rbio->stripe_pages[index])
                                continue;
 
-                       page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+                       page = alloc_page(GFP_NOFS);
                        if (!page)
                                return -ENOMEM;
                        rbio->stripe_pages[index] = page;
@@ -2350,14 +2350,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
        if (!need_check)
                goto writeback;
 
-       p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+       p_page = alloc_page(GFP_NOFS);
        if (!p_page)
                goto cleanup;
        SetPageUptodate(p_page);
 
        if (has_qstripe) {
                /* RAID6, allocate and map temp space for the Q stripe */
-               q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+               q_page = alloc_page(GFP_NOFS);
                if (!q_page) {
                        __free_page(p_page);
                        goto cleanup;
index 8e026de..d2062d5 100644 (file)
@@ -264,8 +264,8 @@ static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
        struct block_entry *be = NULL, *exist;
        struct root_entry *re = NULL;
 
-       re = kzalloc(sizeof(struct root_entry), GFP_KERNEL);
-       be = kzalloc(sizeof(struct block_entry), GFP_KERNEL);
+       re = kzalloc(sizeof(struct root_entry), GFP_NOFS);
+       be = kzalloc(sizeof(struct block_entry), GFP_NOFS);
        if (!be || !re) {
                kfree(re);
                kfree(be);
@@ -313,7 +313,7 @@ static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,
        struct root_entry *re;
        struct ref_entry *ref = NULL, *exist;
 
-       ref = kmalloc(sizeof(struct ref_entry), GFP_KERNEL);
+       ref = kmalloc(sizeof(struct ref_entry), GFP_NOFS);
        if (!ref)
                return -ENOMEM;
 
@@ -358,7 +358,7 @@ static int add_shared_data_ref(struct btrfs_fs_info *fs_info,
        struct block_entry *be;
        struct ref_entry *ref;
 
-       ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
+       ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
        if (!ref)
                return -ENOMEM;
        be = add_block_entry(fs_info, bytenr, num_bytes, 0);
@@ -393,7 +393,7 @@ static int add_extent_data_ref(struct btrfs_fs_info *fs_info,
        u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
        u32 num_refs = btrfs_extent_data_ref_count(leaf, dref);
 
-       ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
+       ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
        if (!ref)
                return -ENOMEM;
        be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
index fc83159..914d403 100644 (file)
@@ -24,6 +24,7 @@
 #include "block-group.h"
 #include "backref.h"
 #include "misc.h"
+#include "subpage.h"
 
 /*
  * Relocation overview
@@ -2781,10 +2782,70 @@ static noinline_for_stack int prealloc_file_extent_cluster(
        u64 num_bytes;
        int nr;
        int ret = 0;
+       u64 i_size = i_size_read(&inode->vfs_inode);
        u64 prealloc_start = cluster->start - offset;
        u64 prealloc_end = cluster->end - offset;
        u64 cur_offset = prealloc_start;
 
+       /*
+        * In the subpage case, the previous i_size may not be aligned to PAGE_SIZE.
+        * This means the range [i_size, PAGE_END + 1) was filled with zeros by the
+        * btrfs_do_readpage() call for the previously relocated file cluster.
+        *
+        * If the current cluster starts in the above range, btrfs_do_readpage()
+        * will skip the read, and relocate_one_page() will later writeback
+        * the padding zeros as new data, causing data corruption.
+        *
+        * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
+        */
+       if (!IS_ALIGNED(i_size, PAGE_SIZE)) {
+               struct address_space *mapping = inode->vfs_inode.i_mapping;
+               struct btrfs_fs_info *fs_info = inode->root->fs_info;
+               const u32 sectorsize = fs_info->sectorsize;
+               struct page *page;
+
+               ASSERT(sectorsize < PAGE_SIZE);
+               ASSERT(IS_ALIGNED(i_size, sectorsize));
+
+               /*
+                * Subpage can't handle page with DIRTY but without UPTODATE
+                * bit as it can lead to the following deadlock:
+                *
+                * btrfs_readpage()
+                * | Page already *locked*
+                * |- btrfs_lock_and_flush_ordered_range()
+                *    |- btrfs_start_ordered_extent()
+                *       |- extent_write_cache_pages()
+                *          |- lock_page()
+                *             We try to lock the page we already hold.
+                *
+                * Here we just write back the whole data reloc inode, so that
+                * we are guaranteed to have no dirty range in the page and can
+                * safely clear the uptodate bits.
+                *
+                * This shouldn't cause too much overhead, as we need to write
+                * the data back anyway.
+                */
+               ret = filemap_write_and_wait(mapping);
+               if (ret < 0)
+                       return ret;
+
+               clear_extent_bits(&inode->io_tree, i_size,
+                                 round_up(i_size, PAGE_SIZE) - 1,
+                                 EXTENT_UPTODATE);
+               page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
+               /*
+                * If page is freed we don't need to do anything then, as we
+                * will re-read the whole page anyway.
+                */
+               if (page) {
+                       btrfs_subpage_clear_uptodate(fs_info, page, i_size,
+                                       round_up(i_size, PAGE_SIZE) - i_size);
+                       unlock_page(page);
+                       put_page(page);
+               }
+       }
+
        BUG_ON(cluster->start != cluster->boundary[0]);
        ret = btrfs_alloc_data_chunk_ondemand(inode,
                                              prealloc_end + 1 - prealloc_start);
@@ -2886,19 +2947,149 @@ noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
 }
 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
 
-static int relocate_file_extent_cluster(struct inode *inode,
-                                       struct file_extent_cluster *cluster)
+static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
+                                   int cluster_nr)
+{
+       /* Last extent, use cluster end directly */
+       if (cluster_nr >= cluster->nr - 1)
+               return cluster->end;
+
+       /* Use next boundary start */
+       return cluster->boundary[cluster_nr + 1] - 1;
+}
+
+static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
+                            struct file_extent_cluster *cluster,
+                            int *cluster_nr, unsigned long page_index)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+       u64 offset = BTRFS_I(inode)->index_cnt;
+       const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
+       gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
+       struct page *page;
        u64 page_start;
        u64 page_end;
+       u64 cur;
+       int ret;
+
+       ASSERT(page_index <= last_index);
+       page = find_lock_page(inode->i_mapping, page_index);
+       if (!page) {
+               page_cache_sync_readahead(inode->i_mapping, ra, NULL,
+                               page_index, last_index + 1 - page_index);
+               page = find_or_create_page(inode->i_mapping, page_index, mask);
+               if (!page)
+                       return -ENOMEM;
+       }
+       ret = set_page_extent_mapped(page);
+       if (ret < 0)
+               goto release_page;
+
+       if (PageReadahead(page))
+               page_cache_async_readahead(inode->i_mapping, ra, NULL, page,
+                                  page_index, last_index + 1 - page_index);
+
+       if (!PageUptodate(page)) {
+               btrfs_readpage(NULL, page);
+               lock_page(page);
+               if (!PageUptodate(page)) {
+                       ret = -EIO;
+                       goto release_page;
+               }
+       }
+
+       page_start = page_offset(page);
+       page_end = page_start + PAGE_SIZE - 1;
+
+       /*
+        * Start from the cluster, as for subpage case, the cluster can start
+        * inside the page.
+        */
+       cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
+       while (cur <= page_end) {
+               u64 extent_start = cluster->boundary[*cluster_nr] - offset;
+               u64 extent_end = get_cluster_boundary_end(cluster,
+                                               *cluster_nr) - offset;
+               u64 clamped_start = max(page_start, extent_start);
+               u64 clamped_end = min(page_end, extent_end);
+               u32 clamped_len = clamped_end + 1 - clamped_start;
+
+               /* Reserve metadata for this range */
+               ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
+                                                     clamped_len);
+               if (ret)
+                       goto release_page;
+
+               /* Mark the range delalloc and dirty for later writeback */
+               lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end);
+               ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
+                                               clamped_end, 0, NULL);
+               if (ret) {
+                       clear_extent_bits(&BTRFS_I(inode)->io_tree,
+                                       clamped_start, clamped_end,
+                                       EXTENT_LOCKED | EXTENT_BOUNDARY);
+                       btrfs_delalloc_release_metadata(BTRFS_I(inode),
+                                                       clamped_len, true);
+                       btrfs_delalloc_release_extents(BTRFS_I(inode),
+                                                      clamped_len);
+                       goto release_page;
+               }
+               btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len);
+
+               /*
+                * Set the boundary if it's inside the page.
+                * Data relocation requires the destination extents to have the
+                * same size as the source.
+                * EXTENT_BOUNDARY bit prevents current extent from being merged
+                * with previous extent.
+                */
+               if (in_range(cluster->boundary[*cluster_nr] - offset,
+                            page_start, PAGE_SIZE)) {
+                       u64 boundary_start = cluster->boundary[*cluster_nr] -
+                                               offset;
+                       u64 boundary_end = boundary_start +
+                                          fs_info->sectorsize - 1;
+
+                       set_extent_bits(&BTRFS_I(inode)->io_tree,
+                                       boundary_start, boundary_end,
+                                       EXTENT_BOUNDARY);
+               }
+               unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
+               cur += clamped_len;
+
+               /* Crossed extent end, go to next extent */
+               if (cur >= extent_end) {
+                       (*cluster_nr)++;
+                       /* Just finished the last extent of the cluster, exit. */
+                       if (*cluster_nr >= cluster->nr)
+                               break;
+               }
+       }
+       unlock_page(page);
+       put_page(page);
+
+       balance_dirty_pages_ratelimited(inode->i_mapping);
+       btrfs_throttle(fs_info);
+       if (btrfs_should_cancel_balance(fs_info))
+               ret = -ECANCELED;
+       return ret;
+
+release_page:
+       unlock_page(page);
+       put_page(page);
+       return ret;
+}
+
+static int relocate_file_extent_cluster(struct inode *inode,
+                                       struct file_extent_cluster *cluster)
+{
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        u64 offset = BTRFS_I(inode)->index_cnt;
        unsigned long index;
        unsigned long last_index;
-       struct page *page;
        struct file_ra_state *ra;
-       gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
-       int nr = 0;
+       int cluster_nr = 0;
        int ret = 0;
 
        if (!cluster->nr)
@@ -2919,109 +3110,14 @@ static int relocate_file_extent_cluster(struct inode *inode,
        if (ret)
                goto out;
 
-       index = (cluster->start - offset) >> PAGE_SHIFT;
        last_index = (cluster->end - offset) >> PAGE_SHIFT;
-       while (index <= last_index) {
-               ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
-                               PAGE_SIZE);
-               if (ret)
-                       goto out;
-
-               page = find_lock_page(inode->i_mapping, index);
-               if (!page) {
-                       page_cache_sync_readahead(inode->i_mapping,
-                                                 ra, NULL, index,
-                                                 last_index + 1 - index);
-                       page = find_or_create_page(inode->i_mapping, index,
-                                                  mask);
-                       if (!page) {
-                               btrfs_delalloc_release_metadata(BTRFS_I(inode),
-                                                       PAGE_SIZE, true);
-                               btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                       PAGE_SIZE);
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-               }
-               ret = set_page_extent_mapped(page);
-               if (ret < 0) {
-                       btrfs_delalloc_release_metadata(BTRFS_I(inode),
-                                                       PAGE_SIZE, true);
-                       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
-                       unlock_page(page);
-                       put_page(page);
-                       goto out;
-               }
-
-               if (PageReadahead(page)) {
-                       page_cache_async_readahead(inode->i_mapping,
-                                                  ra, NULL, page, index,
-                                                  last_index + 1 - index);
-               }
-
-               if (!PageUptodate(page)) {
-                       btrfs_readpage(NULL, page);
-                       lock_page(page);
-                       if (!PageUptodate(page)) {
-                               unlock_page(page);
-                               put_page(page);
-                               btrfs_delalloc_release_metadata(BTRFS_I(inode),
-                                                       PAGE_SIZE, true);
-                               btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                              PAGE_SIZE);
-                               ret = -EIO;
-                               goto out;
-                       }
-               }
-
-               page_start = page_offset(page);
-               page_end = page_start + PAGE_SIZE - 1;
-
-               lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
-
-               if (nr < cluster->nr &&
-                   page_start + offset == cluster->boundary[nr]) {
-                       set_extent_bits(&BTRFS_I(inode)->io_tree,
-                                       page_start, page_end,
-                                       EXTENT_BOUNDARY);
-                       nr++;
-               }
-
-               ret = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start,
-                                               page_end, 0, NULL);
-               if (ret) {
-                       unlock_page(page);
-                       put_page(page);
-                       btrfs_delalloc_release_metadata(BTRFS_I(inode),
-                                                        PAGE_SIZE, true);
-                       btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                      PAGE_SIZE);
-
-                       clear_extent_bits(&BTRFS_I(inode)->io_tree,
-                                         page_start, page_end,
-                                         EXTENT_LOCKED | EXTENT_BOUNDARY);
-                       goto out;
-
-               }
-               set_page_dirty(page);
-
-               unlock_extent(&BTRFS_I(inode)->io_tree,
-                             page_start, page_end);
-               unlock_page(page);
-               put_page(page);
-
-               index++;
-               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
-               balance_dirty_pages_ratelimited(inode->i_mapping);
-               btrfs_throttle(fs_info);
-               if (btrfs_should_cancel_balance(fs_info)) {
-                       ret = -ECANCELED;
-                       goto out;
-               }
-       }
-       WARN_ON(nr != cluster->nr);
+       for (index = (cluster->start - offset) >> PAGE_SHIFT;
+            index <= last_index && !ret; index++)
+               ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
        if (btrfs_is_zoned(fs_info) && !ret)
                ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
+       if (ret == 0)
+               WARN_ON(cluster_nr != cluster->nr);
 out:
        kfree(ra);
        return ret;
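
A worked example of the per-extent clamping done in relocate_one_page() above, using illustrative numbers for a 64K page containing a 16K extent:

    /*
     *   page_start   = 0x10000, page_end   = 0x1ffff  (64K page)
     *   extent_start = 0x14000, extent_end = 0x17fff  (16K extent)
     *   clamped_start = max(0x10000, 0x14000) = 0x14000
     *   clamped_end   = min(0x1ffff, 0x17fff) = 0x17fff
     *   clamped_len   = 0x17fff + 1 - 0x14000 = 0x4000 (16K)
     * Only that 16K range is reserved, marked delalloc and dirtied.
     */
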
index 6ac37ae..72f9b86 100644 (file)
@@ -1198,7 +1198,7 @@ struct backref_ctx {
 static int __clone_root_cmp_bsearch(const void *key, const void *elt)
 {
        u64 root = (u64)(uintptr_t)key;
-       struct clone_root *cr = (struct clone_root *)elt;
+       const struct clone_root *cr = elt;
 
        if (root < cr->root->root_key.objectid)
                return -1;
@@ -1209,8 +1209,8 @@ static int __clone_root_cmp_bsearch(const void *key, const void *elt)
 
 static int __clone_root_cmp_sort(const void *e1, const void *e2)
 {
-       struct clone_root *cr1 = (struct clone_root *)e1;
-       struct clone_root *cr2 = (struct clone_root *)e2;
+       const struct clone_root *cr1 = e1;
+       const struct clone_root *cr2 = e2;
 
        if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
                return -1;
@@ -1307,7 +1307,7 @@ static int find_extent_clone(struct send_ctx *sctx,
        u64 flags = 0;
        struct btrfs_file_extent_item *fi;
        struct extent_buffer *eb = path->nodes[0];
-       struct backref_ctx *backref_ctx = NULL;
+       struct backref_ctx backref_ctx = {0};
        struct clone_root *cur_clone_root;
        struct btrfs_key found_key;
        struct btrfs_path *tmp_path;
@@ -1322,12 +1322,6 @@ static int find_extent_clone(struct send_ctx *sctx,
        /* We only use this path under the commit sem */
        tmp_path->need_commit_sem = 0;
 
-       backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
-       if (!backref_ctx) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
        if (data_offset >= ino_size) {
                /*
                 * There may be extents that lie behind the file's size.
@@ -1392,12 +1386,12 @@ static int find_extent_clone(struct send_ctx *sctx,
                cur_clone_root->found_refs = 0;
        }
 
-       backref_ctx->sctx = sctx;
-       backref_ctx->found = 0;
-       backref_ctx->cur_objectid = ino;
-       backref_ctx->cur_offset = data_offset;
-       backref_ctx->found_itself = 0;
-       backref_ctx->extent_len = num_bytes;
+       backref_ctx.sctx = sctx;
+       backref_ctx.found = 0;
+       backref_ctx.cur_objectid = ino;
+       backref_ctx.cur_offset = data_offset;
+       backref_ctx.found_itself = 0;
+       backref_ctx.extent_len = num_bytes;
 
        /*
         * The last extent of a file may be too large due to page alignment.
@@ -1405,7 +1399,7 @@ static int find_extent_clone(struct send_ctx *sctx,
         * __iterate_backrefs work.
         */
        if (data_offset + num_bytes >= ino_size)
-               backref_ctx->extent_len = ino_size - data_offset;
+               backref_ctx.extent_len = ino_size - data_offset;
 
        /*
         * Now collect all backrefs.
@@ -1416,12 +1410,12 @@ static int find_extent_clone(struct send_ctx *sctx,
                extent_item_pos = 0;
        ret = iterate_extent_inodes(fs_info, found_key.objectid,
                                    extent_item_pos, 1, __iterate_backrefs,
-                                   backref_ctx, false);
+                                   &backref_ctx, false);
 
        if (ret < 0)
                goto out;
 
-       if (!backref_ctx->found_itself) {
+       if (!backref_ctx.found_itself) {
                /* found a bug in backref code? */
                ret = -EIO;
                btrfs_err(fs_info,
@@ -1434,7 +1428,7 @@ static int find_extent_clone(struct send_ctx *sctx,
                    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
                    data_offset, ino, num_bytes, logical);
 
-       if (!backref_ctx->found)
+       if (!backref_ctx.found)
                btrfs_debug(fs_info, "no clones found");
 
        cur_clone_root = NULL;
@@ -1458,7 +1452,6 @@ static int find_extent_clone(struct send_ctx *sctx,
 
 out:
        btrfs_free_path(tmp_path);
-       kfree(backref_ctx);
        return ret;
 }
 
index f79bf85..5ada02e 100644 (file)
@@ -493,6 +493,11 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
        long time_left;
        int loops;
 
+       delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
+       ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
+       if (delalloc_bytes == 0 && ordered_bytes == 0)
+               return;
+
        /* Calc the number of the pages we need flush for space reservation */
        if (to_reclaim == U64_MAX) {
                items = U64_MAX;
@@ -500,22 +505,21 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
                /*
                 * to_reclaim is set to however much metadata we need to
                 * reclaim, but reclaiming that much data doesn't really track
-                * exactly, so increase the amount to reclaim by 2x in order to
-                * make sure we're flushing enough delalloc to hopefully reclaim
-                * some metadata reservations.
+                * exactly.  What we really want to do is reclaim full inode's
+                * worth of reservations, however that's not available to us
+                * here.  We will take a fraction of the delalloc bytes for our
+                * flushing loops and hope for the best.  Delalloc will expand
+                * the amount we write to cover an entire dirty extent, which
+                * will reclaim the metadata reservation for that range.  If
+                * it's not enough subsequent flush stages will be more
+                * aggressive.
                 */
+               to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
                items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
-               to_reclaim = items * EXTENT_SIZE_PER_ITEM;
        }
 
        trans = (struct btrfs_trans_handle *)current->journal_info;
 
-       delalloc_bytes = percpu_counter_sum_positive(
-                                               &fs_info->delalloc_bytes);
-       ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
-       if (delalloc_bytes == 0 && ordered_bytes == 0)
-               return;
-
        /*
         * If we are doing more ordered than delalloc we need to just wait on
         * ordered extents, otherwise we'll waste time trying to flush delalloc
@@ -528,9 +532,49 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
        while ((delalloc_bytes || ordered_bytes) && loops < 3) {
                u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
                long nr_pages = min_t(u64, temp, LONG_MAX);
+               int async_pages;
 
                btrfs_start_delalloc_roots(fs_info, nr_pages, true);
 
+               /*
+                * We need to make sure any outstanding async pages are now
+                * processed before we continue.  This is because things like
+                * sync_inode() try to be smart and skip writing if the inode is
+                * marked clean.  We don't use filemap_fdatawrite() for flushing
+                * because we want to control how many pages we write out at a
+                * time, thus this is the only safe way to make sure we've
+                * waited for outstanding compressed workers to have started
+                * their jobs and thus have ordered extents set up properly.
+                *
+                * This exists because we do not want to wait for each
+                * individual inode to finish its async work, we simply want to
+                * start the IO on everybody, and then come back here and wait
+                * for all of the async work to catch up.  Once we're done with
+                * that we know we'll have ordered extents for everything and we
+                * can decide if we wait for that or not.
+                *
+                * If we choose to replace this in the future, make absolutely
+                * sure that the proper waiting is being done in the async case,
+                * as there have been bugs in that area before.
+                */
+               async_pages = atomic_read(&fs_info->async_delalloc_pages);
+               if (!async_pages)
+                       goto skip_async;
+
+               /*
+                * We don't want to wait forever, if we wrote less pages in this
+                * loop than we have outstanding, only wait for that number of
+                * pages, otherwise we can wait for all async pages to finish
+                * before continuing.
+                */
+               if (async_pages > nr_pages)
+                       async_pages -= nr_pages;
+               else
+                       async_pages = 0;
+               wait_event(fs_info->async_submit_wait,
+                          atomic_read(&fs_info->async_delalloc_pages) <=
+                          async_pages);
+skip_async:
                loops++;
                if (wait_ordered && !trans) {
                        btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
@@ -595,8 +639,11 @@ static void flush_space(struct btrfs_fs_info *fs_info,
                break;
        case FLUSH_DELALLOC:
        case FLUSH_DELALLOC_WAIT:
+       case FLUSH_DELALLOC_FULL:
+               if (state == FLUSH_DELALLOC_FULL)
+                       num_bytes = U64_MAX;
                shrink_delalloc(fs_info, space_info, num_bytes,
-                               state == FLUSH_DELALLOC_WAIT, for_preempt);
+                               state != FLUSH_DELALLOC, for_preempt);
                break;
        case FLUSH_DELAYED_REFS_NR:
        case FLUSH_DELAYED_REFS:
@@ -686,7 +733,7 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
 {
        u64 global_rsv_size = fs_info->global_block_rsv.reserved;
        u64 ordered, delalloc;
-       u64 thresh = div_factor_fine(space_info->total_bytes, 98);
+       u64 thresh = div_factor_fine(space_info->total_bytes, 90);
        u64 used;
 
        /* If we're just plain full then async reclaim just slows us down. */
@@ -694,6 +741,20 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
             global_rsv_size) >= thresh)
                return false;
 
+       used = space_info->bytes_may_use + space_info->bytes_pinned;
+
+       /* The total flushable belongs to the global rsv, don't flush. */
+       if (global_rsv_size >= used)
+               return false;
+
+       /*
+        * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
+        * that devoted to other reservations then there's no sense in flushing,
+        * we don't have a lot of things that need flushing.
+        */
+       if (used - global_rsv_size <= SZ_128M)
+               return false;
+
        /*
         * We have tickets queued, bail so we don't compete with the async
         * flushers.
@@ -824,6 +885,8 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
        struct reserve_ticket *ticket;
        u64 tickets_id = space_info->tickets_id;
 
+       trace_btrfs_fail_all_tickets(fs_info, space_info);
+
        if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
                btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
                __btrfs_dump_space_info(fs_info, space_info);
@@ -904,6 +967,14 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
                                commit_cycles--;
                }
 
+               /*
+                * We do not want to empty the system of delalloc unless we're
+                * under heavy pressure, so allow one trip through the flushing
+                * logic before we start doing a FLUSH_DELALLOC_FULL.
+                */
+               if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
+                       flush_state++;
+
                /*
                 * We don't want to force a chunk allocation until we've tried
                 * pretty hard to reclaim space.  Think of the case where we
@@ -1067,7 +1138,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
  *   so if we now have space to allocate do the force chunk allocation.
  */
 static const enum btrfs_flush_state data_flush_states[] = {
-       FLUSH_DELALLOC_WAIT,
+       FLUSH_DELALLOC_FULL,
        RUN_DELAYED_IPUTS,
        COMMIT_TRANS,
        ALLOC_CHUNK_FORCE,
@@ -1156,6 +1227,7 @@ static const enum btrfs_flush_state evict_flush_states[] = {
        FLUSH_DELAYED_REFS,
        FLUSH_DELALLOC,
        FLUSH_DELALLOC_WAIT,
+       FLUSH_DELALLOC_FULL,
        ALLOC_CHUNK,
        COMMIT_TRANS,
 };
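
Two small worked examples for the thresholds introduced above (illustrative numbers only; the 512M figure for the maximum global reserve is an assumption based on the "128MiB is 1/4" comment):

    /*
     * shrink_delalloc() fraction:
     *   delalloc_bytes = 1G, to_reclaim = 16M
     *   to_reclaim = max(16M, 1G >> 3) = 128M worth of pages is flushed
     *
     * need_preemptive_reclaim() cutoff:
     *   used = bytes_may_use + bytes_pinned = 500M, global_rsv_size = 400M
     *   used - global_rsv_size = 100M <= 128M  ->  no preemptive flush
     */
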
index 8260f8b..f429256 100644 (file)
@@ -73,7 +73,7 @@ u##bits btrfs_get_token_##bits(struct btrfs_map_token *token,         \
        }                                                               \
        token->kaddr = page_address(token->eb->pages[idx]);             \
        token->offset = idx << PAGE_SHIFT;                              \
-       if (oip + size <= PAGE_SIZE)                                    \
+       if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) \
                return get_unaligned_le##bits(token->kaddr + oip);      \
                                                                        \
        memcpy(lebytes, token->kaddr + oip, part);                      \
@@ -94,7 +94,7 @@ u##bits btrfs_get_##bits(const struct extent_buffer *eb,              \
        u8 lebytes[sizeof(u##bits)];                                    \
                                                                        \
        ASSERT(check_setget_bounds(eb, ptr, off, size));                \
-       if (oip + size <= PAGE_SIZE)                                    \
+       if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) \
                return get_unaligned_le##bits(kaddr + oip);             \
                                                                        \
        memcpy(lebytes, kaddr + oip, part);                             \
@@ -124,7 +124,7 @@ void btrfs_set_token_##bits(struct btrfs_map_token *token,          \
        }                                                               \
        token->kaddr = page_address(token->eb->pages[idx]);             \
        token->offset = idx << PAGE_SHIFT;                              \
-       if (oip + size <= PAGE_SIZE) {                                  \
+       if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \
                put_unaligned_le##bits(val, token->kaddr + oip);        \
                return;                                                 \
        }                                                               \
@@ -146,7 +146,7 @@ void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr,    \
        u8 lebytes[sizeof(u##bits)];                                    \
                                                                        \
        ASSERT(check_setget_bounds(eb, ptr, off, size));                \
-       if (oip + size <= PAGE_SIZE) {                                  \
+       if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \
                put_unaligned_le##bits(val, kaddr + oip);               \
                return;                                                 \
        }                                                               \
index 640bcd2..cb10e56 100644 (file)
@@ -435,8 +435,10 @@ void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
 
        spin_lock_irqsave(&subpage->lock, flags);
        subpage->writeback_bitmap &= ~tmp;
-       if (subpage->writeback_bitmap == 0)
+       if (subpage->writeback_bitmap == 0) {
+               ASSERT(PageWriteback(page));
                end_page_writeback(page);
+       }
        spin_unlock_irqrestore(&subpage->lock, flags);
 }
 
@@ -559,3 +561,23 @@ IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
                         PageWriteback);
 IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
                         PageOrdered);
+
+/*
+ * Make sure not only the page dirty bit is cleared, but also subpage dirty bit
+ * is cleared.
+ */
+void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
+                                struct page *page)
+{
+       struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+
+       if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
+               return;
+
+       ASSERT(!PageDirty(page));
+       if (fs_info->sectorsize == PAGE_SIZE)
+               return;
+
+       ASSERT(PagePrivate(page) && page->private);
+       ASSERT(subpage->dirty_bitmap == 0);
+}
index 4d7aca8..0120948 100644 (file)
@@ -126,4 +126,7 @@ DECLARE_BTRFS_SUBPAGE_OPS(ordered);
 bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
                struct page *page, u64 start, u32 len);
 
+void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
+                                struct page *page);
+
 #endif
index d07b18b..537d90b 100644 (file)
@@ -1201,21 +1201,14 @@ char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
                key.type = BTRFS_ROOT_BACKREF_KEY;
                key.offset = (u64)-1;
 
-               ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+               ret = btrfs_search_backwards(root, &key, path);
                if (ret < 0) {
                        goto err;
                } else if (ret > 0) {
-                       ret = btrfs_previous_item(root, path, subvol_objectid,
-                                                 BTRFS_ROOT_BACKREF_KEY);
-                       if (ret < 0) {
-                               goto err;
-                       } else if (ret > 0) {
-                               ret = -ENOENT;
-                               goto err;
-                       }
+                       ret = -ENOENT;
+                       goto err;
                }
 
-               btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                subvol_objectid = key.offset;
 
                root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -1248,21 +1241,14 @@ char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
                        key.type = BTRFS_INODE_REF_KEY;
                        key.offset = (u64)-1;
 
-                       ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
+                       ret = btrfs_search_backwards(fs_root, &key, path);
                        if (ret < 0) {
                                goto err;
                        } else if (ret > 0) {
-                               ret = btrfs_previous_item(fs_root, path, dirid,
-                                                         BTRFS_INODE_REF_KEY);
-                               if (ret < 0) {
-                                       goto err;
-                               } else if (ret > 0) {
-                                       ret = -ENOENT;
-                                       goto err;
-                               }
+                               ret = -ENOENT;
+                               goto err;
                        }
 
-                       btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                        dirid = key.offset;
 
                        inode_ref = btrfs_item_ptr(path->nodes[0],
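
Both hunks above replace an open-coded btrfs_search_slot() + btrfs_previous_item() + key copy sequence with the new btrfs_search_backwards() helper. A plausible shape for such a helper, sketched here for orientation only (the real one lives in ctree.c and may differ):

/*
 * Sketch: find the last item with the given objectid and type whose key is
 * <= *key, returning its key through *key.  Returns 0 if found, > 0 if
 * nothing matches, < 0 on error.
 */
static int search_backwards_sketch(struct btrfs_root *root, struct btrfs_key *key,
				   struct btrfs_path *path)
{
	int ret;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret <= 0)
		return ret;

	ret = btrfs_previous_item(root, path, key->objectid, key->type);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);

	return ret;
}
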
@@ -1353,6 +1339,9 @@ static int btrfs_fill_super(struct super_block *sb,
        sb->s_op = &btrfs_super_ops;
        sb->s_d_op = &btrfs_dentry_operations;
        sb->s_export_op = &btrfs_export_ops;
+#ifdef CONFIG_FS_VERITY
+       sb->s_vop = &btrfs_verityops;
+#endif
        sb->s_xattr = btrfs_xattr_handlers;
        sb->s_time_gran = 1;
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
@@ -2041,13 +2030,6 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                        ret = -EINVAL;
                        goto restore;
                }
-               if (fs_info->sectorsize < PAGE_SIZE) {
-                       btrfs_warn(fs_info,
-       "read-write mount is not yet allowed for sectorsize %u page size %lu",
-                                  fs_info->sectorsize, PAGE_SIZE);
-                       ret = -EINVAL;
-                       goto restore;
-               }
 
                /*
                 * NOTE: when remounting with a change that does writes, don't
@@ -2096,16 +2078,15 @@ restore:
 }
 
 /* Used to sort the devices by max_avail(descending sort) */
-static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
-                                      const void *dev_info2)
+static int btrfs_cmp_device_free_bytes(const void *a, const void *b)
 {
-       if (((struct btrfs_device_info *)dev_info1)->max_avail >
-           ((struct btrfs_device_info *)dev_info2)->max_avail)
+       const struct btrfs_device_info *dev_info1 = a;
+       const struct btrfs_device_info *dev_info2 = b;
+
+       if (dev_info1->max_avail > dev_info2->max_avail)
                return -1;
-       else if (((struct btrfs_device_info *)dev_info1)->max_avail <
-                ((struct btrfs_device_info *)dev_info2)->max_avail)
+       else if (dev_info1->max_avail < dev_info2->max_avail)
                return 1;
-       else
        return 0;
 }
 
@@ -2381,7 +2362,7 @@ static struct file_system_type btrfs_root_fs_type = {
        .name           = "btrfs",
        .mount          = btrfs_mount_root,
        .kill_sb        = btrfs_kill_super,
-       .fs_flags       = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
+       .fs_flags       = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_ALLOW_IDMAP,
 };
 
 MODULE_ALIAS_FS("btrfs");
@@ -2571,6 +2552,11 @@ static void __init btrfs_print_mod_info(void)
                        ", zoned=yes"
 #else
                        ", zoned=no"
+#endif
+#ifdef CONFIG_FS_VERITY
+                       ", fsverity=yes"
+#else
+                       ", fsverity=no"
 #endif
                        ;
        pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
index 9d1d140..25a6f58 100644 (file)
 #include "block-group.h"
 #include "qgroup.h"
 
+/*
+ * Structure name                       Path
+ * --------------------------------------------------------------------------
+ * btrfs_supported_static_feature_attrs /sys/fs/btrfs/features
+ * btrfs_supported_feature_attrs       /sys/fs/btrfs/features and
+ *                                     /sys/fs/btrfs/<uuid>/features
+ * btrfs_attrs                         /sys/fs/btrfs/<uuid>
+ * devid_attrs                         /sys/fs/btrfs/<uuid>/devinfo/<devid>
+ * allocation_attrs                    /sys/fs/btrfs/<uuid>/allocation
+ * qgroup_attrs                                /sys/fs/btrfs/<uuid>/qgroups/<level>_<qgroupid>
+ * space_info_attrs                    /sys/fs/btrfs/<uuid>/allocation/<bg-type>
+ * raid_attrs                          /sys/fs/btrfs/<uuid>/allocation/<bg-type>/<bg-profile>
+ *
+ * When built with BTRFS_CONFIG_DEBUG:
+ *
+ * btrfs_debug_feature_attrs           /sys/fs/btrfs/debug
+ * btrfs_debug_mount_attrs             /sys/fs/btrfs/<uuid>/debug
+ * discard_debug_attrs                 /sys/fs/btrfs/<uuid>/debug/discard
+ */
+
 struct btrfs_feature_attr {
        struct kobj_attribute kobj_attr;
        enum btrfs_feature_set feature_set;
@@ -267,7 +287,17 @@ BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34);
 #ifdef CONFIG_BTRFS_DEBUG
 BTRFS_FEAT_ATTR_INCOMPAT(zoned, ZONED);
 #endif
+#ifdef CONFIG_FS_VERITY
+BTRFS_FEAT_ATTR_COMPAT_RO(verity, VERITY);
+#endif
 
+/*
+ * Features which depend on feature bits and may differ between each fs.
+ *
+ * /sys/fs/btrfs/features      - all available features implemented by this version
+ * /sys/fs/btrfs/UUID/features - features of the fs which are enabled or
+ *                               can be changed on a mounted filesystem.
+ */
 static struct attribute *btrfs_supported_feature_attrs[] = {
        BTRFS_FEAT_ATTR_PTR(mixed_backref),
        BTRFS_FEAT_ATTR_PTR(default_subvol),
@@ -284,17 +314,13 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
        BTRFS_FEAT_ATTR_PTR(raid1c34),
 #ifdef CONFIG_BTRFS_DEBUG
        BTRFS_FEAT_ATTR_PTR(zoned),
+#endif
+#ifdef CONFIG_FS_VERITY
+       BTRFS_FEAT_ATTR_PTR(verity),
 #endif
        NULL
 };
 
-/*
- * Features which depend on feature bits and may differ between each fs.
- *
- * /sys/fs/btrfs/features lists all available features of this kernel while
- * /sys/fs/btrfs/UUID/features shows features of the fs which are enabled or
- * can be changed online.
- */
 static const struct attribute_group btrfs_feature_attr_group = {
        .name = "features",
        .is_visible = btrfs_feature_visible,
@@ -366,6 +392,10 @@ static ssize_t supported_sectorsizes_show(struct kobject *kobj,
 {
        ssize_t ret = 0;
 
+       /* 4K sector size is also supported with 64K page size */
+       if (PAGE_SIZE == SZ_64K)
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%u ", SZ_4K);
+
        /* Only sectorsize == PAGE_SIZE is now supported */
        ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%lu\n", PAGE_SIZE);
 
@@ -374,6 +404,12 @@ static ssize_t supported_sectorsizes_show(struct kobject *kobj,
 BTRFS_ATTR(static_feature, supported_sectorsizes,
           supported_sectorsizes_show);
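
With the hunk above, on a hypothetical 64K-page machine /sys/fs/btrfs/features/supported_sectorsizes would read "4096 65536", while a 4K-page machine keeps reporting just "4096".
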
 
+/*
+ * Features which only depend on kernel version.
+ *
+ * These are listed in /sys/fs/btrfs/features along with
+ * btrfs_supported_feature_attrs.
+ */
 static struct attribute *btrfs_supported_static_feature_attrs[] = {
        BTRFS_ATTR_PTR(static_feature, rmdir_subvol),
        BTRFS_ATTR_PTR(static_feature, supported_checksums),
@@ -383,12 +419,6 @@ static struct attribute *btrfs_supported_static_feature_attrs[] = {
        NULL
 };
 
-/*
- * Features which only depend on kernel version.
- *
- * These are listed in /sys/fs/btrfs/features along with
- * btrfs_feature_attr_group
- */
 static const struct attribute_group btrfs_static_feature_attr_group = {
        .name = "features",
        .attrs = btrfs_supported_static_feature_attrs,
@@ -547,6 +577,11 @@ static ssize_t btrfs_discard_max_discard_size_store(struct kobject *kobj,
 BTRFS_ATTR_RW(discard, max_discard_size, btrfs_discard_max_discard_size_show,
              btrfs_discard_max_discard_size_store);
 
+/*
+ * Per-filesystem debugging of discard (when mounted with discard=async).
+ *
+ * Path: /sys/fs/btrfs/<uuid>/debug/discard/
+ */
 static const struct attribute *discard_debug_attrs[] = {
        BTRFS_ATTR_PTR(discard, discardable_bytes),
        BTRFS_ATTR_PTR(discard, discardable_extents),
@@ -560,15 +595,19 @@ static const struct attribute *discard_debug_attrs[] = {
 };
 
 /*
- * Runtime debugging exported via sysfs
+ * Per-filesystem runtime debugging exported via sysfs.
  *
- * /sys/fs/btrfs/debug - applies to module or all filesystems
- * /sys/fs/btrfs/UUID  - applies only to the given filesystem
+ * Path: /sys/fs/btrfs/UUID/debug/
  */
 static const struct attribute *btrfs_debug_mount_attrs[] = {
        NULL,
 };
 
+/*
+ * Runtime debugging exported via sysfs, applies to all mounted filesystems.
+ *
+ * Path: /sys/fs/btrfs/debug
+ */
 static struct attribute *btrfs_debug_feature_attrs[] = {
        NULL
 };
@@ -637,6 +676,11 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
        return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
 }
 
+/*
+ * Allocation information about block group profiles.
+ *
+ * Path: /sys/fs/btrfs/<uuid>/allocation/<bg-type>/<bg-profile>/
+ */
 static struct attribute *raid_attrs[] = {
        BTRFS_ATTR_PTR(raid, total_bytes),
        BTRFS_ATTR_PTR(raid, used_bytes),
@@ -676,6 +720,11 @@ SPACE_INFO_ATTR(bytes_zone_unusable);
 SPACE_INFO_ATTR(disk_used);
 SPACE_INFO_ATTR(disk_total);
 
+/*
+ * Allocation information about block group types.
+ *
+ * Path: /sys/fs/btrfs/<uuid>/allocation/<bg-type>/
+ */
 static struct attribute *space_info_attrs[] = {
        BTRFS_ATTR_PTR(space_info, flags),
        BTRFS_ATTR_PTR(space_info, total_bytes),
@@ -703,6 +752,11 @@ static struct kobj_type space_info_ktype = {
        .default_groups = space_info_groups,
 };
 
+/*
+ * Allocation information about block groups.
+ *
+ * Path: /sys/fs/btrfs/<uuid>/allocation/
+ */
 static const struct attribute *allocation_attrs[] = {
        BTRFS_ATTR_PTR(allocation, global_rsv_reserved),
        BTRFS_ATTR_PTR(allocation, global_rsv_size),
@@ -974,7 +1028,8 @@ static ssize_t btrfs_bg_reclaim_threshold_show(struct kobject *kobj,
        struct btrfs_fs_info *fs_info = to_fs_info(kobj);
        ssize_t ret;
 
-       ret = scnprintf(buf, PAGE_SIZE, "%d\n", fs_info->bg_reclaim_threshold);
+       ret = scnprintf(buf, PAGE_SIZE, "%d\n",
+                       READ_ONCE(fs_info->bg_reclaim_threshold));
 
        return ret;
 }
@@ -991,16 +1046,21 @@ static ssize_t btrfs_bg_reclaim_threshold_store(struct kobject *kobj,
        if (ret)
                return ret;
 
-       if (thresh <= 50 || thresh > 100)
+       if (thresh != 0 && (thresh <= 50 || thresh > 100))
                return -EINVAL;
 
-       fs_info->bg_reclaim_threshold = thresh;
+       WRITE_ONCE(fs_info->bg_reclaim_threshold, thresh);
 
        return len;
 }
 BTRFS_ATTR_RW(, bg_reclaim_threshold, btrfs_bg_reclaim_threshold_show,
              btrfs_bg_reclaim_threshold_store);
 
+/*
+ * Per-filesystem information and stats.
+ *
+ * Path: /sys/fs/btrfs/<uuid>/
+ */
 static const struct attribute *btrfs_attrs[] = {
        BTRFS_ATTR_PTR(, label),
        BTRFS_ATTR_PTR(, nodesize),
@@ -1510,6 +1570,11 @@ static ssize_t btrfs_devinfo_error_stats_show(struct kobject *kobj,
 }
 BTRFS_ATTR(devid, error_stats, btrfs_devinfo_error_stats_show);
 
+/*
+ * Information about one device.
+ *
+ * Path: /sys/fs/btrfs/<uuid>/devinfo/<devid>/
+ */
 static struct attribute *devid_attrs[] = {
        BTRFS_ATTR_PTR(devid, error_stats),
        BTRFS_ATTR_PTR(devid, in_fs_metadata),
@@ -1799,6 +1864,11 @@ QGROUP_RSV_ATTR(data, BTRFS_QGROUP_RSV_DATA);
 QGROUP_RSV_ATTR(meta_pertrans, BTRFS_QGROUP_RSV_META_PERTRANS);
 QGROUP_RSV_ATTR(meta_prealloc, BTRFS_QGROUP_RSV_META_PREALLOC);
 
+/*
+ * Qgroup information.
+ *
+ * Path: /sys/fs/btrfs/<uuid>/qgroups/<level>_<qgroupid>/
+ */
 static struct attribute *qgroup_attrs[] = {
        BTRFS_ATTR_PTR(qgroup, referenced),
        BTRFS_ATTR_PTR(qgroup, exclusive),
index 98b5aab..19ba7d5 100644 (file)
@@ -223,8 +223,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
         * we can only call btrfs_qgroup_account_extent() directly to test
         * quota.
         */
-       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false, false);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -236,8 +235,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false, false);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -260,8 +258,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
        old_roots = NULL;
        new_roots = NULL;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false, false);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -272,8 +269,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
        if (ret)
                return -EINVAL;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false, false);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -324,8 +320,7 @@ static int test_multiple_refs(struct btrfs_root *root,
                return ret;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false, false);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -337,8 +332,7 @@ static int test_multiple_refs(struct btrfs_root *root,
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false, false);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -359,8 +353,7 @@ static int test_multiple_refs(struct btrfs_root *root,
                return -EINVAL;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false, false);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -372,8 +365,7 @@ static int test_multiple_refs(struct btrfs_root *root,
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false, false);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -400,8 +392,7 @@ static int test_multiple_refs(struct btrfs_root *root,
                return -EINVAL;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false, false);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -413,8 +404,7 @@ static int test_multiple_refs(struct btrfs_root *root,
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false, false);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
index a8b2e0d..7733e8a 100644 (file)
@@ -24,6 +24,7 @@
 #include "compression.h"
 #include "volumes.h"
 #include "misc.h"
+#include "btrfs_inode.h"
 
 /*
  * Error message should follow the following format:
@@ -873,13 +874,22 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
                }
        }
 
-       if (unlikely((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
-                    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
-                    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
-                    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
-                    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
+       if (unlikely((type & BTRFS_BLOCK_GROUP_RAID10 &&
+                     sub_stripes != btrfs_raid_array[BTRFS_RAID_RAID10].sub_stripes) ||
+                    (type & BTRFS_BLOCK_GROUP_RAID1 &&
+                     num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1].devs_min) ||
+                    (type & BTRFS_BLOCK_GROUP_RAID1C3 &&
+                     num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1C3].devs_min) ||
+                    (type & BTRFS_BLOCK_GROUP_RAID1C4 &&
+                     num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1C4].devs_min) ||
+                    (type & BTRFS_BLOCK_GROUP_RAID5 &&
+                     num_stripes < btrfs_raid_array[BTRFS_RAID_RAID5].devs_min) ||
+                    (type & BTRFS_BLOCK_GROUP_RAID6 &&
+                     num_stripes < btrfs_raid_array[BTRFS_RAID_RAID6].devs_min) ||
+                    (type & BTRFS_BLOCK_GROUP_DUP &&
+                     num_stripes != btrfs_raid_array[BTRFS_RAID_DUP].dev_stripes) ||
                     ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
-                     num_stripes != 1))) {
+                     num_stripes != btrfs_raid_array[BTRFS_RAID_SINGLE].dev_stripes))) {
                chunk_err(leaf, chunk, logical,
                        "invalid num_stripes:sub_stripes %u:%u for profile %llu",
                        num_stripes, sub_stripes,
@@ -999,6 +1009,8 @@ static int check_inode_item(struct extent_buffer *leaf,
        u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
        u32 mode;
        int ret;
+       u32 flags;
+       u32 ro_flags;
 
        ret = check_inode_key(leaf, key, slot);
        if (unlikely(ret < 0))
@@ -1054,11 +1066,17 @@ static int check_inode_item(struct extent_buffer *leaf,
                        btrfs_inode_nlink(leaf, iitem));
                return -EUCLEAN;
        }
-       if (unlikely(btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK)) {
+       btrfs_inode_split_flags(btrfs_inode_flags(leaf, iitem), &flags, &ro_flags);
+       if (unlikely(flags & ~BTRFS_INODE_FLAG_MASK)) {
                inode_item_err(leaf, slot,
-                              "unknown flags detected: 0x%llx",
-                              btrfs_inode_flags(leaf, iitem) &
-                              ~BTRFS_INODE_FLAG_MASK);
+                              "unknown incompat flags detected: 0x%x", flags);
+               return -EUCLEAN;
+       }
+       if (unlikely(!sb_rdonly(fs_info->sb) &&
+                    (ro_flags & ~BTRFS_INODE_RO_FLAG_MASK))) {
+               inode_item_err(leaf, slot,
+                       "unknown ro-compat flags detected on writeable mount: 0x%x",
+                       ro_flags);
                return -EUCLEAN;
        }
        return 0;
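
The split here assumes the on-disk inode flags field keeps the incompat flags in its low 32 bits and the new ro-compat flags in the high 32 bits. A minimal sketch of pack/unpack helpers with those semantics (the actual btrfs_inode_split_flags()/btrfs_inode_combine_flags() are defined in btrfs_inode.h and may differ in detail):

/* Sketch only: ro-compat flags assumed to occupy the upper half of the u64. */
static inline u64 combine_inode_flags_sketch(u32 flags, u32 ro_flags)
{
	return (u64)flags | ((u64)ro_flags << 32);
}

static inline void split_inode_flags_sketch(u64 on_disk, u32 *flags, u32 *ro_flags)
{
	*flags = (u32)on_disk;
	*ro_flags = (u32)(on_disk >> 32);
}
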
index e6430ac..f7efc26 100644 (file)
@@ -753,7 +753,9 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                         */
                        ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
                                                ins.offset);
-                       if (ret == 0) {
+                       if (ret < 0) {
+                               goto out;
+                       } else if (ret == 0) {
                                btrfs_init_generic_ref(&ref,
                                                BTRFS_ADD_DELAYED_REF,
                                                ins.objectid, ins.offset, 0);
@@ -3039,8 +3041,6 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
                list_del_init(&ctx->list);
                ctx->log_ret = error;
        }
-
-       INIT_LIST_HEAD(&root->log_ctxs[index]);
 }
 
 /*
@@ -3328,10 +3328,16 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                goto out_wake_log_root;
        }
 
-       mutex_lock(&root->log_mutex);
-       if (root->last_log_commit < log_transid)
-               root->last_log_commit = log_transid;
-       mutex_unlock(&root->log_mutex);
+       /*
+        * We know there can only be one task here, since we have not yet set
+        * root->log_commit[index1] to 0 and any task attempting to sync the
+        * log must wait for the previous log transaction to commit if it's
+        * still in progress or wait for the current log transaction commit if
+        * someone else already started it. We use <= and not < because the
+        * first log transaction has an ID of 0.
+        */
+       ASSERT(root->last_log_commit <= log_transid);
+       root->last_log_commit = log_transid;
 
 out_wake_log_root:
        mutex_lock(&log_root_tree->log_mutex);
@@ -3417,14 +3423,10 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
 }
 
 /*
- * Check if an inode was logged in the current transaction. We can't always rely
- * on an inode's logged_trans value, because it's an in-memory only field and
- * therefore not persisted. This means that its value is lost if the inode gets
- * evicted and loaded again from disk (in which case it has a value of 0, and
- * certainly it is smaller then any possible transaction ID), when that happens
- * the full_sync flag is set in the inode's runtime flags, so on that case we
- * assume eviction happened and ignore the logged_trans value, assuming the
- * worst case, that the inode was logged before in the current transaction.
+ * Check if an inode was logged in the current transaction. This may often
+ * return some false positives, because logged_trans is an in memory only field,
+ * not persisted anywhere. This is meant to be used in contexts where a false
+ * positive has no functional consequences.
  */
 static bool inode_logged(struct btrfs_trans_handle *trans,
                         struct btrfs_inode *inode)
@@ -3432,8 +3434,17 @@ static bool inode_logged(struct btrfs_trans_handle *trans,
        if (inode->logged_trans == trans->transid)
                return true;
 
-       if (inode->last_trans == trans->transid &&
-           test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
+       /*
+        * The inode's logged_trans is always 0 when we load it (because it is
+        * not persisted in the inode item or elsewhere). So if it is 0, the
+        * not persisted in the inode item or elsewhere). So if it is 0 and the
+        * inode was last modified in the current transaction, the inode may
+        * have been logged before in the current transaction and then evicted
+        * and loaded again - or it may never have been logged in the current
+        * transaction at all. Since we cannot be sure, we have to assume it
+        * was, otherwise our callers can leave an inconsistent log.
+       if (inode->logged_trans == 0 &&
+           inode->last_trans == trans->transid &&
            !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
                return true;
 
@@ -3913,6 +3924,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
                            u64 logged_isize)
 {
        struct btrfs_map_token token;
+       u64 flags;
 
        btrfs_init_map_token(&token, leaf);
 
@@ -3962,20 +3974,49 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
        btrfs_set_token_inode_transid(&token, item, trans->transid);
        btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
-       btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags);
+       flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
+                                         BTRFS_I(inode)->ro_flags);
+       btrfs_set_token_inode_flags(&token, item, flags);
        btrfs_set_token_inode_block_group(&token, item, 0);
 }
 
 static int log_inode_item(struct btrfs_trans_handle *trans,
                          struct btrfs_root *log, struct btrfs_path *path,
-                         struct btrfs_inode *inode)
+                         struct btrfs_inode *inode, bool inode_item_dropped)
 {
        struct btrfs_inode_item *inode_item;
        int ret;
 
-       ret = btrfs_insert_empty_item(trans, log, path,
-                                     &inode->location, sizeof(*inode_item));
-       if (ret && ret != -EEXIST)
+       /*
+        * If we are doing a fast fsync and the inode was logged before in the
+        * current transaction, then we know the inode was previously logged and
+        * it exists in the log tree. For performance reasons, in this case use
+        * btrfs_search_slot() directly with ins_len set to 0 so that we never
+        * attempt a write lock on the leaf's parent, which adds unnecessary lock
+        * contention in case there are concurrent fsyncs for other inodes of the
+        * same subvolume. Using btrfs_insert_empty_item() when the inode item
+        * already exists can also result in unnecessarily splitting a leaf.
+        */
+       if (!inode_item_dropped && inode->logged_trans == trans->transid) {
+               ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1);
+               ASSERT(ret <= 0);
+               if (ret > 0)
+                       ret = -ENOENT;
+       } else {
+               /*
+                * This means it is the first fsync in the current transaction,
+                * so the inode item is not in the log and we need to insert it.
+                * We can never get -EEXIST because we are only called for a fast
+                * fsync and in case an inode eviction happens after the inode was
+                * logged before in the current transaction, when we load again
+                * the inode, we set BTRFS_INODE_NEEDS_FULL_SYNC on its runtime
+                * flags and set ->logged_trans to 0.
+                */
+               ret = btrfs_insert_empty_item(trans, log, path, &inode->location,
+                                             sizeof(*inode_item));
+               ASSERT(ret != -EEXIST);
+       }
+       if (ret)
                return ret;
        inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                    struct btrfs_inode_item);
@@ -4160,7 +4201,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 static int extent_cmp(void *priv, const struct list_head *a,
                      const struct list_head *b)
 {
-       struct extent_map *em1, *em2;
+       const struct extent_map *em1, *em2;
 
        em1 = list_entry(a, struct extent_map, list);
        em2 = list_entry(b, struct extent_map, list);
@@ -5053,8 +5094,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
                /*
                 * Check the inode's logged_trans only instead of
                 * btrfs_inode_in_log(). This is because the last_log_commit of
-                * the inode is not updated when we only log that it exists and
-                * it has the full sync bit set (see btrfs_log_inode()).
+                * the inode is not updated when we only log that it exists (see
+                * btrfs_log_inode()).
                 */
                if (BTRFS_I(inode)->logged_trans == trans->transid) {
                        spin_unlock(&BTRFS_I(inode)->lock);
@@ -5299,6 +5340,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        bool need_log_inode_item = true;
        bool xattrs_logged = false;
        bool recursive_logging = false;
+       bool inode_item_dropped = true;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -5433,6 +5475,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                } else {
                        if (inode_only == LOG_INODE_ALL)
                                fast_search = true;
+                       inode_item_dropped = false;
                        goto log_extents;
                }
 
@@ -5466,7 +5509,7 @@ log_extents:
        btrfs_release_path(path);
        btrfs_release_path(dst_path);
        if (need_log_inode_item) {
-               err = log_inode_item(trans, log, dst_path, inode);
+               err = log_inode_item(trans, log, dst_path, inode, inode_item_dropped);
                if (err)
                        goto out_unlock;
                /*
@@ -5572,6 +5615,13 @@ out_unlock:
 static bool need_log_inode(struct btrfs_trans_handle *trans,
                           struct btrfs_inode *inode)
 {
+       /*
+        * If a directory was not modified, no dentries added or removed, we can
+        * and should avoid logging it.
+        */
+       if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid)
+               return false;
+
        /*
         * If this inode does not have new/updated/deleted xattrs since the last
         * time it was logged and is flagged as logged in the current transaction,
diff --git a/fs/btrfs/verity.c b/fs/btrfs/verity.c
new file mode 100644 (file)
index 0000000..28d443d
--- /dev/null
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/rwsem.h>
+#include <linux/xattr.h>
+#include <linux/security.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/iversion.h>
+#include <linux/fsverity.h>
+#include <linux/sched/mm.h>
+#include "ctree.h"
+#include "btrfs_inode.h"
+#include "transaction.h"
+#include "disk-io.h"
+#include "locking.h"
+
+/*
+ * Implementation of the interface defined in struct fsverity_operations.
+ *
+ * The main question is how and where to store the verity descriptor and the
+ * Merkle tree. We store both in dedicated btree items in the filesystem tree,
+ * together with the rest of the inode metadata. This means we'll need to do
+ * extra work to encrypt them once encryption is supported in btrfs, but btrfs
+ * has a lot of careful code around i_size and it seems better to make a new key
+ * type than to try to adjust all of our expectations for i_size.
+ *
+ * Note that this differs from the implementation in ext4 and f2fs, where
+ * this data is stored as if it were in the file, but past EOF. However, btrfs
+ * does not have a widespread mechanism for caching opaque metadata pages, so we
+ * do pretend that the Merkle tree pages themselves are past EOF for the
+ * purposes of caching them (as opposed to creating a virtual inode).
+ *
+ * fs verity items are stored under two different key types on disk.
+ * The descriptor items:
+ * [ inode objectid, BTRFS_VERITY_DESC_ITEM_KEY, offset ]
+ *
+ * At offset 0, we store a btrfs_verity_descriptor_item which tracks the
+ * size of the descriptor item and some extra data for encryption.
+ * Starting at offset 1, these hold the generic fs verity descriptor.
+ * The latter are opaque to btrfs, we just read and write them as a blob for
+ * the higher level verity code.  The most common descriptor size is 256 bytes.
+ *
+ * The merkle tree items:
+ * [ inode objectid, BTRFS_VERITY_MERKLE_ITEM_KEY, offset ]
+ *
+ * These also start at offset 0, and correspond to the merkle tree bytes.
+ * So when fsverity asks for page 0 of the merkle tree, we pull up one page
+ * starting at offset 0 for this key type.  These are also opaque to btrfs,
+ * we're blindly storing whatever fsverity sends down.
+ *
+ * Another important consideration is the fact that the Merkle tree data scales
+ * linearly with the size of the file (with 4K pages/blocks and SHA-256, it's
+ * ~1/127th the size) so for large files, writing the tree can be a lengthy
+ * operation. For that reason, we guard the whole enable verity operation
+ * (between begin_enable_verity and end_enable_verity) with an orphan item.
+ * Again, because the data can be pretty large, it's quite possible that we
+ * could run out of space writing it, so we try our best to handle errors by
+ * stopping and rolling back rather than aborting the victim transaction.
+ */
+
+#define MERKLE_START_ALIGN                     65536
+
+/*
+ * Compute the logical file offset where we cache the Merkle tree.
+ *
+ * @inode:  inode of the verity file
+ *
+ * For the purposes of caching the Merkle tree pages, as required by
+ * fs-verity, it is convenient to do size computations in terms of a file
+ * offset, rather than in terms of page indices.
+ *
+ * Use 64K to be sure it's past the last page in the file, even with 64K pages.
+ * That rounding operation itself can overflow loff_t, so we do it in u64 and
+ * check.
+ *
+ * Returns the file offset on success, negative error code on failure.
+ */
+static loff_t merkle_file_pos(const struct inode *inode)
+{
+       u64 sz = inode->i_size;
+       u64 rounded = round_up(sz, MERKLE_START_ALIGN);
+
+       if (rounded > inode->i_sb->s_maxbytes)
+               return -EFBIG;
+
+       return rounded;
+}
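
As a worked example of the rounding with the 64KiB alignment defined above: an i_size of 1000 bytes places the cached Merkle data at offset 65536, an i_size of exactly 65536 keeps it at 65536 (rounding up a multiple is a no-op, and the file data itself ends at offset 65535), and an i_size of 65537 pushes it to 131072.
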
+
+/*
+ * Drop all the items for this inode with this key_type.
+ *
+ * @inode:     inode to drop items for
+ * @key_type:  type of items to drop (BTRFS_VERITY_DESC_ITEM or
+ *             BTRFS_VERITY_MERKLE_ITEM)
+ *
+ * Before doing a verity enable we cleanup any existing verity items.
+ * This is also used to clean up if a verity enable failed half way through.
+ *
+ * Returns number of dropped items on success, negative error code on failure.
+ */
+static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
+{
+       struct btrfs_trans_handle *trans;
+       struct btrfs_root *root = inode->root;
+       struct btrfs_path *path;
+       struct btrfs_key key;
+       int count = 0;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       while (1) {
+               /* 1 for the item being dropped */
+               trans = btrfs_start_transaction(root, 1);
+               if (IS_ERR(trans)) {
+                       ret = PTR_ERR(trans);
+                       goto out;
+               }
+
+               /*
+                * Walk backwards through all the items until we find one that
+                * isn't from our key type or objectid
+                */
+               key.objectid = btrfs_ino(inode);
+               key.type = key_type;
+               key.offset = (u64)-1;
+
+               ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+               if (ret > 0) {
+                       ret = 0;
+                       /* No more keys of this type, we're done */
+                       if (path->slots[0] == 0)
+                               break;
+                       path->slots[0]--;
+               } else if (ret < 0) {
+                       btrfs_end_transaction(trans);
+                       goto out;
+               }
+
+               btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+
+               /* No more keys of this type, we're done */
+               if (key.objectid != btrfs_ino(inode) || key.type != key_type)
+                       break;
+
+               /*
+                * This shouldn't be a performance sensitive function because
+                * it's not used as part of truncate.  If it ever becomes
+                * perf sensitive, change this to walk forward and bulk delete
+                * items
+                */
+               ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
+               if (ret) {
+                       btrfs_end_transaction(trans);
+                       goto out;
+               }
+               count++;
+               btrfs_release_path(path);
+               btrfs_end_transaction(trans);
+       }
+       ret = count;
+       btrfs_end_transaction(trans);
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+/*
+ * Drop all verity items
+ *
+ * @inode:  inode to drop verity items for
+ *
+ * In most contexts where we are dropping verity items, we want to do it for all
+ * the types of verity items, not a particular one.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int btrfs_drop_verity_items(struct btrfs_inode *inode)
+{
+       int ret;
+
+       ret = drop_verity_items(inode, BTRFS_VERITY_DESC_ITEM_KEY);
+       if (ret < 0)
+               return ret;
+       ret = drop_verity_items(inode, BTRFS_VERITY_MERKLE_ITEM_KEY);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * Insert and write inode items with a given key type and offset.
+ *
+ * @inode:     inode to insert for
+ * @key_type:  key type to insert
+ * @offset:    item offset to insert at
+ * @src:       source data to write
+ * @len:       length of source data to write
+ *
+ * Write len bytes from src into items of up to 2K length.
+ * The inserted items will have key (ino, key_type, offset + off) where off is
+ * consecutively increasing from 0 up to the last item ending at offset + len.
+ *
+ * Returns 0 on success and a negative error code on failure.
+ */
+static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
+                          const char *src, u64 len)
+{
+       struct btrfs_trans_handle *trans;
+       struct btrfs_path *path;
+       struct btrfs_root *root = inode->root;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+       unsigned long copy_bytes;
+       unsigned long src_offset = 0;
+       void *data;
+       int ret = 0;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       while (len > 0) {
+               /* 1 for the new item being inserted */
+               trans = btrfs_start_transaction(root, 1);
+               if (IS_ERR(trans)) {
+                       ret = PTR_ERR(trans);
+                       break;
+               }
+
+               key.objectid = btrfs_ino(inode);
+               key.type = key_type;
+               key.offset = offset;
+
+               /*
+                * Insert 2K at a time mostly to be friendly for smaller leaf
+                * size filesystems
+                */
+               copy_bytes = min_t(u64, len, 2048);
+
+               ret = btrfs_insert_empty_item(trans, root, path, &key, copy_bytes);
+               if (ret) {
+                       btrfs_end_transaction(trans);
+                       break;
+               }
+
+               leaf = path->nodes[0];
+
+               data = btrfs_item_ptr(leaf, path->slots[0], void);
+               write_extent_buffer(leaf, src + src_offset,
+                                   (unsigned long)data, copy_bytes);
+               offset += copy_bytes;
+               src_offset += copy_bytes;
+               len -= copy_bytes;
+
+               btrfs_release_path(path);
+               btrfs_end_transaction(trans);
+       }
+
+       btrfs_free_path(path);
+       return ret;
+}
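
As a worked example, writing a hypothetical 5000 byte descriptor via write_key_bytes(inode, BTRFS_VERITY_DESC_ITEM_KEY, 1, desc, 5000) would run the loop above three times and produce items keyed at offsets 1, 2049 and 4097, of 2048, 2048 and 904 bytes respectively.
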
+
+/*
+ * Read inode items of the given key type and offset from the btree.
+ *
+ * @inode:      inode to read items of
+ * @key_type:   key type to read
+ * @offset:     item offset to read from
+ * @dest:       Buffer to read into. This parameter has slightly tricky
+ *              semantics.  If it is NULL, the function will not do any copying
+ *              and will just return the size of all the items up to len bytes.
+ *              If dest_page is passed, then the function will kmap_local the
+ *              page and ignore dest, but it must still be non-NULL to avoid the
+ *              counting-only behavior.
+ * @len:        length in bytes to read
+ * @dest_page:  copy into this page instead of the dest buffer
+ *
+ * Helper function to read items from the btree.  This returns the number of
+ * bytes read or < 0 for errors.  We can return short reads if the items don't
+ * exist on disk or aren't big enough to fill the desired length.  Supports
+ * reading into a provided buffer (dest) or into the page cache
+ *
+ * Returns number of bytes read or a negative error code on failure.
+ */
+static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
+                         char *dest, u64 len, struct page *dest_page)
+{
+       struct btrfs_path *path;
+       struct btrfs_root *root = inode->root;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+       u64 item_end;
+       u64 copy_end;
+       int copied = 0;
+       u32 copy_offset;
+       unsigned long copy_bytes;
+       unsigned long dest_offset = 0;
+       void *data;
+       char *kaddr = dest;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       if (dest_page)
+               path->reada = READA_FORWARD;
+
+       key.objectid = btrfs_ino(inode);
+       key.type = key_type;
+       key.offset = offset;
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0) {
+               goto out;
+       } else if (ret > 0) {
+               ret = 0;
+               if (path->slots[0] == 0)
+                       goto out;
+               path->slots[0]--;
+       }
+
+       while (len > 0) {
+               leaf = path->nodes[0];
+               btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+               if (key.objectid != btrfs_ino(inode) || key.type != key_type)
+                       break;
+
+               item_end = btrfs_item_size_nr(leaf, path->slots[0]) + key.offset;
+
+               if (copied > 0) {
+                       /*
+                        * Once we've copied something, we want all of the items
+                        * to be sequential
+                        */
+                       if (key.offset != offset)
+                               break;
+               } else {
+                       /*
+                        * Our initial offset might be in the middle of an
+                        * item.  Make sure it all makes sense.
+                        */
+                       if (key.offset > offset)
+                               break;
+                       if (item_end <= offset)
+                               break;
+               }
+
+               /* desc = NULL to just sum all the item lengths */
+               if (!dest)
+                       copy_end = item_end;
+               else
+                       copy_end = min(offset + len, item_end);
+
+               /* Number of bytes in this item we want to copy */
+               copy_bytes = copy_end - offset;
+
+               /* Offset from the start of item for copying */
+               copy_offset = offset - key.offset;
+
+               if (dest) {
+                       if (dest_page)
+                               kaddr = kmap_local_page(dest_page);
+
+                       data = btrfs_item_ptr(leaf, path->slots[0], void);
+                       read_extent_buffer(leaf, kaddr + dest_offset,
+                                          (unsigned long)data + copy_offset,
+                                          copy_bytes);
+
+                       if (dest_page)
+                               kunmap_local(kaddr);
+               }
+
+               offset += copy_bytes;
+               dest_offset += copy_bytes;
+               len -= copy_bytes;
+               copied += copy_bytes;
+
+               path->slots[0]++;
+               if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+                       /*
+                        * We've reached the last slot in this leaf and we need
+                        * to go to the next leaf.
+                        */
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0) {
+                               break;
+                       } else if (ret > 0) {
+                               ret = 0;
+                               break;
+                       }
+               }
+       }
+out:
+       btrfs_free_path(path);
+       if (!ret)
+               ret = copied;
+       return ret;
+}
+
+/*
+ * Delete an fsverity orphan
+ *
+ * @trans:  transaction to do the delete in
+ * @inode:  inode to orphan
+ *
+ * Capture verity orphan specific logic that is repeated in the couple places
+ * we delete verity orphans. Specifically, handling ENOENT and ignoring inodes
+ * with 0 links.
+ *
+ * Returns zero on success or a negative error code on failure.
+ */
+static int del_orphan(struct btrfs_trans_handle *trans, struct btrfs_inode *inode)
+{
+       struct btrfs_root *root = inode->root;
+       int ret;
+
+       /*
+        * If the inode has no links, it is either already unlinked, or was
+        * created with O_TMPFILE. In either case, it should have an orphan from
+        * that other operation. Rather than reference count the orphans, we
+        * simply ignore them here, because we only invoke the verity path in
+        * the orphan logic when i_nlink is 1.
+        */
+       if (!inode->vfs_inode.i_nlink)
+               return 0;
+
+       ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
+       if (ret == -ENOENT)
+               ret = 0;
+       return ret;
+}
+
+/*
+ * Rollback in-progress verity if we encounter an error.
+ *
+ * @inode:  inode verity had an error for
+ *
+ * We try to handle recoverable errors while enabling verity by rolling it back
+ * and just failing the operation, rather than having an fs level error no
+ * matter what. However, any error in rollback is unrecoverable.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int rollback_verity(struct btrfs_inode *inode)
+{
+       struct btrfs_trans_handle *trans;
+       struct btrfs_root *root = inode->root;
+       int ret;
+
+       ASSERT(inode_is_locked(&inode->vfs_inode));
+       truncate_inode_pages(inode->vfs_inode.i_mapping, inode->vfs_inode.i_size);
+       clear_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
+       ret = btrfs_drop_verity_items(inode);
+       if (ret) {
+               btrfs_handle_fs_error(root->fs_info, ret,
+                               "failed to drop verity items in rollback %llu",
+                               (u64)inode->vfs_inode.i_ino);
+               goto out;
+       }
+
+       /*
+        * 1 for updating the inode flag
+        * 1 for deleting the orphan
+        */
+       trans = btrfs_start_transaction(root, 2);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
+               btrfs_handle_fs_error(root->fs_info, ret,
+                       "failed to start transaction in verity rollback %llu",
+                       (u64)inode->vfs_inode.i_ino);
+               goto out;
+       }
+       inode->ro_flags &= ~BTRFS_INODE_RO_VERITY;
+       btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
+       ret = btrfs_update_inode(trans, root, inode);
+       if (ret) {
+               btrfs_abort_transaction(trans, ret);
+               goto out;
+       }
+       ret = del_orphan(trans, inode);
+       if (ret) {
+               btrfs_abort_transaction(trans, ret);
+               goto out;
+       }
+       btrfs_end_transaction(trans);
+out:
+       return ret;
+}
+
+/*
+ * Finalize making the file a valid verity file
+ *
+ * @inode:      inode to be marked as verity
+ * @desc:       contents of the verity descriptor to write (not NULL)
+ * @desc_size:  size of the verity descriptor
+ *
+ * Do the actual work of finalizing verity after successfully writing the Merkle
+ * tree:
+ *
+ * - write out the descriptor items
+ * - mark the inode with the verity flag
+ * - delete the orphan item
+ * - mark the ro compat bit
+ * - clear the in progress bit
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int finish_verity(struct btrfs_inode *inode, const void *desc,
+                        size_t desc_size)
+{
+       struct btrfs_trans_handle *trans = NULL;
+       struct btrfs_root *root = inode->root;
+       struct btrfs_verity_descriptor_item item;
+       int ret;
+
+       /* Write out the descriptor item */
+       memset(&item, 0, sizeof(item));
+       btrfs_set_stack_verity_descriptor_size(&item, desc_size);
+       ret = write_key_bytes(inode, BTRFS_VERITY_DESC_ITEM_KEY, 0,
+                             (const char *)&item, sizeof(item));
+       if (ret)
+               goto out;
+
+       /* Write out the descriptor itself */
+       ret = write_key_bytes(inode, BTRFS_VERITY_DESC_ITEM_KEY, 1,
+                             desc, desc_size);
+       if (ret)
+               goto out;
+
+       /*
+        * 1 for updating the inode flag
+        * 1 for deleting the orphan
+        */
+       trans = btrfs_start_transaction(root, 2);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
+               goto out;
+       }
+       inode->ro_flags |= BTRFS_INODE_RO_VERITY;
+       btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
+       ret = btrfs_update_inode(trans, root, inode);
+       if (ret)
+               goto end_trans;
+       ret = del_orphan(trans, inode);
+       if (ret)
+               goto end_trans;
+       clear_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
+       btrfs_set_fs_compat_ro(root->fs_info, VERITY);
+end_trans:
+       btrfs_end_transaction(trans);
+out:
+       return ret;
+
+}
+
+/*
+ * fsverity op that begins enabling verity.
+ *
+ * @filp:  file to enable verity on
+ *
+ * Begin enabling fsverity for the file. We drop any existing verity items, add
+ * an orphan and set the in progress bit.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int btrfs_begin_enable_verity(struct file *filp)
+{
+       struct btrfs_inode *inode = BTRFS_I(file_inode(filp));
+       struct btrfs_root *root = inode->root;
+       struct btrfs_trans_handle *trans;
+       int ret;
+
+       ASSERT(inode_is_locked(file_inode(filp)));
+
+       if (test_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags))
+               return -EBUSY;
+
+       /*
+        * This should almost never do anything, but theoretically, it's
+        * possible that we failed to enable verity on a file, then were
+        * interrupted or failed while rolling back, failed to clean up the
+        * orphan, and finally attempt to enable verity again.
+        */
+       ret = btrfs_drop_verity_items(inode);
+       if (ret)
+               return ret;
+
+       /* 1 for the orphan item */
+       trans = btrfs_start_transaction(root, 1);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
+
+       ret = btrfs_orphan_add(trans, inode);
+       if (!ret)
+               set_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
+       btrfs_end_transaction(trans);
+
+       return 0;
+}
+
+/*
+ * fsverity op that ends enabling verity.
+ *
+ * @filp:              file we are finishing enabling verity on
+ * @desc:              verity descriptor to write out (NULL in error conditions)
+ * @desc_size:         size of the verity descriptor (variable with signatures)
+ * @merkle_tree_size:  size of the merkle tree in bytes
+ *
+ * If desc is null, then VFS is signaling an error occurred during verity
+ * enable, and we should try to rollback. Otherwise, attempt to finish verity.
+ *
+ * Returns 0 on success, negative error code on error.
+ */
+static int btrfs_end_enable_verity(struct file *filp, const void *desc,
+                                  size_t desc_size, u64 merkle_tree_size)
+{
+       struct btrfs_inode *inode = BTRFS_I(file_inode(filp));
+       int ret = 0;
+       int rollback_ret;
+
+       ASSERT(inode_is_locked(file_inode(filp)));
+
+       if (desc == NULL)
+               goto rollback;
+
+       ret = finish_verity(inode, desc, desc_size);
+       if (ret)
+               goto rollback;
+       return ret;
+
+rollback:
+       rollback_ret = rollback_verity(inode);
+       if (rollback_ret)
+               btrfs_err(inode->root->fs_info,
+                         "failed to rollback verity items: %d", rollback_ret);
+       return ret;
+}
+
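For context, these two ops are reached through the common fsverity ioctl path. A minimal userspace sketch (illustrative only, not part of this change) that would drive ->begin_enable_verity(), the Merkle tree build, and ->end_enable_verity() on a btrfs file, assuming linux/fsverity.h and a file opened read-only:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsverity.h>

int enable_verity(const char *path)
{
        struct fsverity_enable_arg arg = { 0 };
        int fd, ret;

        /* fsverity requires a read-only descriptor with no open writers */
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;

        arg.version = 1;
        arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
        arg.block_size = 4096;

        /* Triggers begin_enable_verity, the Merkle tree build, end_enable_verity */
        ret = ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
        if (ret)
                perror("FS_IOC_ENABLE_VERITY");

        close(fd);
        return ret;
}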
+/*
+ * fsverity op that gets the struct fsverity_descriptor.
+ *
+ * @inode:     inode to get the descriptor of
+ * @buf:       output buffer for the descriptor contents
+ * @buf_size:  size of the output buffer. 0 to query the size
+ *
+ * fsverity does a two-pass setup for reading the descriptor: in the first pass
+ * it calls with buf_size = 0 to query the size of the descriptor, and then in
+ * the second pass it actually reads the descriptor off disk.
+ *
+ * Returns the size on success or a negative error code on failure.
+ */
+static int btrfs_get_verity_descriptor(struct inode *inode, void *buf,
+                                      size_t buf_size)
+{
+       u64 true_size;
+       int ret = 0;
+       struct btrfs_verity_descriptor_item item;
+
+       memset(&item, 0, sizeof(item));
+       ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_DESC_ITEM_KEY, 0,
+                            (char *)&item, sizeof(item), NULL);
+       if (ret < 0)
+               return ret;
+
+       if (item.reserved[0] != 0 || item.reserved[1] != 0)
+               return -EUCLEAN;
+
+       true_size = btrfs_stack_verity_descriptor_size(&item);
+       if (true_size > INT_MAX)
+               return -EUCLEAN;
+
+       if (buf_size == 0)
+               return true_size;
+       if (buf_size < true_size)
+               return -ERANGE;
+
+       ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_DESC_ITEM_KEY, 1,
+                            buf, buf_size, NULL);
+       if (ret < 0)
+               return ret;
+       if (ret != true_size)
+               return -EIO;
+
+       return true_size;
+}
+
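A sketch of the two-pass convention described above, as the fsverity core uses it through btrfs_verityops (caller-side variable names are illustrative):

        int size;
        void *buf;

        /* Pass 1: buf_size == 0 only queries the descriptor size */
        size = btrfs_get_verity_descriptor(inode, NULL, 0);
        if (size < 0)
                return size;

        buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* Pass 2: read the descriptor itself off disk */
        size = btrfs_get_verity_descriptor(inode, buf, size);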
+/*
+ * fsverity op that reads and caches a merkle tree page.
+ *
+ * @inode:         inode to read a merkle tree page for
+ * @index:         page index relative to the start of the merkle tree
+ * @num_ra_pages:  number of pages to readahead. Optional, we ignore it
+ *
+ * The Merkle tree is stored in the filesystem btree, but its pages are cached
+ * with a logical position past EOF in the inode's mapping.
+ *
+ * Returns the page we read, or an ERR_PTR on error.
+ */
+static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
+                                               pgoff_t index,
+                                               unsigned long num_ra_pages)
+{
+       struct page *page;
+       u64 off = (u64)index << PAGE_SHIFT;
+       loff_t merkle_pos = merkle_file_pos(inode);
+       int ret;
+
+       if (merkle_pos < 0)
+               return ERR_PTR(merkle_pos);
+       if (merkle_pos > inode->i_sb->s_maxbytes - off - PAGE_SIZE)
+               return ERR_PTR(-EFBIG);
+       index += merkle_pos >> PAGE_SHIFT;
+again:
+       page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
+       if (page) {
+               if (PageUptodate(page))
+                       return page;
+
+               lock_page(page);
+               /*
+                * We only insert uptodate pages, so !Uptodate has to be
+                * an error
+                */
+               if (!PageUptodate(page)) {
+                       unlock_page(page);
+                       put_page(page);
+                       return ERR_PTR(-EIO);
+               }
+               unlock_page(page);
+               return page;
+       }
+
+       page = __page_cache_alloc(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
+       if (!page)
+               return ERR_PTR(-ENOMEM);
+
+       /*
+        * Merkle item keys are indexed from byte 0 in the merkle tree.
+        * They have the form:
+        *
+        * [ inode objectid, BTRFS_MERKLE_ITEM_KEY, offset in bytes ]
+        */
+       ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY, off,
+                            page_address(page), PAGE_SIZE, page);
+       if (ret < 0) {
+               put_page(page);
+               return ERR_PTR(ret);
+       }
+       if (ret < PAGE_SIZE)
+               memzero_page(page, ret, PAGE_SIZE - ret);
+
+       SetPageUptodate(page);
+       ret = add_to_page_cache_lru(page, inode->i_mapping, index, GFP_NOFS);
+
+       if (!ret) {
+               /* Inserted and ready for fsverity */
+               unlock_page(page);
+       } else {
+               put_page(page);
+               /* Did someone race us into inserting this page? */
+               if (ret == -EEXIST)
+                       goto again;
+               page = ERR_PTR(ret);
+       }
+       return page;
+}
+
+/*
+ * fsverity op that writes a Merkle tree block into the btree.
+ *
+ * @inode:          inode to write a Merkle tree block for
+ * @buf:            Merkle tree data block to write
+ * @index:          index of the block in the Merkle tree
+ * @log_blocksize:  log base 2 of the Merkle tree block size
+ *
+ * Note that the block size could be different from the page size, so it is not
+ * safe to assume that index is a page index.
+ *
+ * Returns 0 on success or negative error code on failure
+ */
+static int btrfs_write_merkle_tree_block(struct inode *inode, const void *buf,
+                                       u64 index, int log_blocksize)
+{
+       u64 off = index << log_blocksize;
+       u64 len = 1ULL << log_blocksize;
+       loff_t merkle_pos = merkle_file_pos(inode);
+
+       if (merkle_pos < 0)
+               return merkle_pos;
+       if (merkle_pos > inode->i_sb->s_maxbytes - off - len)
+               return -EFBIG;
+
+       return write_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY,
+                              off, buf, len);
+}
+
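To make the item layout above concrete: with the default 4K Merkle tree block size (log_blocksize = 12), block index 3 lands at off = 3 << 12 = 12288 and len = 1 << 12 = 4096, i.e. the block is stored under the key [inode objectid, BTRFS_VERITY_MERKLE_ITEM_KEY, 12288].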
+const struct fsverity_operations btrfs_verityops = {
+       .begin_enable_verity     = btrfs_begin_enable_verity,
+       .end_enable_verity       = btrfs_end_enable_verity,
+       .get_verity_descriptor   = btrfs_get_verity_descriptor,
+       .read_merkle_tree_page   = btrfs_read_merkle_tree_page,
+       .write_merkle_tree_block = btrfs_write_merkle_tree_block,
+};
index 70f94b7..ec3a874 100644 (file)
@@ -38,7 +38,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
                .sub_stripes    = 2,
                .dev_stripes    = 1,
                .devs_max       = 0,    /* 0 == as many as possible */
-               .devs_min       = 4,
+               .devs_min       = 2,
                .tolerated_failures = 1,
                .devs_increment = 2,
                .ncopies        = 2,
@@ -103,7 +103,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 0,
-               .devs_min       = 2,
+               .devs_min       = 1,
                .tolerated_failures = 0,
                .devs_increment = 1,
                .ncopies        = 1,
@@ -153,6 +153,32 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
        },
 };
 
+/*
+ * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
+ * can be used as index to access btrfs_raid_array[].
+ */
+enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
+{
+       if (flags & BTRFS_BLOCK_GROUP_RAID10)
+               return BTRFS_RAID_RAID10;
+       else if (flags & BTRFS_BLOCK_GROUP_RAID1)
+               return BTRFS_RAID_RAID1;
+       else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
+               return BTRFS_RAID_RAID1C3;
+       else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
+               return BTRFS_RAID_RAID1C4;
+       else if (flags & BTRFS_BLOCK_GROUP_DUP)
+               return BTRFS_RAID_DUP;
+       else if (flags & BTRFS_BLOCK_GROUP_RAID0)
+               return BTRFS_RAID_RAID0;
+       else if (flags & BTRFS_BLOCK_GROUP_RAID5)
+               return BTRFS_RAID_RAID5;
+       else if (flags & BTRFS_BLOCK_GROUP_RAID6)
+               return BTRFS_RAID_RAID6;
+
+       return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
+}
+
 const char *btrfs_bg_type_to_raid_name(u64 flags)
 {
        const int index = btrfs_bg_flags_to_raid_index(flags);
@@ -404,44 +430,6 @@ void __exit btrfs_cleanup_fs_uuids(void)
        }
 }
 
-/*
- * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
- * Returned struct is not linked onto any lists and must be destroyed using
- * btrfs_free_device.
- */
-static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
-{
-       struct btrfs_device *dev;
-
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev)
-               return ERR_PTR(-ENOMEM);
-
-       /*
-        * Preallocate a bio that's always going to be used for flushing device
-        * barriers and matches the device lifespan
-        */
-       dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
-       if (!dev->flush_bio) {
-               kfree(dev);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       INIT_LIST_HEAD(&dev->dev_list);
-       INIT_LIST_HEAD(&dev->dev_alloc_list);
-       INIT_LIST_HEAD(&dev->post_commit_list);
-
-       atomic_set(&dev->reada_in_flight, 0);
-       atomic_set(&dev->dev_stats_ccnt, 0);
-       btrfs_device_data_ordered_init(dev);
-       INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
-       INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
-       extent_io_tree_init(fs_info, &dev->alloc_state,
-                           IO_TREE_DEVICE_ALLOC_STATE, NULL);
-
-       return dev;
-}
-
 static noinline struct btrfs_fs_devices *find_fsid(
                const u8 *fsid, const u8 *metadata_fsid)
 {
@@ -1130,6 +1118,9 @@ static void btrfs_close_one_device(struct btrfs_device *device)
                fs_devices->rw_devices--;
        }
 
+       if (device->devid == BTRFS_DEV_REPLACE_DEVID)
+               clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
+
        if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
                fs_devices->missing_devices--;
 
@@ -1228,7 +1219,7 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
 static int devid_cmp(void *priv, const struct list_head *a,
                     const struct list_head *b)
 {
-       struct btrfs_device *dev1, *dev2;
+       const struct btrfs_device *dev1, *dev2;
 
        dev1 = list_entry(a, struct btrfs_device, dev_list);
        dev2 = list_entry(b, struct btrfs_device, dev_list);
@@ -1598,14 +1589,9 @@ again:
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;
 
-       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       ret = btrfs_search_backwards(root, &key, path);
        if (ret < 0)
                goto out;
-       if (ret > 0) {
-               ret = btrfs_previous_item(root, path, key.objectid, key.type);
-               if (ret < 0)
-                       goto out;
-       }
 
        while (1) {
                l = path->nodes[0];
@@ -1759,48 +1745,6 @@ out:
        return ret;
 }
 
-static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
-                                 struct btrfs_device *device,
-                                 u64 chunk_offset, u64 start, u64 num_bytes)
-{
-       int ret;
-       struct btrfs_path *path;
-       struct btrfs_fs_info *fs_info = device->fs_info;
-       struct btrfs_root *root = fs_info->dev_root;
-       struct btrfs_dev_extent *extent;
-       struct extent_buffer *leaf;
-       struct btrfs_key key;
-
-       WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
-       WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
-       path = btrfs_alloc_path();
-       if (!path)
-               return -ENOMEM;
-
-       key.objectid = device->devid;
-       key.offset = start;
-       key.type = BTRFS_DEV_EXTENT_KEY;
-       ret = btrfs_insert_empty_item(trans, root, path, &key,
-                                     sizeof(*extent));
-       if (ret)
-               goto out;
-
-       leaf = path->nodes[0];
-       extent = btrfs_item_ptr(leaf, path->slots[0],
-                               struct btrfs_dev_extent);
-       btrfs_set_dev_extent_chunk_tree(leaf, extent,
-                                       BTRFS_CHUNK_TREE_OBJECTID);
-       btrfs_set_dev_extent_chunk_objectid(leaf, extent,
-                                           BTRFS_FIRST_CHUNK_TREE_OBJECTID);
-       btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
-
-       btrfs_set_dev_extent_length(leaf, extent, num_bytes);
-       btrfs_mark_buffer_dirty(leaf);
-out:
-       btrfs_free_path(path);
-       return ret;
-}
-
 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
 {
        struct extent_map_tree *em_tree;
@@ -2003,12 +1947,8 @@ static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
                if (!(all_avail & btrfs_raid_array[i].bg_flag))
                        continue;
 
-               if (num_devices < btrfs_raid_array[i].devs_min) {
-                       int ret = btrfs_raid_array[i].mindev_error;
-
-                       if (ret)
-                               return ret;
-               }
+               if (num_devices < btrfs_raid_array[i].devs_min)
+                       return btrfs_raid_array[i].mindev_error;
        }
 
        return 0;
@@ -2137,7 +2077,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
 
        if (IS_ERR(device)) {
                if (PTR_ERR(device) == -ENOENT &&
-                   strcmp(device_path, "missing") == 0)
+                   device_path && strcmp(device_path, "missing") == 0)
                        ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
                else
                        ret = PTR_ERR(device);
@@ -3622,10 +3562,7 @@ static u64 calc_data_stripes(u64 type, int num_stripes)
        const int ncopies = btrfs_raid_array[index].ncopies;
        const int nparity = btrfs_raid_array[index].nparity;
 
-       if (nparity)
-               return num_stripes - nparity;
-       else
-               return num_stripes / ncopies;
+       return (num_stripes - nparity) / ncopies;
 }
 
 /* [pstart, pend) */
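A quick check that the unified formula matches both branches it replaces: RAID10 (ncopies = 2, nparity = 0) with 4 stripes gives (4 - 0) / 2 = 2 data stripes, the old num_stripes / ncopies result, while RAID6 (ncopies = 1, nparity = 2) with 6 stripes gives (6 - 2) / 1 = 4, the old num_stripes - nparity result.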
@@ -4025,6 +3962,13 @@ static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
        if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
                return true;
 
+       if (fs_info->sectorsize < PAGE_SIZE &&
+               bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+               btrfs_err(fs_info,
+               "RAID56 is not yet supported for sectorsize %u with page size %lu",
+                         fs_info->sectorsize, PAGE_SIZE);
+               return false;
+       }
        /* Profile is valid and does not have bits outside of the allowed set */
        if (alloc_profile_is_valid(bargs->target, 1) &&
            (bargs->target & ~allowed) == 0)
@@ -5463,56 +5407,6 @@ out:
        return block_group;
 }
 
-/*
- * This function, btrfs_finish_chunk_alloc(), belongs to phase 2.
- *
- * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
- * phases.
- */
-int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
-                            u64 chunk_offset, u64 chunk_size)
-{
-       struct btrfs_fs_info *fs_info = trans->fs_info;
-       struct btrfs_device *device;
-       struct extent_map *em;
-       struct map_lookup *map;
-       u64 dev_offset;
-       u64 stripe_size;
-       int i;
-       int ret = 0;
-
-       em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
-       if (IS_ERR(em))
-               return PTR_ERR(em);
-
-       map = em->map_lookup;
-       stripe_size = em->orig_block_len;
-
-       /*
-        * Take the device list mutex to prevent races with the final phase of
-        * a device replace operation that replaces the device object associated
-        * with the map's stripes, because the device object's id can change
-        * at any time during that final phase of the device replace operation
-        * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
-        * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
-        * resulting in persisting a device extent item with such ID.
-        */
-       mutex_lock(&fs_info->fs_devices->device_list_mutex);
-       for (i = 0; i < map->num_stripes; i++) {
-               device = map->stripes[i].dev;
-               dev_offset = map->stripes[i].physical;
-
-               ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
-                                            dev_offset, stripe_size);
-               if (ret)
-                       break;
-       }
-       mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-
-       free_extent_map(em);
-       return ret;
-}
-
 /*
  * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to the
  * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
@@ -6923,9 +6817,31 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
        if (WARN_ON(!devid && !fs_info))
                return ERR_PTR(-EINVAL);
 
-       dev = __alloc_device(fs_info);
-       if (IS_ERR(dev))
-               return dev;
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return ERR_PTR(-ENOMEM);
+
+       /*
+        * Preallocate a bio that's always going to be used for flushing device
+        * barriers and matches the device lifespan
+        */
+       dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
+       if (!dev->flush_bio) {
+               kfree(dev);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       INIT_LIST_HEAD(&dev->dev_list);
+       INIT_LIST_HEAD(&dev->dev_alloc_list);
+       INIT_LIST_HEAD(&dev->post_commit_list);
+
+       atomic_set(&dev->reada_in_flight, 0);
+       atomic_set(&dev->dev_stats_ccnt, 0);
+       btrfs_device_data_ordered_init(dev);
+       INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+       INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+       extent_io_tree_init(fs_info, &dev->alloc_state,
+                           IO_TREE_DEVICE_ALLOC_STATE, NULL);
 
        if (devid)
                tmp = *devid;
@@ -6961,15 +6877,7 @@ static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
 
 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
 {
-       int index = btrfs_bg_flags_to_raid_index(type);
-       int ncopies = btrfs_raid_array[index].ncopies;
-       const int nparity = btrfs_raid_array[index].nparity;
-       int data_stripes;
-
-       if (nparity)
-               data_stripes = num_stripes - nparity;
-       else
-               data_stripes = num_stripes / ncopies;
+       const int data_stripes = calc_data_stripes(type, num_stripes);
 
        return div_u64(chunk_len, data_stripes);
 }
@@ -8144,7 +8052,7 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
                goto out;
 
        if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
-               ret = btrfs_next_item(root, path);
+               ret = btrfs_next_leaf(root, path);
                if (ret < 0)
                        goto out;
                /* No dev extents at all? Not good */
index 55a8ba2..b082250 100644 (file)
@@ -508,8 +508,6 @@ int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
                           u64 logical, u64 len);
 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
                                    u64 logical);
-int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
-                            u64 chunk_offset, u64 chunk_size);
 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_block_group *bg);
 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
@@ -568,32 +566,6 @@ static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
        atomic_inc(&dev->dev_stats_ccnt);
 }
 
-/*
- * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
- * can be used as index to access btrfs_raid_array[].
- */
-static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
-{
-       if (flags & BTRFS_BLOCK_GROUP_RAID10)
-               return BTRFS_RAID_RAID10;
-       else if (flags & BTRFS_BLOCK_GROUP_RAID1)
-               return BTRFS_RAID_RAID1;
-       else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
-               return BTRFS_RAID_RAID1C3;
-       else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
-               return BTRFS_RAID_RAID1C4;
-       else if (flags & BTRFS_BLOCK_GROUP_DUP)
-               return BTRFS_RAID_DUP;
-       else if (flags & BTRFS_BLOCK_GROUP_RAID0)
-               return BTRFS_RAID_RAID0;
-       else if (flags & BTRFS_BLOCK_GROUP_RAID5)
-               return BTRFS_RAID_RAID5;
-       else if (flags & BTRFS_BLOCK_GROUP_RAID6)
-               return BTRFS_RAID_RAID6;
-
-       return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
-}
-
 void btrfs_commit_device_sizes(struct btrfs_transaction *trans);
 
 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
@@ -603,6 +575,7 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
                               struct block_device *bdev,
                               const char *device_path);
 
+enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags);
 int btrfs_bg_type_to_factor(u64 flags);
 const char *btrfs_bg_type_to_raid_name(u64 flags);
 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
index c3fa7d3..8afa900 100644 (file)
@@ -121,12 +121,12 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
        workspace->strm.total_in = 0;
        workspace->strm.total_out = 0;
 
-       out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+       out_page = alloc_page(GFP_NOFS);
        if (out_page == NULL) {
                ret = -ENOMEM;
                goto out;
        }
-       cpage_out = kmap(out_page);
+       cpage_out = page_address(out_page);
        pages[0] = out_page;
        nr_pages = 1;
 
@@ -148,26 +148,22 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
                                int i;
 
                                for (i = 0; i < in_buf_pages; i++) {
-                                       if (in_page) {
-                                               kunmap(in_page);
+                                       if (in_page)
                                                put_page(in_page);
-                                       }
                                        in_page = find_get_page(mapping,
                                                                start >> PAGE_SHIFT);
-                                       data_in = kmap(in_page);
+                                       data_in = page_address(in_page);
                                        memcpy(workspace->buf + i * PAGE_SIZE,
                                               data_in, PAGE_SIZE);
                                        start += PAGE_SIZE;
                                }
                                workspace->strm.next_in = workspace->buf;
                        } else {
-                               if (in_page) {
-                                       kunmap(in_page);
+                               if (in_page)
                                        put_page(in_page);
-                               }
                                in_page = find_get_page(mapping,
                                                        start >> PAGE_SHIFT);
-                               data_in = kmap(in_page);
+                               data_in = page_address(in_page);
                                start += PAGE_SIZE;
                                workspace->strm.next_in = data_in;
                        }
@@ -196,18 +192,17 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
                 * the stream end if required
                 */
                if (workspace->strm.avail_out == 0) {
-                       kunmap(out_page);
                        if (nr_pages == nr_dest_pages) {
                                out_page = NULL;
                                ret = -E2BIG;
                                goto out;
                        }
-                       out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+                       out_page = alloc_page(GFP_NOFS);
                        if (out_page == NULL) {
                                ret = -ENOMEM;
                                goto out;
                        }
-                       cpage_out = kmap(out_page);
+                       cpage_out = page_address(out_page);
                        pages[nr_pages] = out_page;
                        nr_pages++;
                        workspace->strm.avail_out = PAGE_SIZE;
@@ -234,18 +229,17 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
                        goto out;
                } else if (workspace->strm.avail_out == 0) {
                        /* get another page for the stream end */
-                       kunmap(out_page);
                        if (nr_pages == nr_dest_pages) {
                                out_page = NULL;
                                ret = -E2BIG;
                                goto out;
                        }
-                       out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+                       out_page = alloc_page(GFP_NOFS);
                        if (out_page == NULL) {
                                ret = -ENOMEM;
                                goto out;
                        }
-                       cpage_out = kmap(out_page);
+                       cpage_out = page_address(out_page);
                        pages[nr_pages] = out_page;
                        nr_pages++;
                        workspace->strm.avail_out = PAGE_SIZE;
@@ -264,13 +258,8 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
        *total_in = workspace->strm.total_in;
 out:
        *out_pages = nr_pages;
-       if (out_page)
-               kunmap(out_page);
-
-       if (in_page) {
-               kunmap(in_page);
+       if (in_page)
                put_page(in_page);
-       }
        return ret;
 }
 
@@ -286,10 +275,8 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
        unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
        unsigned long buf_start;
        struct page **pages_in = cb->compressed_pages;
-       u64 disk_start = cb->start;
-       struct bio *orig_bio = cb->orig_bio;
 
-       data_in = kmap(pages_in[page_in_index]);
+       data_in = page_address(pages_in[page_in_index]);
        workspace->strm.next_in = data_in;
        workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
        workspace->strm.total_in = 0;
@@ -311,7 +298,6 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 
        if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
                pr_warn("BTRFS: inflateInit failed\n");
-               kunmap(pages_in[page_in_index]);
                return -EIO;
        }
        while (workspace->strm.total_in < srclen) {
@@ -326,9 +312,8 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
                if (buf_start == total_out)
                        break;
 
-               ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
-                                                total_out, disk_start,
-                                                orig_bio);
+               ret2 = btrfs_decompress_buf2page(workspace->buf,
+                               total_out - buf_start, cb, buf_start);
                if (ret2 == 0) {
                        ret = 0;
                        goto done;
@@ -339,17 +324,16 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 
                if (workspace->strm.avail_in == 0) {
                        unsigned long tmp;
-                       kunmap(pages_in[page_in_index]);
+
                        page_in_index++;
                        if (page_in_index >= total_pages_in) {
                                data_in = NULL;
                                break;
                        }
-                       data_in = kmap(pages_in[page_in_index]);
+                       data_in = page_address(pages_in[page_in_index]);
                        workspace->strm.next_in = data_in;
                        tmp = srclen - workspace->strm.total_in;
-                       workspace->strm.avail_in = min(tmp,
-                                                          PAGE_SIZE);
+                       workspace->strm.avail_in = min(tmp, PAGE_SIZE);
                }
        }
        if (ret != Z_STREAM_END)
@@ -358,10 +342,8 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
                ret = 0;
 done:
        zlib_inflateEnd(&workspace->strm);
-       if (data_in)
-               kunmap(pages_in[page_in_index]);
        if (!ret)
-               zero_fill_bio(orig_bio);
+               zero_fill_bio(cb->orig_bio);
        return ret;
 }
 
index 907c2cc..47af1ab 100644 (file)
@@ -245,7 +245,7 @@ static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
                goto out;
 
        if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
-               ret = btrfs_next_item(root, path);
+               ret = btrfs_next_leaf(root, path);
                if (ret < 0)
                        goto out;
                /* No dev extents at all? Not good */
@@ -296,7 +296,6 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
        struct btrfs_fs_info *fs_info = device->fs_info;
        struct btrfs_zoned_device_info *zone_info = NULL;
        struct block_device *bdev = device->bdev;
-       struct request_queue *queue = bdev_get_queue(bdev);
        sector_t nr_sectors;
        sector_t sector = 0;
        struct blk_zone *zones = NULL;
@@ -348,19 +347,10 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
 
        nr_sectors = bdev_nr_sectors(bdev);
        zone_info->zone_size_shift = ilog2(zone_info->zone_size);
-       zone_info->max_zone_append_size =
-               (u64)queue_max_zone_append_sectors(queue) << SECTOR_SHIFT;
        zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
        if (!IS_ALIGNED(nr_sectors, zone_sectors))
                zone_info->nr_zones++;
 
-       if (bdev_is_zoned(bdev) && zone_info->max_zone_append_size == 0) {
-               btrfs_err(fs_info, "zoned: device %pg does not support zone append",
-                         bdev);
-               ret = -EINVAL;
-               goto out;
-       }
-
        zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
        if (!zone_info->seq_zones) {
                ret = -ENOMEM;
@@ -529,7 +519,6 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
        u64 zoned_devices = 0;
        u64 nr_devices = 0;
        u64 zone_size = 0;
-       u64 max_zone_append_size = 0;
        const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
        int ret = 0;
 
@@ -565,11 +554,6 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
                                ret = -EINVAL;
                                goto out;
                        }
-                       if (!max_zone_append_size ||
-                           (zone_info->max_zone_append_size &&
-                            zone_info->max_zone_append_size < max_zone_append_size))
-                               max_zone_append_size =
-                                       zone_info->max_zone_append_size;
                }
                nr_devices++;
        }
@@ -619,7 +603,6 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
        }
 
        fs_info->zone_size = zone_size;
-       fs_info->max_zone_append_size = max_zone_append_size;
        fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
 
        /*
@@ -1318,9 +1301,6 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
        if (!btrfs_is_zoned(fs_info))
                return false;
 
-       if (!fs_info->max_zone_append_size)
-               return false;
-
        if (!is_data_inode(&inode->vfs_inode))
                return false;
 
index b0ae260..4b29970 100644 (file)
@@ -22,7 +22,6 @@ struct btrfs_zoned_device_info {
         */
        u64 zone_size;
        u8  zone_size_shift;
-       u64 max_zone_append_size;
        u32 nr_zones;
        unsigned long *seq_zones;
        unsigned long *empty_zones;
index 3e26b46..56dce9f 100644 (file)
@@ -399,19 +399,19 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 
        /* map in the first page of input data */
        in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-       workspace->in_buf.src = kmap(in_page);
+       workspace->in_buf.src = page_address(in_page);
        workspace->in_buf.pos = 0;
        workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
 
 
        /* Allocate and map in the output buffer */
-       out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+       out_page = alloc_page(GFP_NOFS);
        if (out_page == NULL) {
                ret = -ENOMEM;
                goto out;
        }
        pages[nr_pages++] = out_page;
-       workspace->out_buf.dst = kmap(out_page);
+       workspace->out_buf.dst = page_address(out_page);
        workspace->out_buf.pos = 0;
        workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
 
@@ -446,19 +446,18 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
                if (workspace->out_buf.pos == workspace->out_buf.size) {
                        tot_out += PAGE_SIZE;
                        max_out -= PAGE_SIZE;
-                       kunmap(out_page);
                        if (nr_pages == nr_dest_pages) {
                                out_page = NULL;
                                ret = -E2BIG;
                                goto out;
                        }
-                       out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+                       out_page = alloc_page(GFP_NOFS);
                        if (out_page == NULL) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        pages[nr_pages++] = out_page;
-                       workspace->out_buf.dst = kmap(out_page);
+                       workspace->out_buf.dst = page_address(out_page);
                        workspace->out_buf.pos = 0;
                        workspace->out_buf.size = min_t(size_t, max_out,
                                                        PAGE_SIZE);
@@ -473,13 +472,12 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
                /* Check if we need more input */
                if (workspace->in_buf.pos == workspace->in_buf.size) {
                        tot_in += PAGE_SIZE;
-                       kunmap(in_page);
                        put_page(in_page);
 
                        start += PAGE_SIZE;
                        len -= PAGE_SIZE;
                        in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-                       workspace->in_buf.src = kmap(in_page);
+                       workspace->in_buf.src = page_address(in_page);
                        workspace->in_buf.pos = 0;
                        workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
                }
@@ -506,19 +504,18 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 
                tot_out += PAGE_SIZE;
                max_out -= PAGE_SIZE;
-               kunmap(out_page);
                if (nr_pages == nr_dest_pages) {
                        out_page = NULL;
                        ret = -E2BIG;
                        goto out;
                }
-               out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+               out_page = alloc_page(GFP_NOFS);
                if (out_page == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }
                pages[nr_pages++] = out_page;
-               workspace->out_buf.dst = kmap(out_page);
+               workspace->out_buf.dst = page_address(out_page);
                workspace->out_buf.pos = 0;
                workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
        }
@@ -534,12 +531,8 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 out:
        *out_pages = nr_pages;
        /* Cleanup */
-       if (in_page) {
-               kunmap(in_page);
+       if (in_page)
                put_page(in_page);
-       }
-       if (out_page)
-               kunmap(out_page);
        return ret;
 }
 
@@ -547,8 +540,6 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 {
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        struct page **pages_in = cb->compressed_pages;
-       u64 disk_start = cb->start;
-       struct bio *orig_bio = cb->orig_bio;
        size_t srclen = cb->compressed_len;
        ZSTD_DStream *stream;
        int ret = 0;
@@ -565,7 +556,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
                goto done;
        }
 
-       workspace->in_buf.src = kmap(pages_in[page_in_index]);
+       workspace->in_buf.src = page_address(pages_in[page_in_index]);
        workspace->in_buf.pos = 0;
        workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
 
@@ -589,7 +580,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
                workspace->out_buf.pos = 0;
 
                ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
-                               buf_start, total_out, disk_start, orig_bio);
+                               total_out - buf_start, cb, buf_start);
                if (ret == 0)
                        break;
 
@@ -601,23 +592,21 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
                        break;
 
                if (workspace->in_buf.pos == workspace->in_buf.size) {
-                       kunmap(pages_in[page_in_index++]);
+                       page_in_index++;
                        if (page_in_index >= total_pages_in) {
                                workspace->in_buf.src = NULL;
                                ret = -EIO;
                                goto done;
                        }
                        srclen -= PAGE_SIZE;
-                       workspace->in_buf.src = kmap(pages_in[page_in_index]);
+                       workspace->in_buf.src = page_address(pages_in[page_in_index]);
                        workspace->in_buf.pos = 0;
                        workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
                }
        }
        ret = 0;
-       zero_fill_bio(orig_bio);
+       zero_fill_bio(cb->orig_bio);
 done:
-       if (workspace->in_buf.src)
-               kunmap(pages_in[page_in_index]);
        return ret;
 }
 
index 6290c3a..bd6a9e9 100644 (file)
@@ -1912,7 +1912,7 @@ EXPORT_SYMBOL(page_zero_new_buffers);
 
 static void
 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
-               struct iomap *iomap)
+               const struct iomap *iomap)
 {
        loff_t offset = block << inode->i_blkbits;
 
@@ -1966,7 +1966,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
 }
 
 int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
-               get_block_t *get_block, struct iomap *iomap)
+               get_block_t *get_block, const struct iomap *iomap)
 {
        unsigned from = pos & (PAGE_SIZE - 1);
        unsigned to = from + len;
index a1e2813..7e7a897 100644 (file)
@@ -1395,9 +1395,11 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
                ret = VM_FAULT_SIGBUS;
        } else {
                struct address_space *mapping = inode->i_mapping;
-               struct page *page = find_or_create_page(mapping, 0,
-                                               mapping_gfp_constraint(mapping,
-                                               ~__GFP_FS));
+               struct page *page;
+
+               filemap_invalidate_lock_shared(mapping);
+               page = find_or_create_page(mapping, 0,
+                               mapping_gfp_constraint(mapping, ~__GFP_FS));
                if (!page) {
                        ret = VM_FAULT_OOM;
                        goto out_inline;
@@ -1418,6 +1420,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
                vmf->page = page;
                ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
 out_inline:
+               filemap_invalidate_unlock_shared(mapping);
                dout("filemap_fault %p %llu read inline data ret %x\n",
                     inode, off, ret);
        }
index 7bdefd0..39db97f 100644 (file)
@@ -1743,7 +1743,11 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
 
 struct ceph_cap_flush *ceph_alloc_cap_flush(void)
 {
-       return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
+       struct ceph_cap_flush *cf;
+
+       cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
+       cf->is_capsnap = false;
+       return cf;
 }
 
 void ceph_free_cap_flush(struct ceph_cap_flush *cf)
@@ -1778,7 +1782,7 @@ static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc,
                prev->wake = true;
                wake = false;
        }
-       list_del(&cf->g_list);
+       list_del_init(&cf->g_list);
        return wake;
 }
 
@@ -1793,7 +1797,7 @@ static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci,
                prev->wake = true;
                wake = false;
        }
-       list_del(&cf->i_list);
+       list_del_init(&cf->i_list);
        return wake;
 }
 
@@ -2352,7 +2356,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
        ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
 
        list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) {
-               if (!cf->caps) {
+               if (cf->is_capsnap) {
                        last_snap_flush = cf->tid;
                        break;
                }
@@ -2371,7 +2375,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
 
                first_tid = cf->tid + 1;
 
-               if (cf->caps) {
+               if (!cf->is_capsnap) {
                        struct cap_msg_args arg;
 
                        dout("kick_flushing_caps %p cap %p tid %llu %s\n",
@@ -3516,7 +3520,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                        cleaned = cf->caps;
 
                /* Is this a capsnap? */
-               if (cf->caps == 0)
+               if (cf->is_capsnap)
                        continue;
 
                if (cf->tid <= flush_tid) {
@@ -3589,8 +3593,9 @@ out:
        while (!list_empty(&to_remove)) {
                cf = list_first_entry(&to_remove,
                                      struct ceph_cap_flush, i_list);
-               list_del(&cf->i_list);
-               ceph_free_cap_flush(cf);
+               list_del_init(&cf->i_list);
+               if (!cf->is_capsnap)
+                       ceph_free_cap_flush(cf);
        }
 
        if (wake_ci)
@@ -4150,11 +4155,19 @@ bad:
 
 /*
  * Delayed work handler to process end of delayed cap release LRU list.
+ *
+ * If new caps are added to the list while processing it, these won't get
+ * processed in this run.  In this case, the ci->i_hold_caps_max will be
+ * returned so that the work can be scheduled accordingly.
  */
-void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
+unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
 {
        struct inode *inode;
        struct ceph_inode_info *ci;
+       struct ceph_mount_options *opt = mdsc->fsc->mount_options;
+       unsigned long delay_max = opt->caps_wanted_delay_max * HZ;
+       unsigned long loop_start = jiffies;
+       unsigned long delay = 0;
 
        dout("check_delayed_caps\n");
        spin_lock(&mdsc->cap_delay_lock);
@@ -4162,6 +4175,11 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
                ci = list_first_entry(&mdsc->cap_delay_list,
                                      struct ceph_inode_info,
                                      i_cap_delay_list);
+               if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) {
+                       dout("%s caps added recently.  Exiting loop", __func__);
+                       delay = ci->i_hold_caps_max;
+                       break;
+               }
                if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
                    time_before(jiffies, ci->i_hold_caps_max))
                        break;
@@ -4177,6 +4195,8 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
                }
        }
        spin_unlock(&mdsc->cap_delay_lock);
+
+       return delay;
 }
 
 /*
index d1755ac..e1d605a 100644 (file)
@@ -2088,6 +2088,7 @@ static long ceph_fallocate(struct file *file, int mode,
        if (ret < 0)
                goto unlock;
 
+       filemap_invalidate_lock(inode->i_mapping);
        ceph_zero_pagecache_range(inode, offset, length);
        ret = ceph_zero_objects(inode, offset, length);
 
@@ -2100,6 +2101,7 @@ static long ceph_fallocate(struct file *file, int mode,
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }
+       filemap_invalidate_unlock(inode->i_mapping);
 
        ceph_put_cap_refs(ci, got);
 unlock:
index fa8a847..bdeb271 100644 (file)
@@ -240,9 +240,6 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       /* No mandatory locks */
-       if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
 
        dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);
 
index 9db1b39..0b69aec 100644 (file)
@@ -1616,7 +1616,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                spin_lock(&mdsc->cap_dirty_lock);
 
                list_for_each_entry(cf, &to_remove, i_list)
-                       list_del(&cf->g_list);
+                       list_del_init(&cf->g_list);
 
                if (!list_empty(&ci->i_dirty_item)) {
                        pr_warn_ratelimited(
@@ -1668,8 +1668,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                struct ceph_cap_flush *cf;
                cf = list_first_entry(&to_remove,
                                      struct ceph_cap_flush, i_list);
-               list_del(&cf->i_list);
-               ceph_free_cap_flush(cf);
+               list_del_init(&cf->i_list);
+               if (!cf->is_capsnap)
+                       ceph_free_cap_flush(cf);
        }
 
        wake_up_all(&ci->i_cap_wq);
@@ -4490,22 +4491,29 @@ void inc_session_sequence(struct ceph_mds_session *s)
 }
 
 /*
- * delayed work -- periodically trim expired leases, renew caps with mds
+ * delayed work -- periodically trim expired leases, renew caps with mds.  If
+ * the @delay parameter is set to 0 or if it's more than 5 secs, the default
+ * workqueue delay value of 5 secs will be used.
  */
-static void schedule_delayed(struct ceph_mds_client *mdsc)
+static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
 {
-       int delay = 5;
-       unsigned hz = round_jiffies_relative(HZ * delay);
-       schedule_delayed_work(&mdsc->delayed_work, hz);
+       unsigned long max_delay = HZ * 5;
+
+       /* 5 secs default delay */
+       if (!delay || (delay > max_delay))
+               delay = max_delay;
+       schedule_delayed_work(&mdsc->delayed_work,
+                             round_jiffies_relative(delay));
 }
 
 static void delayed_work(struct work_struct *work)
 {
-       int i;
        struct ceph_mds_client *mdsc =
                container_of(work, struct ceph_mds_client, delayed_work.work);
+       unsigned long delay;
        int renew_interval;
        int renew_caps;
+       int i;
 
        dout("mdsc delayed_work\n");
 
@@ -4545,7 +4553,7 @@ static void delayed_work(struct work_struct *work)
        }
        mutex_unlock(&mdsc->mutex);
 
-       ceph_check_delayed_caps(mdsc);
+       delay = ceph_check_delayed_caps(mdsc);
 
        ceph_queue_cap_reclaim_work(mdsc);
 
@@ -4553,7 +4561,7 @@ static void delayed_work(struct work_struct *work)
 
        maybe_recover_session(mdsc);
 
-       schedule_delayed(mdsc);
+       schedule_delayed(mdsc, delay);
 }
 
 int ceph_mdsc_init(struct ceph_fs_client *fsc)
@@ -5030,7 +5038,7 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
                          mdsc->mdsmap->m_epoch);
 
        mutex_unlock(&mdsc->mutex);
-       schedule_delayed(mdsc);
+       schedule_delayed(mdsc, 0);
        return;
 
 bad_unlock:
index abd9af7..3c444b9 100644 (file)
@@ -394,9 +394,11 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
 {
        int i;
 
-       for (i = 0; i < m->possible_max_rank; i++)
-               kfree(m->m_info[i].export_targets);
-       kfree(m->m_info);
+       if (m->m_info) {
+               for (i = 0; i < m->possible_max_rank; i++)
+                       kfree(m->m_info[i].export_targets);
+               kfree(m->m_info);
+       }
        kfree(m->m_data_pg_pools);
        kfree(m);
 }
index 4ac0606..15105f9 100644 (file)
@@ -67,19 +67,19 @@ void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
 {
        lockdep_assert_held(&mdsc->snap_rwsem);
 
-       dout("get_realm %p %d -> %d\n", realm,
-            atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
        /*
-        * since we _only_ increment realm refs or empty the empty
-        * list with snap_rwsem held, adjusting the empty list here is
-        * safe.  we do need to protect against concurrent empty list
-        * additions, however.
+        * The 0->1 and 1->0 transitions must take the snap_empty_lock
+        * atomically with the refcount change. Go ahead and bump the
+        * nref here, unless it's 0, in which case we take the spinlock
+        * and then do the increment and remove it from the list.
         */
-       if (atomic_inc_return(&realm->nref) == 1) {
-               spin_lock(&mdsc->snap_empty_lock);
+       if (atomic_inc_not_zero(&realm->nref))
+               return;
+
+       spin_lock(&mdsc->snap_empty_lock);
+       if (atomic_inc_return(&realm->nref) == 1)
                list_del_init(&realm->empty_item);
-               spin_unlock(&mdsc->snap_empty_lock);
-       }
+       spin_unlock(&mdsc->snap_empty_lock);
 }
 
 static void __insert_snap_realm(struct rb_root *root,
@@ -208,28 +208,28 @@ static void __put_snap_realm(struct ceph_mds_client *mdsc,
 {
        lockdep_assert_held_write(&mdsc->snap_rwsem);
 
-       dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
-            atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
+       /*
+        * We do not require the snap_empty_lock here, as any caller that
+        * increments the value must hold the snap_rwsem.
+        */
        if (atomic_dec_and_test(&realm->nref))
                __destroy_snap_realm(mdsc, realm);
 }
 
 /*
- * caller needn't hold any locks
+ * See comments in ceph_get_snap_realm. Caller needn't hold any locks.
  */
 void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
                         struct ceph_snap_realm *realm)
 {
-       dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
-            atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
-       if (!atomic_dec_and_test(&realm->nref))
+       if (!atomic_dec_and_lock(&realm->nref, &mdsc->snap_empty_lock))
                return;
 
        if (down_write_trylock(&mdsc->snap_rwsem)) {
+               spin_unlock(&mdsc->snap_empty_lock);
                __destroy_snap_realm(mdsc, realm);
                up_write(&mdsc->snap_rwsem);
        } else {
-               spin_lock(&mdsc->snap_empty_lock);
                list_add(&realm->empty_item, &mdsc->snap_empty);
                spin_unlock(&mdsc->snap_empty_lock);
        }
@@ -487,6 +487,9 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
                return;
        }
+       capsnap->cap_flush.is_capsnap = true;
+       INIT_LIST_HEAD(&capsnap->cap_flush.i_list);
+       INIT_LIST_HEAD(&capsnap->cap_flush.g_list);
 
        spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
index 6b6332a..b1a3636 100644 (file)
@@ -182,8 +182,9 @@ struct ceph_cap {
 
 struct ceph_cap_flush {
        u64 tid;
-       int caps; /* 0 means capsnap */
+       int caps;
        bool wake; /* wake up flush waiters when finish ? */
+       bool is_capsnap; /* true means capsnap */
        struct list_head g_list; // global
        struct list_head i_list; // per inode
 };
@@ -1167,7 +1168,7 @@ extern void ceph_flush_snaps(struct ceph_inode_info *ci,
 extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
 extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                            struct ceph_mds_session *session);
-extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
+extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
 extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
 extern int  ceph_drop_caps_for_unlink(struct inode *inode);
 extern int ceph_encode_inode_release(void **p, struct inode *inode,
index 7364950..3b7e3b9 100644 (file)
@@ -4,19 +4,16 @@ config CIFS
        depends on INET
        select NLS
        select CRYPTO
-       select CRYPTO_MD4
        select CRYPTO_MD5
        select CRYPTO_SHA256
        select CRYPTO_SHA512
        select CRYPTO_CMAC
        select CRYPTO_HMAC
-       select CRYPTO_LIB_ARC4
        select CRYPTO_AEAD2
        select CRYPTO_CCM
        select CRYPTO_GCM
        select CRYPTO_ECB
        select CRYPTO_AES
-       select CRYPTO_LIB_DES
        select KEYS
        select DNS_RESOLVER
        select ASN1
@@ -85,33 +82,6 @@ config CIFS_ALLOW_INSECURE_LEGACY
 
          If unsure, say Y.
 
-config CIFS_WEAK_PW_HASH
-       bool "Support legacy servers which use weaker LANMAN security"
-       depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY
-       help
-         Modern CIFS servers including Samba and most Windows versions
-         (since 1997) support stronger NTLM (and even NTLMv2 and Kerberos)
-         security mechanisms. These hash the password more securely
-         than the mechanisms used in the older LANMAN version of the
-         SMB protocol but LANMAN based authentication is needed to
-         establish sessions with some old SMB servers.
-
-         Enabling this option allows the cifs module to mount to older
-         LANMAN based servers such as OS/2 and Windows 95, but such
-         mounts may be less secure than mounts using NTLM or more recent
-         security mechanisms if you are on a public network.  Unless you
-         have a need to access old SMB servers (and are on a private
-         network) you probably want to say N.  Even if this support
-         is enabled in the kernel build, LANMAN authentication will not be
-         used automatically. At runtime LANMAN mounts are disabled but
-         can be set to required (or optional) either in
-         /proc/fs/cifs (see Documentation/admin-guide/cifs/usage.rst for
-         more detail) or via an option on the mount command. This support
-         is disabled by default in order to reduce the possibility of a
-         downgrade attack.
-
-         If unsure, say N.
-
 config CIFS_UPCALL
        bool "Kerberos/SPNEGO advanced session setup"
        depends on CIFS
index 8857ac7..51a824f 100644 (file)
@@ -250,9 +250,6 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
        seq_printf(m, ",ALLOW_INSECURE_LEGACY");
 #endif
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-       seq_printf(m, ",WEAK_PW_HASH");
-#endif
 #ifdef CONFIG_CIFS_POSIX
        seq_printf(m, ",CIFS_POSIX");
 #endif
@@ -929,14 +926,6 @@ cifs_security_flags_handle_must_flags(unsigned int *flags)
                *flags = CIFSSEC_MUST_NTLMSSP;
        else if ((*flags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
                *flags = CIFSSEC_MUST_NTLMV2;
-       else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM)
-               *flags = CIFSSEC_MUST_NTLM;
-       else if (CIFSSEC_MUST_LANMAN &&
-                (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
-               *flags = CIFSSEC_MUST_LANMAN;
-       else if (CIFSSEC_MUST_PLNTXT &&
-                (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
-               *flags = CIFSSEC_MUST_PLNTXT;
 
        *flags |= signflags;
 }
index 93b4781..12bde7b 100644 (file)
@@ -147,8 +147,6 @@ static int cifs_swn_send_register_message(struct cifs_swn_reg *swnreg)
                        goto nlmsg_fail;
                }
                break;
-       case LANMAN:
-       case NTLM:
        case NTLMv2:
        case RawNTLMSSP:
                ret = cifs_swn_auth_info_ntlm(swnreg->tcon, skb);
index 9bd03a2..171ad8b 100644 (file)
@@ -358,14 +358,9 @@ cifs_strndup_from_utf16(const char *src, const int maxlen,
                if (!dst)
                        return NULL;
                cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage,
-                              NO_MAP_UNI_RSVD);
+                               NO_MAP_UNI_RSVD);
        } else {
-               len = strnlen(src, maxlen);
-               len++;
-               dst = kmalloc(len, GFP_KERNEL);
-               if (!dst)
-                       return NULL;
-               strlcpy(dst, src, len);
+               dst = kstrndup(src, maxlen, GFP_KERNEL);
        }
 
        return dst;
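
The replacement relies on kstrndup() doing exactly what the removed lines did by hand: bound the length at maxlen, allocate len + 1 bytes, copy, and NUL-terminate, returning NULL on allocation failure. Roughly, as a behavioural sketch rather than the actual mm/util.c code:

	static char *kstrndup_sketch(const char *s, size_t max, gfp_t gfp)
	{
		size_t len = strnlen(s, max);
		char *buf = kmalloc(len + 1, gfp);

		if (buf) {
			memcpy(buf, s, len);
			buf[len] = '\0';
		}
		return buf;
	}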
index ecf15d8..6679e07 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/random.h>
 #include <linux/highmem.h>
 #include <linux/fips.h>
-#include <crypto/arc4.h>
+#include "../cifs_common/arc4.h"
 #include <crypto/aead.h>
 
 int __cifs_calc_signature(struct smb_rqst *rqst,
@@ -250,87 +250,6 @@ int cifs_verify_signature(struct smb_rqst *rqst,
 
 }
 
-/* first calculate 24 bytes ntlm response and then 16 byte session key */
-int setup_ntlm_response(struct cifs_ses *ses, const struct nls_table *nls_cp)
-{
-       int rc = 0;
-       unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE;
-       char temp_key[CIFS_SESS_KEY_SIZE];
-
-       if (!ses)
-               return -EINVAL;
-
-       ses->auth_key.response = kmalloc(temp_len, GFP_KERNEL);
-       if (!ses->auth_key.response)
-               return -ENOMEM;
-
-       ses->auth_key.len = temp_len;
-
-       rc = SMBNTencrypt(ses->password, ses->server->cryptkey,
-                       ses->auth_key.response + CIFS_SESS_KEY_SIZE, nls_cp);
-       if (rc) {
-               cifs_dbg(FYI, "%s Can't generate NTLM response, error: %d\n",
-                        __func__, rc);
-               return rc;
-       }
-
-       rc = E_md4hash(ses->password, temp_key, nls_cp);
-       if (rc) {
-               cifs_dbg(FYI, "%s Can't generate NT hash, error: %d\n",
-                        __func__, rc);
-               return rc;
-       }
-
-       rc = mdfour(ses->auth_key.response, temp_key, CIFS_SESS_KEY_SIZE);
-       if (rc)
-               cifs_dbg(FYI, "%s Can't generate NTLM session key, error: %d\n",
-                        __func__, rc);
-
-       return rc;
-}
-
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
-                       char *lnm_session_key)
-{
-       int i, len;
-       int rc;
-       char password_with_pad[CIFS_ENCPWD_SIZE] = {0};
-
-       if (password) {
-               for (len = 0; len < CIFS_ENCPWD_SIZE; len++)
-                       if (!password[len])
-                               break;
-
-               memcpy(password_with_pad, password, len);
-       }
-
-       if (!encrypt && global_secflags & CIFSSEC_MAY_PLNTXT) {
-               memcpy(lnm_session_key, password_with_pad,
-                       CIFS_ENCPWD_SIZE);
-               return 0;
-       }
-
-       /* calculate old style session key */
-       /* calling toupper is less broken than repeatedly
-       calling nls_toupper would be since that will never
-       work for UTF8, but neither handles multibyte code pages
-       but the only alternative would be converting to UCS-16 (Unicode)
-       (using a routine something like UniStrupr) then
-       uppercasing and then converting back from Unicode - which
-       would only worth doing it if we knew it were utf8. Basically
-       utf8 and other multibyte codepages each need their own strupper
-       function since a byte at a time will ont work. */
-
-       for (i = 0; i < CIFS_ENCPWD_SIZE; i++)
-               password_with_pad[i] = toupper(password_with_pad[i]);
-
-       rc = SMBencrypt(password_with_pad, cryptkey, lnm_session_key);
-
-       return rc;
-}
-#endif /* CIFS_WEAK_PW_HASH */
-
 /* Build a proper attribute value/target info pairs blob.
  * Fill in netbios and dns domain name and workstation name
  * and client time (total five av pairs and + one end of fields indicator.
@@ -780,9 +699,9 @@ calc_seckey(struct cifs_ses *ses)
                return -ENOMEM;
        }
 
-       arc4_setkey(ctx_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE);
-       arc4_crypt(ctx_arc4, ses->ntlmssp->ciphertext, sec_key,
-                  CIFS_CPHTXT_SIZE);
+       cifs_arc4_setkey(ctx_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE);
+       cifs_arc4_crypt(ctx_arc4, ses->ntlmssp->ciphertext, sec_key,
+                       CIFS_CPHTXT_SIZE);
 
        /* make secondary_key/nonce as session key */
        memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE);
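
The cifs_arc4_* helpers keep the familiar setkey-then-crypt shape of the crypto library version, but live in the shared fs/cifs_common code so the client no longer selects CRYPTO_LIB_ARC4. A hedged sketch of the calling convention (the context type and the kfree_sensitive() cleanup are assumptions based on how such a helper is typically used; key/in/out/len are placeholders):

	struct arc4_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	cifs_arc4_setkey(ctx, key, key_len);	/* build the RC4 state from the session key */
	cifs_arc4_crypt(ctx, out, in, len);	/* stream cipher: out may be the same buffer as in */
	kfree_sensitive(ctx);			/* scrub the key schedule before freeing */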
index 64b71c4..8c20bfa 100644 (file)
@@ -399,7 +399,6 @@ cifs_evict_inode(struct inode *inode)
 {
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
-       cifs_fscache_release_inode_cookie(inode);
 }
 
 static void
@@ -438,15 +437,9 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
        seq_puts(s, ",sec=");
 
        switch (ses->sectype) {
-       case LANMAN:
-               seq_puts(s, "lanman");
-               break;
        case NTLMv2:
                seq_puts(s, "ntlmv2");
                break;
-       case NTLM:
-               seq_puts(s, "ntlm");
-               break;
        case Kerberos:
                seq_puts(s, "krb5");
                break;
@@ -1755,7 +1748,6 @@ MODULE_DESCRIPTION
 MODULE_VERSION(CIFS_VERSION);
 MODULE_SOFTDEP("ecb");
 MODULE_SOFTDEP("hmac");
-MODULE_SOFTDEP("md4");
 MODULE_SOFTDEP("md5");
 MODULE_SOFTDEP("nls");
 MODULE_SOFTDEP("aes");
index c0bfc2f..c068f7d 100644 (file)
@@ -114,8 +114,6 @@ enum statusEnum {
 
 enum securityEnum {
        Unspecified = 0,        /* not specified */
-       LANMAN,                 /* Legacy LANMAN auth */
-       NTLM,                   /* Legacy NTLM012 auth with NTLM hash */
        NTLMv2,                 /* Legacy NTLM auth with NTLMv2 hash */
        RawNTLMSSP,             /* NTLMSSP without SPNEGO, NTLMv2 hash */
        Kerberos,               /* Kerberos via SPNEGO */
@@ -634,7 +632,6 @@ struct TCP_Server_Info {
        struct session_key session_key;
        unsigned long lstrp; /* when we got last response from this server */
        struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
-#define        CIFS_NEGFLAVOR_LANMAN   0       /* wct == 13, LANMAN */
 #define        CIFS_NEGFLAVOR_UNENCAP  1       /* wct == 17, but no ext_sec */
 #define        CIFS_NEGFLAVOR_EXTENDED 2       /* wct == 17, ext_sec bit set */
        char    negflavor;      /* NEGOTIATE response flavor */
@@ -1611,6 +1608,11 @@ struct dfs_info3_param {
        int ttl;
 };
 
+struct file_list {
+       struct list_head list;
+       struct cifsFileInfo *cfile;
+};
+
 /*
  * common struct for holding inode info when searching for or updating an
  * inode with new info
@@ -1729,16 +1731,8 @@ static inline bool is_retryable_error(int error)
 
 /* Security Flags: indicate type of session setup needed */
 #define   CIFSSEC_MAY_SIGN     0x00001
-#define   CIFSSEC_MAY_NTLM     0x00002
 #define   CIFSSEC_MAY_NTLMV2   0x00004
 #define   CIFSSEC_MAY_KRB5     0x00008
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-#define   CIFSSEC_MAY_LANMAN   0x00010
-#define   CIFSSEC_MAY_PLNTXT   0x00020
-#else
-#define   CIFSSEC_MAY_LANMAN    0
-#define   CIFSSEC_MAY_PLNTXT    0
-#endif /* weak passwords */
 #define   CIFSSEC_MAY_SEAL     0x00040 /* not supported yet */
 #define   CIFSSEC_MAY_NTLMSSP  0x00080 /* raw ntlmssp with ntlmv2 */
 
@@ -1746,32 +1740,19 @@ static inline bool is_retryable_error(int error)
 /* note that only one of the following can be set so the
 result of setting MUST flags more than once will be to
 require use of the stronger protocol */
-#define   CIFSSEC_MUST_NTLM    0x02002
 #define   CIFSSEC_MUST_NTLMV2  0x04004
 #define   CIFSSEC_MUST_KRB5    0x08008
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-#define   CIFSSEC_MUST_LANMAN  0x10010
-#define   CIFSSEC_MUST_PLNTXT  0x20020
-#ifdef CONFIG_CIFS_UPCALL
-#define   CIFSSEC_MASK          0xBF0BF /* allows weak security but also krb5 */
-#else
-#define   CIFSSEC_MASK          0xB70B7 /* current flags supported if weak */
-#endif /* UPCALL */
-#else /* do not allow weak pw hash */
-#define   CIFSSEC_MUST_LANMAN  0
-#define   CIFSSEC_MUST_PLNTXT  0
 #ifdef CONFIG_CIFS_UPCALL
 #define   CIFSSEC_MASK          0x8F08F /* flags supported if no weak allowed */
 #else
 #define          CIFSSEC_MASK          0x87087 /* flags supported if no weak allowed */
 #endif /* UPCALL */
-#endif /* WEAK_PW_HASH */
 #define   CIFSSEC_MUST_SEAL    0x40040 /* not supported yet */
 #define   CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */
 
 #define   CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP)
-#define   CIFSSEC_MAX (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_NTLMV2)
-#define   CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLM | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_LANMAN | CIFSSEC_MAY_PLNTXT | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)
+#define   CIFSSEC_MAX (CIFSSEC_MUST_NTLMV2)
+#define   CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)
 /*
  *****************************************************************
  * All constants go here
@@ -1935,10 +1916,6 @@ static inline char *get_security_type_str(enum securityEnum sectype)
                return "Kerberos";
        case NTLMv2:
                return "NTLMv2";
-       case NTLM:
-               return "NTLM";
-       case LANMAN:
-               return "LANMAN";
        default:
                return "Unknown";
        }
index f6e2350..dc920e2 100644 (file)
 #include <asm/unaligned.h>
 #include "smbfsctl.h"
 
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-#define LANMAN_PROT 0
-#define LANMAN2_PROT 1
-#define CIFS_PROT   2
-#else
 #define CIFS_PROT   0
-#endif
 #define POSIX_PROT  (CIFS_PROT+1)
 #define BAD_PROT 0xFFFF
 
@@ -505,30 +499,8 @@ typedef struct negotiate_req {
        unsigned char DialectsArray[1];
 } __attribute__((packed)) NEGOTIATE_REQ;
 
-/* Dialect index is 13 for LANMAN */
-
 #define MIN_TZ_ADJ (15 * 60) /* minimum grid for timezones in seconds */
 
-typedef struct lanman_neg_rsp {
-       struct smb_hdr hdr;     /* wct = 13 */
-       __le16 DialectIndex;
-       __le16 SecurityMode;
-       __le16 MaxBufSize;
-       __le16 MaxMpxCount;
-       __le16 MaxNumberVcs;
-       __le16 RawMode;
-       __le32 SessionKey;
-       struct {
-               __le16 Time;
-               __le16 Date;
-       } __attribute__((packed)) SrvTime;
-       __le16 ServerTimeZone;
-       __le16 EncryptionKeyLength;
-       __le16 Reserved;
-       __u16  ByteCount;
-       unsigned char EncryptionKey[1];
-} __attribute__((packed)) LANMAN_NEG_RSP;
-
 #define READ_RAW_ENABLE 1
 #define WRITE_RAW_ENABLE 2
 #define RAW_ENABLE (READ_RAW_ENABLE | WRITE_RAW_ENABLE)
index e0def0f..f9740c2 100644 (file)
@@ -498,19 +498,12 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
 extern int cifs_verify_signature(struct smb_rqst *rqst,
                                 struct TCP_Server_Info *server,
                                __u32 expected_sequence_number);
-extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *,
-                       const struct nls_table *);
-extern int setup_ntlm_response(struct cifs_ses *, const struct nls_table *);
 extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *);
 extern void cifs_crypto_secmech_release(struct TCP_Server_Info *server);
 extern int calc_seckey(struct cifs_ses *);
 extern int generate_smb30signingkey(struct cifs_ses *);
 extern int generate_smb311signingkey(struct cifs_ses *);
 
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-extern int calc_lanman_hash(const char *password, const char *cryptkey,
-                               bool encrypt, char *lnm_session_key);
-#endif /* CIFS_WEAK_PW_HASH */
 extern int CIFSSMBCopy(unsigned int xid,
                        struct cifs_tcon *source_tcon,
                        const char *fromName,
@@ -547,11 +540,8 @@ extern int check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
                              struct cifs_sb_info *cifs_sb,
                              struct cifs_fattr *fattr,
                              const unsigned char *path);
-extern int mdfour(unsigned char *, unsigned char *, int);
 extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
                        const struct nls_table *codepage);
-extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
-                       unsigned char *p24);
 
 extern int
 cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname);
index 65d1a65..a8e41c1 100644 (file)
@@ -42,10 +42,6 @@ static struct {
        int index;
        char *name;
 } protocols[] = {
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-       {LANMAN_PROT, "\2LM1.2X002"},
-       {LANMAN2_PROT, "\2LANMAN2.1"},
-#endif /* weak password hashing for legacy clients */
        {CIFS_PROT, "\2NT LM 0.12"},
        {POSIX_PROT, "\2POSIX 2"},
        {BAD_PROT, "\2"}
@@ -55,10 +51,6 @@ static struct {
        int index;
        char *name;
 } protocols[] = {
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-       {LANMAN_PROT, "\2LM1.2X002"},
-       {LANMAN2_PROT, "\2LANMAN2.1"},
-#endif /* weak password hashing for legacy clients */
        {CIFS_PROT, "\2NT LM 0.12"},
        {BAD_PROT, "\2"}
 };
@@ -66,17 +58,9 @@ static struct {
 
 /* define the number of elements in the cifs dialect array */
 #ifdef CONFIG_CIFS_POSIX
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-#define CIFS_NUM_PROT 4
-#else
 #define CIFS_NUM_PROT 2
-#endif /* CIFS_WEAK_PW_HASH */
 #else /* not posix */
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-#define CIFS_NUM_PROT 3
-#else
 #define CIFS_NUM_PROT 1
-#endif /* CONFIG_CIFS_WEAK_PW_HASH */
 #endif /* CIFS_POSIX */
 
 /*
@@ -475,89 +459,6 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
        return 0;
 }
 
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-static int
-decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
-{
-       __s16 tmp;
-       struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr;
-
-       if (server->dialect != LANMAN_PROT && server->dialect != LANMAN2_PROT)
-               return -EOPNOTSUPP;
-
-       server->sec_mode = le16_to_cpu(rsp->SecurityMode);
-       server->maxReq = min_t(unsigned int,
-                              le16_to_cpu(rsp->MaxMpxCount),
-                              cifs_max_pending);
-       set_credits(server, server->maxReq);
-       server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
-       /* set up max_read for readpages check */
-       server->max_read = server->maxBuf;
-       /* even though we do not use raw we might as well set this
-       accurately, in case we ever find a need for it */
-       if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
-               server->max_rw = 0xFF00;
-               server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE;
-       } else {
-               server->max_rw = 0;/* do not need to use raw anyway */
-               server->capabilities = CAP_MPX_MODE;
-       }
-       tmp = (__s16)le16_to_cpu(rsp->ServerTimeZone);
-       if (tmp == -1) {
-               /* OS/2 often does not set timezone therefore
-                * we must use server time to calc time zone.
-                * Could deviate slightly from the right zone.
-                * Smallest defined timezone difference is 15 minutes
-                * (i.e. Nepal).  Rounding up/down is done to match
-                * this requirement.
-                */
-               int val, seconds, remain, result;
-               struct timespec64 ts;
-               time64_t utc = ktime_get_real_seconds();
-               ts = cnvrtDosUnixTm(rsp->SrvTime.Date,
-                                   rsp->SrvTime.Time, 0);
-               cifs_dbg(FYI, "SrvTime %lld sec since 1970 (utc: %lld) diff: %lld\n",
-                        ts.tv_sec, utc,
-                        utc - ts.tv_sec);
-               val = (int)(utc - ts.tv_sec);
-               seconds = abs(val);
-               result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
-               remain = seconds % MIN_TZ_ADJ;
-               if (remain >= (MIN_TZ_ADJ / 2))
-                       result += MIN_TZ_ADJ;
-               if (val < 0)
-                       result = -result;
-               server->timeAdj = result;
-       } else {
-               server->timeAdj = (int)tmp;
-               server->timeAdj *= 60; /* also in seconds */
-       }
-       cifs_dbg(FYI, "server->timeAdj: %d seconds\n", server->timeAdj);
-
-
-       /* BB get server time for time conversions and add
-       code to use it and timezone since this is not UTC */
-
-       if (rsp->EncryptionKeyLength ==
-                       cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
-               memcpy(server->cryptkey, rsp->EncryptionKey,
-                       CIFS_CRYPTO_KEY_SIZE);
-       } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
-               return -EIO; /* need cryptkey unless plain text */
-       }
-
-       cifs_dbg(FYI, "LANMAN negotiated\n");
-       return 0;
-}
-#else
-static inline int
-decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
-{
-       cifs_dbg(VFS, "mount failed, cifs module not built with CIFS_WEAK_PW_HASH support\n");
-       return -EOPNOTSUPP;
-}
-#endif
-
 static bool
 should_set_ext_sec_flag(enum securityEnum sectype)
 {
@@ -626,16 +527,12 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
        server->dialect = le16_to_cpu(pSMBr->DialectIndex);
        cifs_dbg(FYI, "Dialect: %d\n", server->dialect);
        /* Check wct = 1 error case */
-       if ((pSMBr->hdr.WordCount < 13) || (server->dialect == BAD_PROT)) {
+       if ((pSMBr->hdr.WordCount <= 13) || (server->dialect == BAD_PROT)) {
                /* core returns wct = 1, but we do not ask for core - otherwise
                small wct just comes when dialect index is -1 indicating we
                could not negotiate a common dialect */
                rc = -EOPNOTSUPP;
                goto neg_err_exit;
-       } else if (pSMBr->hdr.WordCount == 13) {
-               server->negflavor = CIFS_NEGFLAVOR_LANMAN;
-               rc = decode_lanman_negprot_rsp(server, pSMBr);
-               goto signing_check;
        } else if (pSMBr->hdr.WordCount != 17) {
                /* unknown wct */
                rc = -EOPNOTSUPP;
@@ -677,7 +574,6 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
                server->capabilities &= ~CAP_EXTENDED_SECURITY;
        }
 
-signing_check:
        if (!rc)
                rc = cifs_enable_signing(server, ses->sign);
 neg_err_exit:
@@ -2101,6 +1997,7 @@ cifs_writev_complete(struct work_struct *work)
                else if (wdata->result < 0)
                        SetPageError(page);
                end_page_writeback(page);
+               cifs_readpage_to_fscache(inode, page);
                put_page(page);
        }
        if (wdata->result != -EAGAIN)
index 3781eee..0db3448 100644 (file)
@@ -3684,38 +3684,6 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
                *bcc_ptr = 0; /* password is null byte */
                bcc_ptr++;              /* skip password */
                /* already aligned so no need to do it below */
-       } else {
-               pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
-               /* BB FIXME add code to fail this if NTLMv2 or Kerberos
-                  specified as required (when that support is added to
-                  the vfs in the future) as only NTLM or the much
-                  weaker LANMAN (which we do not send by default) is accepted
-                  by Samba (not sure whether other servers allow
-                  NTLMv2 password here) */
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-               if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
-                   (ses->sectype == LANMAN))
-                       calc_lanman_hash(tcon->password, ses->server->cryptkey,
-                                        ses->server->sec_mode &
-                                           SECMODE_PW_ENCRYPT ? true : false,
-                                        bcc_ptr);
-               else
-#endif /* CIFS_WEAK_PW_HASH */
-               rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
-                                       bcc_ptr, nls_codepage);
-               if (rc) {
-                       cifs_dbg(FYI, "%s Can't generate NTLM rsp. Error: %d\n",
-                                __func__, rc);
-                       cifs_buf_release(smb_buffer);
-                       return rc;
-               }
-
-               bcc_ptr += CIFS_AUTH_RESP_SIZE;
-               if (ses->capabilities & CAP_UNICODE) {
-                       /* must align unicode strings */
-                       *bcc_ptr = 0; /* null byte password */
-                       bcc_ptr++;
-               }
        }
 
        if (ses->server->sign)
index 79402ca..5f8a302 100644 (file)
@@ -100,7 +100,7 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
                pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
 
-       s = dentry_path_raw(direntry, page, PAGE_SIZE);
+       s = dentry_path_raw(direntry, page, PATH_MAX);
        if (IS_ERR(s))
                return s;
        if (!s[1])      // for root we want "", not "/"
index 0a72840..d021647 100644 (file)
@@ -377,6 +377,8 @@ static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
        struct cifsLockInfo *li, *tmp;
        struct super_block *sb = inode->i_sb;
 
+       cifs_fscache_release_inode_cookie(inode);
+
        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
@@ -882,8 +884,10 @@ int cifs_close(struct inode *inode, struct file *file)
                if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
                    cinode->lease_granted &&
                    dclose) {
-                       if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags))
+                       if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
                                inode->i_ctime = inode->i_mtime = current_time(inode);
+                               cifs_fscache_update_inode_cookie(inode);
+                       }
                        spin_lock(&cinode->deferred_lock);
                        cifs_add_deferred_close(cfile, dclose);
                        if (cfile->deferred_close_scheduled &&
@@ -4170,6 +4174,10 @@ static vm_fault_t
 cifs_page_mkwrite(struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
+       struct file *file = vmf->vma->vm_file;
+       struct inode *inode = file_inode(file);
+
+       cifs_fscache_wait_on_page_write(inode, page);
 
        lock_page(page);
        return VM_FAULT_LOCKED;
@@ -4235,13 +4243,16 @@ cifs_readv_complete(struct work_struct *work)
                    (rdata->result == -EAGAIN && got_bytes)) {
                        flush_dcache_page(page);
                        SetPageUptodate(page);
-               }
+               } else
+                       SetPageError(page);
 
                unlock_page(page);
 
                if (rdata->result == 0 ||
                    (rdata->result == -EAGAIN && got_bytes))
                        cifs_readpage_to_fscache(rdata->mapping->host, page);
+               else
+                       cifs_fscache_uncache_page(rdata->mapping->host, page);
 
                got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
 
@@ -4847,17 +4858,6 @@ void cifs_oplock_break(struct work_struct *work)
                cifs_dbg(VFS, "Push locks rc = %d\n", rc);
 
 oplock_break_ack:
-       /*
-        * releasing stale oplock after recent reconnect of smb session using
-        * a now incorrect file handle is not a data integrity issue but do
-        * not bother sending an oplock release if session to server still is
-        * disconnected since oplock already released by the server
-        */
-       if (!cfile->oplock_break_cancelled) {
-               rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
-                                                            cinode);
-               cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
-       }
        /*
         * When oplock break is received and there are no active
         * file handles but cached, then schedule deferred close immediately.
@@ -4865,17 +4865,27 @@ oplock_break_ack:
         */
        spin_lock(&CIFS_I(inode)->deferred_lock);
        is_deferred = cifs_is_deferred_close(cfile, &dclose);
+       spin_unlock(&CIFS_I(inode)->deferred_lock);
        if (is_deferred &&
            cfile->deferred_close_scheduled &&
            delayed_work_pending(&cfile->deferred)) {
-               /*
-                * If there is no pending work, mod_delayed_work queues new work.
-                * So, Increase the ref count to avoid use-after-free.
-                */
-               if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-                       cifsFileInfo_get(cfile);
+               if (cancel_delayed_work(&cfile->deferred)) {
+                       _cifsFileInfo_put(cfile, false, false);
+                       goto oplock_break_done;
+               }
        }
-       spin_unlock(&CIFS_I(inode)->deferred_lock);
+       /*
+        * releasing stale oplock after recent reconnect of smb session using
+        * a now incorrect file handle is not a data integrity issue but do
+        * not bother sending an oplock release if session to server still is
+        * disconnected since oplock already released by the server
+        */
+       if (!cfile->oplock_break_cancelled) {
+               rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
+                                                            cinode);
+               cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+       }
+oplock_break_done:
        _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
        cifs_done_oplock_break(cinode);
 }
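
The reordering leans on the cancel_delayed_work() contract: it returns true only when the queued work was still pending and will now never run, so the reference taken when the deferred close was queued becomes the canceller's to drop. In generic form (obj and put_ref are placeholder names):

	if (cancel_delayed_work(&obj->dwork))
		put_ref(obj);	/* handler never ran; drop the reference it would have dropped */
	/* a false return means the handler is running or finished and releases its own reference */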
index eed59bc..3109def 100644 (file)
@@ -57,12 +57,9 @@ static const match_table_t cifs_secflavor_tokens = {
        { Opt_sec_krb5p, "krb5p" },
        { Opt_sec_ntlmsspi, "ntlmsspi" },
        { Opt_sec_ntlmssp, "ntlmssp" },
-       { Opt_ntlm, "ntlm" },
-       { Opt_sec_ntlmi, "ntlmi" },
        { Opt_sec_ntlmv2, "nontlm" },
        { Opt_sec_ntlmv2, "ntlmv2" },
        { Opt_sec_ntlmv2i, "ntlmv2i" },
-       { Opt_sec_lanman, "lanman" },
        { Opt_sec_none, "none" },
 
        { Opt_sec_err, NULL }
@@ -221,23 +218,12 @@ cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_c
        case Opt_sec_ntlmssp:
                ctx->sectype = RawNTLMSSP;
                break;
-       case Opt_sec_ntlmi:
-               ctx->sign = true;
-               fallthrough;
-       case Opt_ntlm:
-               ctx->sectype = NTLM;
-               break;
        case Opt_sec_ntlmv2i:
                ctx->sign = true;
                fallthrough;
        case Opt_sec_ntlmv2:
                ctx->sectype = NTLMv2;
                break;
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-       case Opt_sec_lanman:
-               ctx->sectype = LANMAN;
-               break;
-#endif
        case Opt_sec_none:
                ctx->nullauth = 1;
                break;
@@ -1266,10 +1252,17 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
                        ctx->posix_paths = 1;
                break;
        case Opt_unix:
-               if (result.negated)
+               if (result.negated) {
+                       if (ctx->linux_ext == 1)
+                               pr_warn_once("conflicting posix mount options specified\n");
                        ctx->linux_ext = 0;
-               else
                        ctx->no_linux_ext = 1;
+               } else {
+                       if (ctx->no_linux_ext == 1)
+                               pr_warn_once("conflicting posix mount options specified\n");
+                       ctx->linux_ext = 1;
+                       ctx->no_linux_ext = 0;
+               }
                break;
        case Opt_nocase:
                ctx->nocase = 1;
index b624397..a42ba71 100644 (file)
@@ -47,11 +47,8 @@ enum cifs_sec_param {
        Opt_sec_krb5p,
        Opt_sec_ntlmsspi,
        Opt_sec_ntlmssp,
-       Opt_ntlm,
-       Opt_sec_ntlmi,
        Opt_sec_ntlmv2,
        Opt_sec_ntlmv2i,
-       Opt_sec_lanman,
        Opt_sec_none,
 
        Opt_sec_err
index dd62503..fab47fa 100644 (file)
@@ -176,29 +176,34 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
                auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
 
                cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache);
+               /* fscache_relinquish_cookie does not seem to update auxdata */
+               fscache_update_cookie(cifsi->fscache, &auxdata);
                fscache_relinquish_cookie(cifsi->fscache, &auxdata, false);
                cifsi->fscache = NULL;
        }
 }
 
-static void cifs_fscache_disable_inode_cookie(struct inode *inode)
+void cifs_fscache_update_inode_cookie(struct inode *inode)
 {
+       struct cifs_fscache_inode_auxdata auxdata;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
        if (cifsi->fscache) {
+               memset(&auxdata, 0, sizeof(auxdata));
+               auxdata.eof = cifsi->server_eof;
+               auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
+               auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
+               auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
+               auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
+
                cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache);
-               fscache_uncache_all_inode_pages(cifsi->fscache, inode);
-               fscache_relinquish_cookie(cifsi->fscache, NULL, true);
-               cifsi->fscache = NULL;
+               fscache_update_cookie(cifsi->fscache, &auxdata);
        }
 }
 
 void cifs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
 {
-       if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
-               cifs_fscache_disable_inode_cookie(inode);
-       else
-               cifs_fscache_enable_inode_cookie(inode);
+       cifs_fscache_enable_inode_cookie(inode);
 }
 
 void cifs_fscache_reset_inode_cookie(struct inode *inode)
@@ -310,6 +315,8 @@ void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        int ret;
 
+       WARN_ON(!cifsi->fscache);
+
        cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n",
                 __func__, cifsi->fscache, page, inode);
        ret = fscache_write_page(cifsi->fscache, page,
@@ -334,3 +341,21 @@ void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
        fscache_wait_on_page_write(cookie, page);
        fscache_uncache_page(cookie, page);
 }
+
+void __cifs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
+{
+       struct cifsInodeInfo *cifsi = CIFS_I(inode);
+       struct fscache_cookie *cookie = cifsi->fscache;
+
+       cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, page, cookie);
+       fscache_wait_on_page_write(cookie, page);
+}
+
+void __cifs_fscache_uncache_page(struct inode *inode, struct page *page)
+{
+       struct cifsInodeInfo *cifsi = CIFS_I(inode);
+       struct fscache_cookie *cookie = cifsi->fscache;
+
+       cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, page, cookie);
+       fscache_uncache_page(cookie, page);
+}
index 3d55cb2..82e856b 100644 (file)
@@ -55,10 +55,13 @@ extern void cifs_fscache_get_super_cookie(struct cifs_tcon *);
 extern void cifs_fscache_release_super_cookie(struct cifs_tcon *);
 
 extern void cifs_fscache_release_inode_cookie(struct inode *);
+extern void cifs_fscache_update_inode_cookie(struct inode *inode);
 extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *);
 extern void cifs_fscache_reset_inode_cookie(struct inode *);
 
 extern void __cifs_fscache_invalidate_page(struct page *, struct inode *);
+extern void __cifs_fscache_wait_on_page_write(struct inode *inode, struct page *page);
+extern void __cifs_fscache_uncache_page(struct inode *inode, struct page *page);
 extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
 extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
 extern int __cifs_readpages_from_fscache(struct inode *,
@@ -76,6 +79,20 @@ static inline void cifs_fscache_invalidate_page(struct page *page,
                __cifs_fscache_invalidate_page(page, inode);
 }
 
+static inline void cifs_fscache_wait_on_page_write(struct inode *inode,
+                                                  struct page *page)
+{
+       if (PageFsCache(page))
+               __cifs_fscache_wait_on_page_write(inode, page);
+}
+
+static inline void cifs_fscache_uncache_page(struct inode *inode,
+                                                  struct page *page)
+{
+       if (PageFsCache(page))
+               __cifs_fscache_uncache_page(inode, page);
+}
+
 static inline int cifs_readpage_from_fscache(struct inode *inode,
                                             struct page *page)
 {
@@ -123,6 +140,7 @@ static inline void
 cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) {}
 
 static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
+static inline void cifs_fscache_update_inode_cookie(struct inode *inode) {}
 static inline void cifs_fscache_set_inode_cookie(struct inode *inode,
                                                 struct file *filp) {}
 static inline void cifs_fscache_reset_inode_cookie(struct inode *inode) {}
@@ -133,6 +151,11 @@ static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
 
 static inline void cifs_fscache_invalidate_page(struct page *page,
                        struct inode *inode) {}
+static inline void cifs_fscache_wait_on_page_write(struct inode *inode,
+                                                  struct page *page) {}
+static inline void cifs_fscache_uncache_page(struct inode *inode,
+                                                  struct page *page) {}
+
 static inline int
 cifs_readpage_from_fscache(struct inode *inode, struct page *page)
 {
index b96b253..50c01cf 100644 (file)
@@ -1625,7 +1625,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
                goto unlink_out;
        }
 
-       cifs_close_all_deferred_files(tcon);
+       cifs_close_deferred_file(CIFS_I(inode));
        if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                rc = CIFSPOSIXDelFile(xid, tcon, full_path,
@@ -2084,6 +2084,7 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
        FILE_UNIX_BASIC_INFO *info_buf_target;
        unsigned int xid;
        int rc, tmprc;
+       int retry_count = 0;
 
        if (flags & ~RENAME_NOREPLACE)
                return -EINVAL;
@@ -2113,10 +2114,24 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
                goto cifs_rename_exit;
        }
 
-       cifs_close_all_deferred_files(tcon);
+       cifs_close_deferred_file(CIFS_I(d_inode(source_dentry)));
+       if (d_inode(target_dentry) != NULL)
+               cifs_close_deferred_file(CIFS_I(d_inode(target_dentry)));
+
        rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
                            to_name);
 
+       if (rc == -EACCES) {
+               while (retry_count < 3) {
+                       cifs_close_all_deferred_files(tcon);
+                       rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
+                                           to_name);
+                       if (rc != -EACCES)
+                               break;
+                       retry_count++;
+               }
+       }
+
        /*
         * No-replace is the natural behavior for CIFS, so skip unlink hacks.
         */
@@ -2282,6 +2297,7 @@ cifs_revalidate_mapping(struct inode *inode)
 {
        int rc;
        unsigned long *flags = &CIFS_I(inode)->flags;
+       struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 
        /* swapfiles are not supposed to be shared */
        if (IS_SWAPFILE(inode))
@@ -2293,11 +2309,16 @@ cifs_revalidate_mapping(struct inode *inode)
                return rc;
 
        if (test_and_clear_bit(CIFS_INO_INVALID_MAPPING, flags)) {
+               /* for cache=singleclient, do not invalidate */
+               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
+                       goto skip_invalidate;
+
                rc = cifs_invalidate_mapping(inode);
                if (rc)
                        set_bit(CIFS_INO_INVALID_MAPPING, flags);
        }
 
+skip_invalidate:
        clear_bit_unlock(CIFS_INO_LOCK, flags);
        smp_mb__after_atomic();
        wake_up_bit(flags, CIFS_INO_LOCK);
index 844abeb..9469f1c 100644 (file)
@@ -723,13 +723,31 @@ void
 cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
 {
        struct cifsFileInfo *cfile = NULL;
-       struct cifs_deferred_close *dclose;
+       struct file_list *tmp_list, *tmp_next_list;
+       struct list_head file_head;
+
+       if (cifs_inode == NULL)
+               return;
 
+       INIT_LIST_HEAD(&file_head);
+       spin_lock(&cifs_inode->open_file_lock);
        list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
-               spin_lock(&cifs_inode->deferred_lock);
-               if (cifs_is_deferred_close(cfile, &dclose))
-                       mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
-               spin_unlock(&cifs_inode->deferred_lock);
+               if (delayed_work_pending(&cfile->deferred)) {
+                       if (cancel_delayed_work(&cfile->deferred)) {
+                               tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+                               if (tmp_list == NULL)
+                                       continue;
+                               tmp_list->cfile = cfile;
+                               list_add_tail(&tmp_list->list, &file_head);
+                       }
+               }
+       }
+       spin_unlock(&cifs_inode->open_file_lock);
+
+       list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+               _cifsFileInfo_put(tmp_list->cfile, true, false);
+               list_del(&tmp_list->list);
+               kfree(tmp_list);
        }
 }
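
This helper, and cifs_close_all_deferred_files() in the hunk below, now share the same shape: cancel pending deferred closes under the spinlock, park each successfully cancelled file on a private list, and only drop the references after the lock is released, since the final put may sleep. A condensed sketch of that pattern (lock and list names are placeholders):

	LIST_HEAD(pending);
	struct file_list *entry, *next;

	spin_lock(&lock);
	list_for_each_entry(cfile, &open_files, flist)
		if (cancel_delayed_work(&cfile->deferred)) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);	/* no sleeping under the lock */
			if (entry) {
				entry->cfile = cfile;
				list_add_tail(&entry->list, &pending);
			}
		}
	spin_unlock(&lock);

	list_for_each_entry_safe(entry, next, &pending, list) {
		_cifsFileInfo_put(entry->cfile, true, false);	/* may sleep; the lock is gone */
		list_del(&entry->list);
		kfree(entry);
	}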
 
@@ -738,20 +756,30 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
 {
        struct cifsFileInfo *cfile;
        struct list_head *tmp;
+       struct file_list *tmp_list, *tmp_next_list;
+       struct list_head file_head;
 
+       INIT_LIST_HEAD(&file_head);
        spin_lock(&tcon->open_file_lock);
        list_for_each(tmp, &tcon->openFileList) {
                cfile = list_entry(tmp, struct cifsFileInfo, tlist);
                if (delayed_work_pending(&cfile->deferred)) {
-                       /*
-                        * If there is no pending work, mod_delayed_work queues new work.
-                        * So, Increase the ref count to avoid use-after-free.
-                        */
-                       if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-                               cifsFileInfo_get(cfile);
+                       if (cancel_delayed_work(&cfile->deferred)) {
+                               tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+                               if (tmp_list == NULL)
+                                       continue;
+                               tmp_list->cfile = cfile;
+                               list_add_tail(&tmp_list->list, &file_head);
+                       }
                }
        }
        spin_unlock(&tcon->open_file_lock);
+
+       list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+               _cifsFileInfo_put(tmp_list->cfile, true, false);
+               list_del(&tmp_list->list);
+               kfree(tmp_list);
+       }
 }
 
 /* parses DFS referral V3 structure
index bfee176..54d77c9 100644 (file)
@@ -369,7 +369,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
  */
 
 static int
-initiate_cifs_search(const unsigned int xid, struct file *file,
+_initiate_cifs_search(const unsigned int xid, struct file *file,
                     const char *full_path)
 {
        __u16 search_flags;
@@ -451,6 +451,27 @@ error_exit:
        return rc;
 }
 
+static int
+initiate_cifs_search(const unsigned int xid, struct file *file,
+                    const char *full_path)
+{
+       int rc, retry_count = 0;
+
+       do {
+               rc = _initiate_cifs_search(xid, file, full_path);
+               /*
+                * If we don't have enough credits to start reading the
+                * directory just try again after short wait.
+                */
+               if (rc != -EDEADLK)
+                       break;
+
+               usleep_range(512, 2048);
+       } while (retry_count++ < 5);
+
+       return rc;
+}
+
 /* return length of unicode string in bytes */
 static int cifs_unicode_bytelen(const char *str)
 {
index c5785fd..118403f 100644 (file)
@@ -799,30 +799,16 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
                }
        case CIFS_NEGFLAVOR_UNENCAP:
                switch (requested) {
-               case NTLM:
                case NTLMv2:
                        return requested;
                case Unspecified:
                        if (global_secflags & CIFSSEC_MAY_NTLMV2)
                                return NTLMv2;
-                       if (global_secflags & CIFSSEC_MAY_NTLM)
-                               return NTLM;
                        break;
                default:
                        break;
                }
-               fallthrough;    /* to attempt LANMAN authentication next */
-       case CIFS_NEGFLAVOR_LANMAN:
-               switch (requested) {
-               case LANMAN:
-                       return requested;
-               case Unspecified:
-                       if (global_secflags & CIFSSEC_MAY_LANMAN)
-                               return LANMAN;
-                       fallthrough;
-               default:
-                       return Unspecified;
-               }
+               fallthrough;
        default:
                return Unspecified;
        }
@@ -877,7 +863,7 @@ sess_alloc_buffer(struct sess_data *sess_data, int wct)
        return 0;
 
 out_free_smb_buf:
-       kfree(smb_buf);
+       cifs_small_buf_release(smb_buf);
        sess_data->iov[0].iov_base = NULL;
        sess_data->iov[0].iov_len = 0;
        sess_data->buf0_type = CIFS_NO_BUFFER;
@@ -947,230 +933,6 @@ sess_sendreceive(struct sess_data *sess_data)
        return rc;
 }
 
-/*
- * LANMAN and plaintext are less secure and off by default.
- * So we make this explicitly be turned on in kconfig (in the
- * build) and turned on at runtime (changed from the default)
- * in proc/fs/cifs or via mount parm.  Unfortunately this is
- * needed for old Win (e.g. Win95), some obscure NAS and OS/2
- */
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-static void
-sess_auth_lanman(struct sess_data *sess_data)
-{
-       int rc = 0;
-       struct smb_hdr *smb_buf;
-       SESSION_SETUP_ANDX *pSMB;
-       char *bcc_ptr;
-       struct cifs_ses *ses = sess_data->ses;
-       char lnm_session_key[CIFS_AUTH_RESP_SIZE];
-       __u16 bytes_remaining;
-
-       /* lanman 2 style sessionsetup */
-       /* wct = 10 */
-       rc = sess_alloc_buffer(sess_data, 10);
-       if (rc)
-               goto out;
-
-       pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
-       bcc_ptr = sess_data->iov[2].iov_base;
-       (void)cifs_ssetup_hdr(ses, pSMB);
-
-       pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
-
-       if (ses->user_name != NULL) {
-               /* no capabilities flags in old lanman negotiation */
-               pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
-
-               /* Calculate hash with password and copy into bcc_ptr.
-                * Encryption Key (stored as in cryptkey) gets used if the
-                * security mode bit in Negotiate Protocol response states
-                * to use challenge/response method (i.e. Password bit is 1).
-                */
-               rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
-                                     ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
-                                     true : false, lnm_session_key);
-               if (rc)
-                       goto out;
-
-               memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
-               bcc_ptr += CIFS_AUTH_RESP_SIZE;
-       } else {
-               pSMB->old_req.PasswordLength = 0;
-       }
-
-       /*
-        * can not sign if LANMAN negotiated so no need
-        * to calculate signing key? but what if server
-        * changed to do higher than lanman dialect and
-        * we reconnected would we ever calc signing_key?
-        */
-
-       cifs_dbg(FYI, "Negotiating LANMAN setting up strings\n");
-       /* Unicode not allowed for LANMAN dialects */
-       ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
-
-       sess_data->iov[2].iov_len = (long) bcc_ptr -
-                       (long) sess_data->iov[2].iov_base;
-
-       rc = sess_sendreceive(sess_data);
-       if (rc)
-               goto out;
-
-       pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
-       smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
-
-       /* lanman response has a word count of 3 */
-       if (smb_buf->WordCount != 3) {
-               rc = -EIO;
-               cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
-               goto out;
-       }
-
-       if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
-               cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
-
-       ses->Suid = smb_buf->Uid;   /* UID left in wire format (le) */
-       cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
-
-       bytes_remaining = get_bcc(smb_buf);
-       bcc_ptr = pByteArea(smb_buf);
-
-       /* BB check if Unicode and decode strings */
-       if (bytes_remaining == 0) {
-               /* no string area to decode, do nothing */
-       } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
-               /* unicode string area must be word-aligned */
-               if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
-                       ++bcc_ptr;
-                       --bytes_remaining;
-               }
-               decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
-                                     sess_data->nls_cp);
-       } else {
-               decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
-                                   sess_data->nls_cp);
-       }
-
-       rc = sess_establish_session(sess_data);
-out:
-       sess_data->result = rc;
-       sess_data->func = NULL;
-       sess_free_buffer(sess_data);
-}
-
-#endif
-
-static void
-sess_auth_ntlm(struct sess_data *sess_data)
-{
-       int rc = 0;
-       struct smb_hdr *smb_buf;
-       SESSION_SETUP_ANDX *pSMB;
-       char *bcc_ptr;
-       struct cifs_ses *ses = sess_data->ses;
-       __u32 capabilities;
-       __u16 bytes_remaining;
-
-       /* old style NTLM sessionsetup */
-       /* wct = 13 */
-       rc = sess_alloc_buffer(sess_data, 13);
-       if (rc)
-               goto out;
-
-       pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
-       bcc_ptr = sess_data->iov[2].iov_base;
-       capabilities = cifs_ssetup_hdr(ses, pSMB);
-
-       pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
-       if (ses->user_name != NULL) {
-               pSMB->req_no_secext.CaseInsensitivePasswordLength =
-                               cpu_to_le16(CIFS_AUTH_RESP_SIZE);
-               pSMB->req_no_secext.CaseSensitivePasswordLength =
-                               cpu_to_le16(CIFS_AUTH_RESP_SIZE);
-
-               /* calculate ntlm response and session key */
-               rc = setup_ntlm_response(ses, sess_data->nls_cp);
-               if (rc) {
-                       cifs_dbg(VFS, "Error %d during NTLM authentication\n",
-                                        rc);
-                       goto out;
-               }
-
-               /* copy ntlm response */
-               memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
-                               CIFS_AUTH_RESP_SIZE);
-               bcc_ptr += CIFS_AUTH_RESP_SIZE;
-               memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
-                               CIFS_AUTH_RESP_SIZE);
-               bcc_ptr += CIFS_AUTH_RESP_SIZE;
-       } else {
-               pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
-               pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
-       }
-
-       if (ses->capabilities & CAP_UNICODE) {
-               /* unicode strings must be word aligned */
-               if (sess_data->iov[0].iov_len % 2) {
-                       *bcc_ptr = 0;
-                       bcc_ptr++;
-               }
-               unicode_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
-       } else {
-               ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
-       }
-
-
-       sess_data->iov[2].iov_len = (long) bcc_ptr -
-                       (long) sess_data->iov[2].iov_base;
-
-       rc = sess_sendreceive(sess_data);
-       if (rc)
-               goto out;
-
-       pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
-       smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
-
-       if (smb_buf->WordCount != 3) {
-               rc = -EIO;
-               cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
-               goto out;
-       }
-
-       if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
-               cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
-
-       ses->Suid = smb_buf->Uid;   /* UID left in wire format (le) */
-       cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
-
-       bytes_remaining = get_bcc(smb_buf);
-       bcc_ptr = pByteArea(smb_buf);
-
-       /* BB check if Unicode and decode strings */
-       if (bytes_remaining == 0) {
-               /* no string area to decode, do nothing */
-       } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
-               /* unicode string area must be word-aligned */
-               if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
-                       ++bcc_ptr;
-                       --bytes_remaining;
-               }
-               decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
-                                     sess_data->nls_cp);
-       } else {
-               decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
-                                   sess_data->nls_cp);
-       }
-
-       rc = sess_establish_session(sess_data);
-out:
-       sess_data->result = rc;
-       sess_data->func = NULL;
-       sess_free_buffer(sess_data);
-       kfree(ses->auth_key.response);
-       ses->auth_key.response = NULL;
-}
-
 static void
 sess_auth_ntlmv2(struct sess_data *sess_data)
 {
@@ -1675,21 +1437,6 @@ static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data)
        }
 
        switch (type) {
-       case LANMAN:
-               /* LANMAN and plaintext are less secure and off by default.
-                * So we make this explicitly be turned on in kconfig (in the
-                * build) and turned on at runtime (changed from the default)
-                * in proc/fs/cifs or via mount parm.  Unfortunately this is
-                * needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-               sess_data->func = sess_auth_lanman;
-               break;
-#else
-               return -EOPNOTSUPP;
-#endif
-       case NTLM:
-               sess_data->func = sess_auth_ntlm;
-               break;
        case NTLMv2:
                sess_data->func = sess_auth_ntlmv2;
                break;
index cea39bc..181514b 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/smb2/smb2maperror.c
  *
  *   Functions which do error mapping of SMB2 status codes to POSIX errors
  *
index 2dfd0d8..ddc0e8f 100644 (file)
@@ -3590,6 +3590,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
                return rc;
        }
 
+       filemap_invalidate_lock(inode->i_mapping);
        /*
         * We implement the punch hole through ioctl, so we need remove the page
         * caches first, otherwise the data may be inconsistent with the server.
@@ -3607,6 +3608,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
                        sizeof(struct file_zero_data_information),
                        CIFSMaxBufSize, NULL, NULL);
        free_xid(xid);
+       filemap_invalidate_unlock(inode->i_mapping);
        return rc;
 }
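
Holding the mapping's invalidate lock across the hole punch prevents a concurrent page fault or read from repopulating the punched range between the local page-cache truncate and the server-side zeroing. The shape of the critical section, with the page-cache drop shown as an assumption about the surrounding code:

	filemap_invalidate_lock(inode->i_mapping);
	truncate_pagecache_range(inode, offset, offset + len - 1);	/* drop cached pages first */
	/* issue the FSCTL_SET_ZERO_DATA ioctl to the server here */
	filemap_invalidate_unlock(inode->i_mapping);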
 
index 781d14e..b6d2e35 100644 (file)
@@ -2426,7 +2426,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
        memcpy(aclptr, &acl, sizeof(struct cifs_acl));
 
        buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
-       *len = ptr - (__u8 *)buf;
+       *len = roundup(ptr - (__u8 *)buf, 8);
 
        return buf;
 }
index 39a9384..10047cc 100644 (file)
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/random.h>
-#include <crypto/des.h>
 #include "cifs_fs_sb.h"
 #include "cifs_unicode.h"
 #include "cifspdu.h"
 #include "cifsglob.h"
 #include "cifs_debug.h"
 #include "cifsproto.h"
+#include "../cifs_common/md4.h"
 
 #ifndef false
 #define false 0
 #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8)
 #define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val)))
 
-static void
-str_to_key(unsigned char *str, unsigned char *key)
-{
-       int i;
-
-       key[0] = str[0] >> 1;
-       key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2);
-       key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3);
-       key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4);
-       key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5);
-       key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);
-       key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);
-       key[7] = str[6] & 0x7F;
-       for (i = 0; i < 8; i++)
-               key[i] = (key[i] << 1);
-}
-
-static int
-smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
-{
-       unsigned char key2[8];
-       struct des_ctx ctx;
-
-       str_to_key(key, key2);
-
-       if (fips_enabled) {
-               cifs_dbg(VFS, "FIPS compliance enabled: DES not permitted\n");
-               return -ENOENT;
-       }
-
-       des_expand_key(&ctx, key2, DES_KEY_SIZE);
-       des_encrypt(&ctx, out, in);
-       memzero_explicit(&ctx, sizeof(ctx));
-
-       return 0;
-}
-
-static int
-E_P16(unsigned char *p14, unsigned char *p16)
-{
-       int rc;
-       unsigned char sp8[8] =
-           { 0x4b, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25 };
-
-       rc = smbhash(p16, sp8, p14);
-       if (rc)
-               return rc;
-       rc = smbhash(p16 + 8, sp8, p14 + 7);
-       return rc;
-}
-
-static int
-E_P24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)
-{
-       int rc;
-
-       rc = smbhash(p24, c8, p21);
-       if (rc)
-               return rc;
-       rc = smbhash(p24 + 8, c8, p21 + 7);
-       if (rc)
-               return rc;
-       rc = smbhash(p24 + 16, c8, p21 + 14);
-       return rc;
-}
-
 /* produce an MD4 message digest from data of length n bytes */
-int
+static int
 mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
 {
        int rc;
-       struct crypto_shash *md4 = NULL;
-       struct sdesc *sdescmd4 = NULL;
-
-       rc = cifs_alloc_hash("md4", &md4, &sdescmd4);
-       if (rc)
-               goto mdfour_err;
+       struct md4_ctx mctx;
 
-       rc = crypto_shash_init(&sdescmd4->shash);
+       rc = cifs_md4_init(&mctx);
        if (rc) {
-               cifs_dbg(VFS, "%s: Could not init md4 shash\n", __func__);
+               cifs_dbg(VFS, "%s: Could not init MD4\n", __func__);
                goto mdfour_err;
        }
-       rc = crypto_shash_update(&sdescmd4->shash, link_str, link_len);
+       rc = cifs_md4_update(&mctx, link_str, link_len);
        if (rc) {
-               cifs_dbg(VFS, "%s: Could not update with link_str\n", __func__);
+               cifs_dbg(VFS, "%s: Could not update MD4\n", __func__);
                goto mdfour_err;
        }
-       rc = crypto_shash_final(&sdescmd4->shash, md4_hash);
+       rc = cifs_md4_final(&mctx, md4_hash);
        if (rc)
-               cifs_dbg(VFS, "%s: Could not generate md4 hash\n", __func__);
-
-mdfour_err:
-       cifs_free_hash(&md4, &sdescmd4);
-       return rc;
-}
-
-/*
-   This implements the X/Open SMB password encryption
-   It takes a password, a 8 byte "crypt key" and puts 24 bytes of
-   encrypted password into p24 */
-/* Note that password must be uppercased and null terminated */
-int
-SMBencrypt(unsigned char *passwd, const unsigned char *c8, unsigned char *p24)
-{
-       int rc;
-       unsigned char p14[14], p16[16], p21[21];
+               cifs_dbg(VFS, "%s: Could not finalize MD4\n", __func__);
 
-       memset(p14, '\0', 14);
-       memset(p16, '\0', 16);
-       memset(p21, '\0', 21);
-
-       memcpy(p14, passwd, 14);
-       rc = E_P16(p14, p16);
-       if (rc)
-               return rc;
-
-       memcpy(p21, p16, 16);
-       rc = E_P24(p21, c8, p24);
 
+mdfour_err:
        return rc;
 }
 
@@ -186,25 +89,3 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
 
        return rc;
 }
-
-/* Does the NT MD4 hash then des encryption. */
-int
-SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24,
-               const struct nls_table *codepage)
-{
-       int rc;
-       unsigned char p16[16], p21[21];
-
-       memset(p16, '\0', 16);
-       memset(p21, '\0', 21);
-
-       rc = E_md4hash(passwd, p16, codepage);
-       if (rc) {
-               cifs_dbg(FYI, "%s Can't generate NT hash, error: %d\n",
-                        __func__, rc);
-               return rc;
-       }
-       memcpy(p21, p16, 16);
-       rc = E_P24(p21, c8, p24);
-       return rc;
-}
diff --git a/fs/cifs_common/Makefile b/fs/cifs_common/Makefile
new file mode 100644 (file)
index 0000000..6fedd2f
--- /dev/null
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for Linux filesystem routines that are shared by client and server.
+#
+
+obj-$(CONFIG_CIFS_COMMON) += cifs_arc4.o
+obj-$(CONFIG_CIFS_COMMON) += cifs_md4.o
diff --git a/fs/cifs_common/arc4.h b/fs/cifs_common/arc4.h
new file mode 100644 (file)
index 0000000..12e71ec
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Common values for ARC4 Cipher Algorithm
+ */
+
+#ifndef _CRYPTO_ARC4_H
+#define _CRYPTO_ARC4_H
+
+#include <linux/types.h>
+
+#define ARC4_MIN_KEY_SIZE      1
+#define ARC4_MAX_KEY_SIZE      256
+#define ARC4_BLOCK_SIZE                1
+
+struct arc4_ctx {
+       u32 S[256];
+       u32 x, y;
+};
+
+int cifs_arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len);
+void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len);
+
+#endif /* _CRYPTO_ARC4_H */
diff --git a/fs/cifs_common/cifs_arc4.c b/fs/cifs_common/cifs_arc4.c
new file mode 100644 (file)
index 0000000..b964cc6
--- /dev/null
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API
+ *
+ * ARC4 Cipher Algorithm
+ *
+ * Jon Oberheide <jon@oberheide.org>
+ */
+
+#include <linux/module.h>
+#include "arc4.h"
+
+MODULE_LICENSE("GPL");
+
+int cifs_arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
+{
+       int i, j = 0, k = 0;
+
+       ctx->x = 1;
+       ctx->y = 0;
+
+       for (i = 0; i < 256; i++)
+               ctx->S[i] = i;
+
+       for (i = 0; i < 256; i++) {
+               u32 a = ctx->S[i];
+
+               j = (j + in_key[k] + a) & 0xff;
+               ctx->S[i] = ctx->S[j];
+               ctx->S[j] = a;
+               if (++k >= key_len)
+                       k = 0;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cifs_arc4_setkey);
+
+void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
+{
+       u32 *const S = ctx->S;
+       u32 x, y, a, b;
+       u32 ty, ta, tb;
+
+       if (len == 0)
+               return;
+
+       x = ctx->x;
+       y = ctx->y;
+
+       a = S[x];
+       y = (y + a) & 0xff;
+       b = S[y];
+
+       do {
+               S[y] = a;
+               a = (a + b) & 0xff;
+               S[x] = b;
+               x = (x + 1) & 0xff;
+               ta = S[x];
+               ty = (y + ta) & 0xff;
+               tb = S[ty];
+               *out++ = *in++ ^ S[a];
+               if (--len == 0)
+                       break;
+               y = ty;
+               a = ta;
+               b = tb;
+       } while (true);
+
+       ctx->x = x;
+       ctx->y = y;
+}
+EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
+
+static int __init
+init_cifs_common(void)
+{
+       return 0;
+}
+static void __exit
+exit_cifs_common(void)
+{
+}
+
+module_init(init_cifs_common)
+module_exit(exit_cifs_common)
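
The forked helpers keep the classic RC4 key schedule and stream generation. A minimal usage sketch (not part of the patch): encrypt a buffer and wipe the key state afterwards. Decryption is the same call, since RC4 is a stream cipher.

#include <linux/string.h>	/* memzero_explicit */
#include "arc4.h"

static void arc4_example(const u8 *key, unsigned int key_len,
			 const u8 *msg, u8 *out, unsigned int len)
{
	struct arc4_ctx ctx;

	cifs_arc4_setkey(&ctx, key, key_len);
	cifs_arc4_crypt(&ctx, out, msg, len);
	memzero_explicit(&ctx, sizeof(ctx));	/* don't leave key state behind */
}
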
diff --git a/fs/cifs_common/cifs_md4.c b/fs/cifs_common/cifs_md4.c
new file mode 100644 (file)
index 0000000..50f78cf
--- /dev/null
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cryptographic API.
+ *
+ * MD4 Message Digest Algorithm (RFC1320).
+ *
+ * Implementation derived from Andrew Tridgell and Steve French's
+ * CIFS MD4 implementation, and the cryptoapi implementation
+ * originally based on the public domain implementation written
+ * by Colin Plumb in 1993.
+ *
+ * Copyright (c) Andrew Tridgell 1997-1998.
+ * Modified by Steve French (sfrench@us.ibm.com) 2002
+ * Copyright (c) Cryptoapi developers.
+ * Copyright (c) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ *
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "md4.h"
+
+MODULE_LICENSE("GPL");
+
+static inline u32 lshift(u32 x, unsigned int s)
+{
+       x &= 0xFFFFFFFF;
+       return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s));
+}
+
+static inline u32 F(u32 x, u32 y, u32 z)
+{
+       return (x & y) | ((~x) & z);
+}
+
+static inline u32 G(u32 x, u32 y, u32 z)
+{
+       return (x & y) | (x & z) | (y & z);
+}
+
+static inline u32 H(u32 x, u32 y, u32 z)
+{
+       return x ^ y ^ z;
+}
+
+#define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s))
+#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
+#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))
+
+static void md4_transform(u32 *hash, u32 const *in)
+{
+       u32 a, b, c, d;
+
+       a = hash[0];
+       b = hash[1];
+       c = hash[2];
+       d = hash[3];
+
+       ROUND1(a, b, c, d, in[0], 3);
+       ROUND1(d, a, b, c, in[1], 7);
+       ROUND1(c, d, a, b, in[2], 11);
+       ROUND1(b, c, d, a, in[3], 19);
+       ROUND1(a, b, c, d, in[4], 3);
+       ROUND1(d, a, b, c, in[5], 7);
+       ROUND1(c, d, a, b, in[6], 11);
+       ROUND1(b, c, d, a, in[7], 19);
+       ROUND1(a, b, c, d, in[8], 3);
+       ROUND1(d, a, b, c, in[9], 7);
+       ROUND1(c, d, a, b, in[10], 11);
+       ROUND1(b, c, d, a, in[11], 19);
+       ROUND1(a, b, c, d, in[12], 3);
+       ROUND1(d, a, b, c, in[13], 7);
+       ROUND1(c, d, a, b, in[14], 11);
+       ROUND1(b, c, d, a, in[15], 19);
+
+       ROUND2(a, b, c, d, in[0], 3);
+       ROUND2(d, a, b, c, in[4], 5);
+       ROUND2(c, d, a, b, in[8], 9);
+       ROUND2(b, c, d, a, in[12], 13);
+       ROUND2(a, b, c, d, in[1], 3);
+       ROUND2(d, a, b, c, in[5], 5);
+       ROUND2(c, d, a, b, in[9], 9);
+       ROUND2(b, c, d, a, in[13], 13);
+       ROUND2(a, b, c, d, in[2], 3);
+       ROUND2(d, a, b, c, in[6], 5);
+       ROUND2(c, d, a, b, in[10], 9);
+       ROUND2(b, c, d, a, in[14], 13);
+       ROUND2(a, b, c, d, in[3], 3);
+       ROUND2(d, a, b, c, in[7], 5);
+       ROUND2(c, d, a, b, in[11], 9);
+       ROUND2(b, c, d, a, in[15], 13);
+
+       ROUND3(a, b, c, d, in[0], 3);
+       ROUND3(d, a, b, c, in[8], 9);
+       ROUND3(c, d, a, b, in[4], 11);
+       ROUND3(b, c, d, a, in[12], 15);
+       ROUND3(a, b, c, d, in[2], 3);
+       ROUND3(d, a, b, c, in[10], 9);
+       ROUND3(c, d, a, b, in[6], 11);
+       ROUND3(b, c, d, a, in[14], 15);
+       ROUND3(a, b, c, d, in[1], 3);
+       ROUND3(d, a, b, c, in[9], 9);
+       ROUND3(c, d, a, b, in[5], 11);
+       ROUND3(b, c, d, a, in[13], 15);
+       ROUND3(a, b, c, d, in[3], 3);
+       ROUND3(d, a, b, c, in[11], 9);
+       ROUND3(c, d, a, b, in[7], 11);
+       ROUND3(b, c, d, a, in[15], 15);
+
+       hash[0] += a;
+       hash[1] += b;
+       hash[2] += c;
+       hash[3] += d;
+}
+
+static inline void md4_transform_helper(struct md4_ctx *ctx)
+{
+       le32_to_cpu_array(ctx->block, ARRAY_SIZE(ctx->block));
+       md4_transform(ctx->hash, ctx->block);
+}
+
+int cifs_md4_init(struct md4_ctx *mctx)
+{
+       memset(mctx, 0, sizeof(struct md4_ctx));
+       mctx->hash[0] = 0x67452301;
+       mctx->hash[1] = 0xefcdab89;
+       mctx->hash[2] = 0x98badcfe;
+       mctx->hash[3] = 0x10325476;
+       mctx->byte_count = 0;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cifs_md4_init);
+
+int cifs_md4_update(struct md4_ctx *mctx, const u8 *data, unsigned int len)
+{
+       const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+
+       mctx->byte_count += len;
+
+       if (avail > len) {
+               memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
+                      data, len);
+               return 0;
+       }
+
+       memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
+              data, avail);
+
+       md4_transform_helper(mctx);
+       data += avail;
+       len -= avail;
+
+       while (len >= sizeof(mctx->block)) {
+               memcpy(mctx->block, data, sizeof(mctx->block));
+               md4_transform_helper(mctx);
+               data += sizeof(mctx->block);
+               len -= sizeof(mctx->block);
+       }
+
+       memcpy(mctx->block, data, len);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cifs_md4_update);
+
+int cifs_md4_final(struct md4_ctx *mctx, u8 *out)
+{
+       const unsigned int offset = mctx->byte_count & 0x3f;
+       char *p = (char *)mctx->block + offset;
+       int padding = 56 - (offset + 1);
+
+       *p++ = 0x80;
+       if (padding < 0) {
+               memset(p, 0x00, padding + sizeof(u64));
+               md4_transform_helper(mctx);
+               p = (char *)mctx->block;
+               padding = 56;
+       }
+
+       memset(p, 0, padding);
+       mctx->block[14] = mctx->byte_count << 3;
+       mctx->block[15] = mctx->byte_count >> 29;
+       le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
+                         sizeof(u64)) / sizeof(u32));
+       md4_transform(mctx->hash, mctx->block);
+       cpu_to_le32_array(mctx->hash, ARRAY_SIZE(mctx->hash));
+       memcpy(out, mctx->hash, sizeof(mctx->hash));
+       memset(mctx, 0, sizeof(*mctx));
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cifs_md4_final);
diff --git a/fs/cifs_common/md4.h b/fs/cifs_common/md4.h
new file mode 100644 (file)
index 0000000..5337bec
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Common values for the MD4 Message Digest Algorithm (RFC 1320)
+ */
+
+#ifndef _CIFS_MD4_H
+#define _CIFS_MD4_H
+
+#include <linux/types.h>
+
+#define MD4_DIGEST_SIZE                16
+#define MD4_HMAC_BLOCK_SIZE    64
+#define MD4_BLOCK_WORDS                16
+#define MD4_HASH_WORDS         4
+
+struct md4_ctx {
+       u32 hash[MD4_HASH_WORDS];
+       u32 block[MD4_BLOCK_WORDS];
+       u64 byte_count;
+};
+
+
+int cifs_md4_init(struct md4_ctx *mctx);
+int cifs_md4_update(struct md4_ctx *mctx, const u8 *data, unsigned int len);
+int cifs_md4_final(struct md4_ctx *mctx, u8 *out);
+
+#endif /* _CIFS_MD4_H */
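
The header exposes the same init/update/final shape as the crypto_shash code it replaces. A minimal digest helper, sketched on the assumption that callers include this header directly, as smbencrypt.c now does:

#include "md4.h"

static int md4_digest_sketch(const u8 *data, unsigned int len,
			     u8 digest[MD4_DIGEST_SIZE])
{
	struct md4_ctx ctx;
	int rc;

	rc = cifs_md4_init(&ctx);
	if (rc)
		return rc;
	rc = cifs_md4_update(&ctx, data, len);
	if (rc)
		return rc;
	return cifs_md4_final(&ctx, digest);	/* also zeroes the context */
}
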
index 5a0be99..0ad3215 100644 (file)
@@ -177,28 +177,22 @@ out:
        return retval;
 }
 
-/* Fill [buffer, buffer + pos) with data coming from @from. */
-static int fill_write_buffer(struct configfs_buffer *buffer, loff_t pos,
+/* Fill @buffer with data coming from @from. */
+static int fill_write_buffer(struct configfs_buffer *buffer,
                             struct iov_iter *from)
 {
-       loff_t to_copy;
        int copied;
-       u8 *to;
 
        if (!buffer->page)
                buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
        if (!buffer->page)
                return -ENOMEM;
 
-       to_copy = SIMPLE_ATTR_SIZE - 1 - pos;
-       if (to_copy <= 0)
-               return 0;
-       to = buffer->page + pos;
-       copied = copy_from_iter(to, to_copy, from);
+       copied = copy_from_iter(buffer->page, SIMPLE_ATTR_SIZE - 1, from);
        buffer->needs_read_fill = 1;
        /* if buf is assumed to contain a string, terminate it by \0,
         * so e.g. sscanf() can scan the string easily */
-       to[copied] = 0;
+       buffer->page[copied] = 0;
        return copied ? : -EFAULT;
 }
 
@@ -227,10 +221,10 @@ static ssize_t configfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct configfs_buffer *buffer = file->private_data;
-       ssize_t len;
+       int len;
 
        mutex_lock(&buffer->mutex);
-       len = fill_write_buffer(buffer, iocb->ki_pos, from);
+       len = fill_write_buffer(buffer, from);
        if (len > 0)
                len = flush_write_buffer(file, buffer, len);
        if (len > 0)
index d004554..eb538c2 100644 (file)
@@ -26,7 +26,7 @@
  * it to find the directory entry again if requested.  Naively, that would just
  * mean using the ciphertext filenames.  However, since the ciphertext filenames
  * can contain illegal characters ('\0' and '/'), they must be encoded in some
- * way.  We use base64.  But that can cause names to exceed NAME_MAX (255
+ * way.  We use base64url.  But that can cause names to exceed NAME_MAX (255
  * bytes), so we also need to use a strong hash to abbreviate long names.
  *
  * The filesystem may also need another kind of hash, the "dirhash", to quickly
@@ -38,7 +38,7 @@
  * casefolded directories use this type of dirhash.  At least in these cases,
  * each no-key name must include the name's dirhash too.
  *
- * To meet all these requirements, we base64-encode the following
+ * To meet all these requirements, we base64url-encode the following
  * variable-length structure.  It contains the dirhash, or 0's if the filesystem
  * didn't provide one; up to 149 bytes of the ciphertext name; and for
  * ciphertexts longer than 149 bytes, also the SHA-256 of the remaining bytes.
@@ -52,15 +52,19 @@ struct fscrypt_nokey_name {
        u32 dirhash[2];
        u8 bytes[149];
        u8 sha256[SHA256_DIGEST_SIZE];
-}; /* 189 bytes => 252 bytes base64-encoded, which is <= NAME_MAX (255) */
+}; /* 189 bytes => 252 bytes base64url-encoded, which is <= NAME_MAX (255) */
 
 /*
- * Decoded size of max-size nokey name, i.e. a name that was abbreviated using
+ * Decoded size of max-size no-key name, i.e. a name that was abbreviated using
  * the strong hash and thus includes the 'sha256' field.  This isn't simply
  * sizeof(struct fscrypt_nokey_name), as the padding at the end isn't included.
  */
 #define FSCRYPT_NOKEY_NAME_MAX offsetofend(struct fscrypt_nokey_name, sha256)
 
+/* Encoded size of max-size no-key name */
+#define FSCRYPT_NOKEY_NAME_MAX_ENCODED \
+               FSCRYPT_BASE64URL_CHARS(FSCRYPT_NOKEY_NAME_MAX)
+
 static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
 {
        if (str->len == 1 && str->name[0] == '.')
@@ -175,62 +179,82 @@ static int fname_decrypt(const struct inode *inode,
        return 0;
 }
 
-static const char lookup_table[65] =
-       "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
+static const char base64url_table[65] =
+       "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
 
-#define BASE64_CHARS(nbytes)   DIV_ROUND_UP((nbytes) * 4, 3)
+#define FSCRYPT_BASE64URL_CHARS(nbytes)        DIV_ROUND_UP((nbytes) * 4, 3)
 
 /**
- * base64_encode() - base64-encode some bytes
- * @src: the bytes to encode
- * @len: number of bytes to encode
- * @dst: (output) the base64-encoded string.  Not NUL-terminated.
+ * fscrypt_base64url_encode() - base64url-encode some binary data
+ * @src: the binary data to encode
+ * @srclen: the length of @src in bytes
+ * @dst: (output) the base64url-encoded string.  Not NUL-terminated.
  *
- * Encodes the input string using characters from the set [A-Za-z0-9+,].
- * The encoded string is roughly 4/3 times the size of the input string.
+ * Encodes data using base64url encoding, i.e. the "Base 64 Encoding with URL
+ * and Filename Safe Alphabet" specified by RFC 4648.  '='-padding isn't used,
+ * as it's unneeded and not required by the RFC.  base64url is used instead of
+ * base64 to avoid the '/' character, which isn't allowed in filenames.
  *
- * Return: length of the encoded string
+ * Return: the length of the resulting base64url-encoded string in bytes.
+ *        This will be equal to FSCRYPT_BASE64URL_CHARS(srclen).
  */
-static int base64_encode(const u8 *src, int len, char *dst)
+static int fscrypt_base64url_encode(const u8 *src, int srclen, char *dst)
 {
-       int i, bits = 0, ac = 0;
+       u32 ac = 0;
+       int bits = 0;
+       int i;
        char *cp = dst;
 
-       for (i = 0; i < len; i++) {
-               ac += src[i] << bits;
+       for (i = 0; i < srclen; i++) {
+               ac = (ac << 8) | src[i];
                bits += 8;
                do {
-                       *cp++ = lookup_table[ac & 0x3f];
-                       ac >>= 6;
                        bits -= 6;
+                       *cp++ = base64url_table[(ac >> bits) & 0x3f];
                } while (bits >= 6);
        }
        if (bits)
-               *cp++ = lookup_table[ac & 0x3f];
+               *cp++ = base64url_table[(ac << (6 - bits)) & 0x3f];
        return cp - dst;
 }
 
-static int base64_decode(const char *src, int len, u8 *dst)
+/**
+ * fscrypt_base64url_decode() - base64url-decode a string
+ * @src: the string to decode.  Doesn't need to be NUL-terminated.
+ * @srclen: the length of @src in bytes
+ * @dst: (output) the decoded binary data
+ *
+ * Decodes a string using base64url encoding, i.e. the "Base 64 Encoding with
+ * URL and Filename Safe Alphabet" specified by RFC 4648.  '='-padding isn't
+ * accepted, nor are non-encoding characters such as whitespace.
+ *
+ * This implementation hasn't been optimized for performance.
+ *
+ * Return: the length of the resulting decoded binary data in bytes,
+ *        or -1 if the string isn't a valid base64url string.
+ */
+static int fscrypt_base64url_decode(const char *src, int srclen, u8 *dst)
 {
-       int i, bits = 0, ac = 0;
-       const char *p;
-       u8 *cp = dst;
+       u32 ac = 0;
+       int bits = 0;
+       int i;
+       u8 *bp = dst;
+
+       for (i = 0; i < srclen; i++) {
+               const char *p = strchr(base64url_table, src[i]);
 
-       for (i = 0; i < len; i++) {
-               p = strchr(lookup_table, src[i]);
                if (p == NULL || src[i] == 0)
-                       return -2;
-               ac += (p - lookup_table) << bits;
+                       return -1;
+               ac = (ac << 6) | (p - base64url_table);
                bits += 6;
                if (bits >= 8) {
-                       *cp++ = ac & 0xff;
-                       ac >>= 8;
                        bits -= 8;
+                       *bp++ = (u8)(ac >> bits);
                }
        }
-       if (ac)
+       if (ac & ((1 << bits) - 1))
                return -1;
-       return cp - dst;
+       return bp - dst;
 }
 
 bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
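
As a standalone illustration of the arithmetic in fscrypt_base64url_encode() (userspace C, not kernel code): two input bytes become DIV_ROUND_UP(2 * 4, 3) = 3 output characters, and the trailing 4 bits are padded with zero bits instead of '=' characters.

#include <stdio.h>

static const char tbl[65] =
	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";

int main(void)
{
	const unsigned char src[2] = { 0xfb, 0xff };
	unsigned int ac = 0;
	int bits = 0, i;
	char dst[4], *cp = dst;

	for (i = 0; i < 2; i++) {
		ac = (ac << 8) | src[i];	/* accumulate bits MSB-first */
		bits += 8;
		while (bits >= 6) {
			bits -= 6;
			*cp++ = tbl[(ac >> bits) & 0x3f];
		}
	}
	if (bits)	/* 4 bits left over: low bits of the last char are zero */
		*cp++ = tbl[(ac << (6 - bits)) & 0x3f];
	*cp = '\0';
	printf("%s\n", dst);	/* prints "-_8" */
	return 0;
}
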
@@ -263,10 +287,8 @@ bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
 int fscrypt_fname_alloc_buffer(u32 max_encrypted_len,
                               struct fscrypt_str *crypto_str)
 {
-       const u32 max_encoded_len = BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX);
-       u32 max_presented_len;
-
-       max_presented_len = max(max_encoded_len, max_encrypted_len);
+       u32 max_presented_len = max_t(u32, FSCRYPT_NOKEY_NAME_MAX_ENCODED,
+                                     max_encrypted_len);
 
        crypto_str->name = kmalloc(max_presented_len + 1, GFP_NOFS);
        if (!crypto_str->name)
@@ -342,7 +364,7 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
                     offsetof(struct fscrypt_nokey_name, bytes));
        BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, bytes) !=
                     offsetof(struct fscrypt_nokey_name, sha256));
-       BUILD_BUG_ON(BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX) > NAME_MAX);
+       BUILD_BUG_ON(FSCRYPT_NOKEY_NAME_MAX_ENCODED > NAME_MAX);
 
        nokey_name.dirhash[0] = hash;
        nokey_name.dirhash[1] = minor_hash;
@@ -358,7 +380,8 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
                       nokey_name.sha256);
                size = FSCRYPT_NOKEY_NAME_MAX;
        }
-       oname->len = base64_encode((const u8 *)&nokey_name, size, oname->name);
+       oname->len = fscrypt_base64url_encode((const u8 *)&nokey_name, size,
+                                             oname->name);
        return 0;
 }
 EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
@@ -432,14 +455,15 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
         * user-supplied name
         */
 
-       if (iname->len > BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX))
+       if (iname->len > FSCRYPT_NOKEY_NAME_MAX_ENCODED)
                return -ENOENT;
 
        fname->crypto_buf.name = kmalloc(FSCRYPT_NOKEY_NAME_MAX, GFP_KERNEL);
        if (fname->crypto_buf.name == NULL)
                return -ENOMEM;
 
-       ret = base64_decode(iname->name, iname->len, fname->crypto_buf.name);
+       ret = fscrypt_base64url_decode(iname->name, iname->len,
+                                      fname->crypto_buf.name);
        if (ret < (int)offsetof(struct fscrypt_nokey_name, bytes[1]) ||
            (ret > offsetof(struct fscrypt_nokey_name, sha256) &&
             ret != FSCRYPT_NOKEY_NAME_MAX)) {
index a73b037..af74599 100644 (file)
@@ -384,3 +384,47 @@ err_kfree:
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(fscrypt_get_symlink);
+
+/**
+ * fscrypt_symlink_getattr() - set the correct st_size for encrypted symlinks
+ * @path: the path for the encrypted symlink being queried
+ * @stat: the struct being filled with the symlink's attributes
+ *
+ * Override st_size of encrypted symlinks to be the length of the decrypted
+ * symlink target (or the no-key encoded symlink target, if the key is
+ * unavailable) rather than the length of the encrypted symlink target.  This is
+ * necessary for st_size to match the symlink target that userspace actually
+ * sees.  POSIX requires this, and some userspace programs depend on it.
+ *
+ * This requires reading the symlink target from disk if needed, setting up the
+ * inode's encryption key if possible, and then decrypting or encoding the
+ * symlink target.  This makes lstat() more heavyweight than is normally the
+ * case.  However, decrypted symlink targets will be cached in ->i_link, so
+ * usually the symlink won't have to be read and decrypted again later if/when
+ * it is actually followed, readlink() is called, or lstat() is called again.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat)
+{
+       struct dentry *dentry = path->dentry;
+       struct inode *inode = d_inode(dentry);
+       const char *link;
+       DEFINE_DELAYED_CALL(done);
+
+       /*
+        * To get the symlink target that userspace will see (whether it's the
+        * decrypted target or the no-key encoded target), we can just get it in
+        * the same way the VFS does during path resolution and readlink().
+        */
+       link = READ_ONCE(inode->i_link);
+       if (!link) {
+               link = inode->i_op->get_link(dentry, inode, &done);
+               if (IS_ERR(link))
+                       return PTR_ERR(link);
+       }
+       stat->size = strlen(link);
+       do_delayed_call(&done);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(fscrypt_symlink_getattr);
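
A sketch of how a filesystem's ->getattr for encrypted symlinks might use the new helper. The ->getattr and generic_fillattr() signatures are the ones current in this series; example_symlink_getattr() is a hypothetical name, not code from any particular filesystem.

#include <linux/fs.h>
#include <linux/fscrypt.h>

static int example_symlink_getattr(struct user_namespace *mnt_userns,
				   const struct path *path, struct kstat *stat,
				   u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(mnt_userns, inode, stat);
	/* replace the on-disk (ciphertext) size with what userspace will see */
	return fscrypt_symlink_getattr(path, stat);
}
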
index da41f93..4e3e5a2 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -722,7 +722,7 @@ static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_d
                return rc;
 
        id = dax_read_lock();
-       rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
+       rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
        if (rc < 0) {
                dax_read_unlock(id);
                return rc;
@@ -1005,12 +1005,12 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 
-static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
+static sector_t dax_iomap_sector(const struct iomap *iomap, loff_t pos)
 {
        return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
 }
 
-static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
+static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
                         pfn_t *pfnp)
 {
        const sector_t sector = dax_iomap_sector(iomap, pos);
@@ -1066,6 +1066,66 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
        return ret;
 }
 
+#ifdef CONFIG_FS_DAX_PMD
+static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+               const struct iomap *iomap, void **entry)
+{
+       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+       unsigned long pmd_addr = vmf->address & PMD_MASK;
+       struct vm_area_struct *vma = vmf->vma;
+       struct inode *inode = mapping->host;
+       pgtable_t pgtable = NULL;
+       struct page *zero_page;
+       spinlock_t *ptl;
+       pmd_t pmd_entry;
+       pfn_t pfn;
+
+       zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
+
+       if (unlikely(!zero_page))
+               goto fallback;
+
+       pfn = page_to_pfn_t(zero_page);
+       *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
+                       DAX_PMD | DAX_ZERO_PAGE, false);
+
+       if (arch_needs_pgtable_deposit()) {
+               pgtable = pte_alloc_one(vma->vm_mm);
+               if (!pgtable)
+                       return VM_FAULT_OOM;
+       }
+
+       ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
+       if (!pmd_none(*(vmf->pmd))) {
+               spin_unlock(ptl);
+               goto fallback;
+       }
+
+       if (pgtable) {
+               pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+               mm_inc_nr_ptes(vma->vm_mm);
+       }
+       pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
+       pmd_entry = pmd_mkhuge(pmd_entry);
+       set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
+       spin_unlock(ptl);
+       trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
+       return VM_FAULT_NOPAGE;
+
+fallback:
+       if (pgtable)
+               pte_free(vma->vm_mm, pgtable);
+       trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
+       return VM_FAULT_FALLBACK;
+}
+#else
+static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+               const struct iomap *iomap, void **entry)
+{
+       return VM_FAULT_FALLBACK;
+}
+#endif /* CONFIG_FS_DAX_PMD */
+
 s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
 {
        sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
@@ -1103,20 +1163,21 @@ s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
        return size;
 }
 
-static loff_t
-dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
-               struct iomap *iomap, struct iomap *srcmap)
+static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
+               struct iov_iter *iter)
 {
+       const struct iomap *iomap = &iomi->iomap;
+       loff_t length = iomap_length(iomi);
+       loff_t pos = iomi->pos;
        struct block_device *bdev = iomap->bdev;
        struct dax_device *dax_dev = iomap->dax_dev;
-       struct iov_iter *iter = data;
        loff_t end = pos + length, done = 0;
        ssize_t ret = 0;
        size_t xfer;
        int id;
 
        if (iov_iter_rw(iter) == READ) {
-               end = min(end, i_size_read(inode));
+               end = min(end, i_size_read(iomi->inode));
                if (pos >= end)
                        return 0;
 
@@ -1133,7 +1194,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
         * written by write(2) is visible in mmap.
         */
        if (iomap->flags & IOMAP_F_NEW) {
-               invalidate_inode_pages2_range(inode->i_mapping,
+               invalidate_inode_pages2_range(iomi->inode->i_mapping,
                                              pos >> PAGE_SHIFT,
                                              (end - 1) >> PAGE_SHIFT);
        }
@@ -1209,31 +1270,29 @@ ssize_t
 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops)
 {
-       struct address_space *mapping = iocb->ki_filp->f_mapping;
-       struct inode *inode = mapping->host;
-       loff_t pos = iocb->ki_pos, ret = 0, done = 0;
-       unsigned flags = 0;
+       struct iomap_iter iomi = {
+               .inode          = iocb->ki_filp->f_mapping->host,
+               .pos            = iocb->ki_pos,
+               .len            = iov_iter_count(iter),
+       };
+       loff_t done = 0;
+       int ret;
 
        if (iov_iter_rw(iter) == WRITE) {
-               lockdep_assert_held_write(&inode->i_rwsem);
-               flags |= IOMAP_WRITE;
+               lockdep_assert_held_write(&iomi.inode->i_rwsem);
+               iomi.flags |= IOMAP_WRITE;
        } else {
-               lockdep_assert_held(&inode->i_rwsem);
+               lockdep_assert_held(&iomi.inode->i_rwsem);
        }
 
        if (iocb->ki_flags & IOCB_NOWAIT)
-               flags |= IOMAP_NOWAIT;
+               iomi.flags |= IOMAP_NOWAIT;
 
-       while (iov_iter_count(iter)) {
-               ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
-                               iter, dax_iomap_actor);
-               if (ret <= 0)
-                       break;
-               pos += ret;
-               done += ret;
-       }
+       while ((ret = iomap_iter(&iomi, ops)) > 0)
+               iomi.processed = dax_iomap_iter(&iomi, iter);
 
-       iocb->ki_pos += done;
+       done = iomi.pos - iocb->ki_pos;
+       iocb->ki_pos = iomi.pos;
        return done ? done : ret;
 }
 EXPORT_SYMBOL_GPL(dax_iomap_rw);
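
For reference, a hedged sketch of the iomap_iter() calling convention the converted loops rely on: the body stores the number of bytes it handled (or a negative errno) in iter.processed, and iomap_iter() keeps returning a positive value until the requested range is exhausted. process_one_mapping() is a hypothetical stand-in for the per-extent work.

static loff_t iomap_iter_sketch(struct inode *inode, loff_t pos, u64 count,
				const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= count,
		.flags	= IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = process_one_mapping(&iter);	/* hypothetical */

	/* ret is 0 once the whole range is consumed, negative on error */
	return ret ? ret : iter.pos - pos;
}
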
@@ -1250,44 +1309,146 @@ static vm_fault_t dax_fault_return(int error)
  * flushed on write-faults (non-cow), but not read-faults.
  */
 static bool dax_fault_is_synchronous(unsigned long flags,
-               struct vm_area_struct *vma, struct iomap *iomap)
+               struct vm_area_struct *vma, const struct iomap *iomap)
 {
        return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
                && (iomap->flags & IOMAP_F_DIRTY);
 }
 
+/*
+ * When handling a synchronous page fault and the inode needs an fsync, we can
+ * insert the PTE/PMD into the page tables only after that fsync has happened.
+ * Skip the insertion for now and return the pfn so that the caller can insert
+ * it after the fsync is done.
+ */
+static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
+{
+       if (WARN_ON_ONCE(!pfnp))
+               return VM_FAULT_SIGBUS;
+       *pfnp = pfn;
+       return VM_FAULT_NEEDDSYNC;
+}
+
+static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
+               const struct iomap_iter *iter)
+{
+       sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
+       unsigned long vaddr = vmf->address;
+       vm_fault_t ret;
+       int error = 0;
+
+       switch (iter->iomap.type) {
+       case IOMAP_HOLE:
+       case IOMAP_UNWRITTEN:
+               clear_user_highpage(vmf->cow_page, vaddr);
+               break;
+       case IOMAP_MAPPED:
+               error = copy_cow_page_dax(iter->iomap.bdev, iter->iomap.dax_dev,
+                                         sector, vmf->cow_page, vaddr);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               error = -EIO;
+               break;
+       }
+
+       if (error)
+               return dax_fault_return(error);
+
+       __SetPageUptodate(vmf->cow_page);
+       ret = finish_fault(vmf);
+       if (!ret)
+               return VM_FAULT_DONE_COW;
+       return ret;
+}
+
+/**
+ * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
+ * @vmf:       vm fault instance
+ * @iter:      iomap iter
+ * @pfnp:      pfn to be returned
+ * @xas:       the dax mapping tree of a file
+ * @entry:     an unlocked dax entry to be inserted
+ * @pmd:       distinguish whether it is a pmd fault
+ */
+static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
+               const struct iomap_iter *iter, pfn_t *pfnp,
+               struct xa_state *xas, void **entry, bool pmd)
+{
+       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+       const struct iomap *iomap = &iter->iomap;
+       size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
+       loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
+       bool write = vmf->flags & FAULT_FLAG_WRITE;
+       bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap);
+       unsigned long entry_flags = pmd ? DAX_PMD : 0;
+       int err = 0;
+       pfn_t pfn;
+
+       if (!pmd && vmf->cow_page)
+               return dax_fault_cow_page(vmf, iter);
+
+       /* if we are reading UNWRITTEN or HOLE, return a hole. */
+       if (!write &&
+           (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
+               if (!pmd)
+                       return dax_load_hole(xas, mapping, entry, vmf);
+               return dax_pmd_load_hole(xas, vmf, iomap, entry);
+       }
+
+       if (iomap->type != IOMAP_MAPPED) {
+               WARN_ON_ONCE(1);
+               return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
+       }
+
+       err = dax_iomap_pfn(&iter->iomap, pos, size, &pfn);
+       if (err)
+               return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
+
+       *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
+                                 write && !sync);
+
+       if (sync)
+               return dax_fault_synchronous_pfnp(pfnp, pfn);
+
+       /* insert PMD pfn */
+       if (pmd)
+               return vmf_insert_pfn_pmd(vmf, pfn, write);
+
+       /* insert PTE pfn */
+       if (write)
+               return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
+       return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+}
+
 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
                               int *iomap_errp, const struct iomap_ops *ops)
 {
-       struct vm_area_struct *vma = vmf->vma;
-       struct address_space *mapping = vma->vm_file->f_mapping;
+       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
-       struct inode *inode = mapping->host;
-       unsigned long vaddr = vmf->address;
-       loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
-       struct iomap iomap = { .type = IOMAP_HOLE };
-       struct iomap srcmap = { .type = IOMAP_HOLE };
-       unsigned flags = IOMAP_FAULT;
-       int error, major = 0;
-       bool write = vmf->flags & FAULT_FLAG_WRITE;
-       bool sync;
+       struct iomap_iter iter = {
+               .inode          = mapping->host,
+               .pos            = (loff_t)vmf->pgoff << PAGE_SHIFT,
+               .len            = PAGE_SIZE,
+               .flags          = IOMAP_FAULT,
+       };
        vm_fault_t ret = 0;
        void *entry;
-       pfn_t pfn;
+       int error;
 
-       trace_dax_pte_fault(inode, vmf, ret);
+       trace_dax_pte_fault(iter.inode, vmf, ret);
        /*
         * Check whether offset isn't beyond end of file now. Caller is supposed
         * to hold locks serializing us with truncate / punch hole so this is
         * a reliable test.
         */
-       if (pos >= i_size_read(inode)) {
+       if (iter.pos >= i_size_read(iter.inode)) {
                ret = VM_FAULT_SIGBUS;
                goto out;
        }
 
-       if (write && !vmf->cow_page)
-               flags |= IOMAP_WRITE;
+       if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
+               iter.flags |= IOMAP_WRITE;
 
        entry = grab_mapping_entry(&xas, mapping, 0);
        if (xa_is_internal(entry)) {
@@ -1306,234 +1467,103 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
                goto unlock_entry;
        }
 
-       /*
-        * Note that we don't bother to use iomap_apply here: DAX required
-        * the file system block size to be equal the page size, which means
-        * that we never have to deal with more than a single extent here.
-        */
-       error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
-       if (iomap_errp)
-               *iomap_errp = error;
-       if (error) {
-               ret = dax_fault_return(error);
-               goto unlock_entry;
-       }
-       if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
-               error = -EIO;   /* fs corruption? */
-               goto error_finish_iomap;
-       }
-
-       if (vmf->cow_page) {
-               sector_t sector = dax_iomap_sector(&iomap, pos);
-
-               switch (iomap.type) {
-               case IOMAP_HOLE:
-               case IOMAP_UNWRITTEN:
-                       clear_user_highpage(vmf->cow_page, vaddr);
-                       break;
-               case IOMAP_MAPPED:
-                       error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
-                                                 sector, vmf->cow_page, vaddr);
-                       break;
-               default:
-                       WARN_ON_ONCE(1);
-                       error = -EIO;
-                       break;
+       while ((error = iomap_iter(&iter, ops)) > 0) {
+               if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
+                       iter.processed = -EIO;  /* fs corruption? */
+                       continue;
                }
 
-               if (error)
-                       goto error_finish_iomap;
-
-               __SetPageUptodate(vmf->cow_page);
-               ret = finish_fault(vmf);
-               if (!ret)
-                       ret = VM_FAULT_DONE_COW;
-               goto finish_iomap;
-       }
-
-       sync = dax_fault_is_synchronous(flags, vma, &iomap);
-
-       switch (iomap.type) {
-       case IOMAP_MAPPED:
-               if (iomap.flags & IOMAP_F_NEW) {
+               ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
+               if (ret != VM_FAULT_SIGBUS &&
+                   (iter.iomap.flags & IOMAP_F_NEW)) {
                        count_vm_event(PGMAJFAULT);
-                       count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
-                       major = VM_FAULT_MAJOR;
+                       count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
+                       ret |= VM_FAULT_MAJOR;
                }
-               error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
-               if (error < 0)
-                       goto error_finish_iomap;
-
-               entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
-                                                0, write && !sync);
 
-               /*
-                * If we are doing synchronous page fault and inode needs fsync,
-                * we can insert PTE into page tables only after that happens.
-                * Skip insertion for now and return the pfn so that caller can
-                * insert it after fsync is done.
-                */
-               if (sync) {
-                       if (WARN_ON_ONCE(!pfnp)) {
-                               error = -EIO;
-                               goto error_finish_iomap;
-                       }
-                       *pfnp = pfn;
-                       ret = VM_FAULT_NEEDDSYNC | major;
-                       goto finish_iomap;
-               }
-               trace_dax_insert_mapping(inode, vmf, entry);
-               if (write)
-                       ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
-               else
-                       ret = vmf_insert_mixed(vma, vaddr, pfn);
-
-               goto finish_iomap;
-       case IOMAP_UNWRITTEN:
-       case IOMAP_HOLE:
-               if (!write) {
-                       ret = dax_load_hole(&xas, mapping, &entry, vmf);
-                       goto finish_iomap;
-               }
-               fallthrough;
-       default:
-               WARN_ON_ONCE(1);
-               error = -EIO;
-               break;
+               if (!(ret & VM_FAULT_ERROR))
+                       iter.processed = PAGE_SIZE;
        }
 
- error_finish_iomap:
-       ret = dax_fault_return(error);
- finish_iomap:
-       if (ops->iomap_end) {
-               int copied = PAGE_SIZE;
+       if (iomap_errp)
+               *iomap_errp = error;
+       if (!ret && error)
+               ret = dax_fault_return(error);
 
-               if (ret & VM_FAULT_ERROR)
-                       copied = 0;
-               /*
-                * The fault is done by now and there's no way back (other
-                * thread may be already happily using PTE we have installed).
-                * Just ignore error from ->iomap_end since we cannot do much
-                * with it.
-                */
-               ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
-       }
- unlock_entry:
+unlock_entry:
        dax_unlock_entry(&xas, entry);
- out:
-       trace_dax_pte_fault_done(inode, vmf, ret);
-       return ret | major;
+out:
+       trace_dax_pte_fault_done(iter.inode, vmf, ret);
+       return ret;
 }
 
 #ifdef CONFIG_FS_DAX_PMD
-static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
-               struct iomap *iomap, void **entry)
+static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
+               pgoff_t max_pgoff)
 {
-       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        unsigned long pmd_addr = vmf->address & PMD_MASK;
-       struct vm_area_struct *vma = vmf->vma;
-       struct inode *inode = mapping->host;
-       pgtable_t pgtable = NULL;
-       struct page *zero_page;
-       spinlock_t *ptl;
-       pmd_t pmd_entry;
-       pfn_t pfn;
-
-       zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
-
-       if (unlikely(!zero_page))
-               goto fallback;
+       bool write = vmf->flags & FAULT_FLAG_WRITE;
 
-       pfn = page_to_pfn_t(zero_page);
-       *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
-                       DAX_PMD | DAX_ZERO_PAGE, false);
+       /*
+        * Make sure that the faulting address's PMD offset (color) matches
+        * the PMD offset from the start of the file.  This is necessary so
+        * that a PMD range in the page table overlaps exactly with a PMD
+        * range in the page cache.
+        */
+       if ((vmf->pgoff & PG_PMD_COLOUR) !=
+           ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
+               return true;
 
-       if (arch_needs_pgtable_deposit()) {
-               pgtable = pte_alloc_one(vma->vm_mm);
-               if (!pgtable)
-                       return VM_FAULT_OOM;
-       }
+       /* Fall back to PTEs if we're going to COW */
+       if (write && !(vmf->vma->vm_flags & VM_SHARED))
+               return true;
 
-       ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
-       if (!pmd_none(*(vmf->pmd))) {
-               spin_unlock(ptl);
-               goto fallback;
-       }
+       /* If the PMD would extend outside the VMA */
+       if (pmd_addr < vmf->vma->vm_start)
+               return true;
+       if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
+               return true;
 
-       if (pgtable) {
-               pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
-               mm_inc_nr_ptes(vma->vm_mm);
-       }
-       pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
-       pmd_entry = pmd_mkhuge(pmd_entry);
-       set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
-       spin_unlock(ptl);
-       trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
-       return VM_FAULT_NOPAGE;
+       /* If the PMD would extend beyond the file size */
+       if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
+               return true;
 
-fallback:
-       if (pgtable)
-               pte_free(vma->vm_mm, pgtable);
-       trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
-       return VM_FAULT_FALLBACK;
+       return false;
 }
 
 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
                               const struct iomap_ops *ops)
 {
-       struct vm_area_struct *vma = vmf->vma;
-       struct address_space *mapping = vma->vm_file->f_mapping;
+       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
-       unsigned long pmd_addr = vmf->address & PMD_MASK;
-       bool write = vmf->flags & FAULT_FLAG_WRITE;
-       bool sync;
-       unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
-       struct inode *inode = mapping->host;
-       vm_fault_t result = VM_FAULT_FALLBACK;
-       struct iomap iomap = { .type = IOMAP_HOLE };
-       struct iomap srcmap = { .type = IOMAP_HOLE };
+       struct iomap_iter iter = {
+               .inode          = mapping->host,
+               .len            = PMD_SIZE,
+               .flags          = IOMAP_FAULT,
+       };
+       vm_fault_t ret = VM_FAULT_FALLBACK;
        pgoff_t max_pgoff;
        void *entry;
-       loff_t pos;
        int error;
-       pfn_t pfn;
+
+       if (vmf->flags & FAULT_FLAG_WRITE)
+               iter.flags |= IOMAP_WRITE;
 
        /*
         * Check whether offset isn't beyond end of file now. Caller is
         * supposed to hold locks serializing us with truncate / punch hole so
         * this is a reliable test.
         */
-       max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
-
-       trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
-
-       /*
-        * Make sure that the faulting address's PMD offset (color) matches
-        * the PMD offset from the start of the file.  This is necessary so
-        * that a PMD range in the page table overlaps exactly with a PMD
-        * range in the page cache.
-        */
-       if ((vmf->pgoff & PG_PMD_COLOUR) !=
-           ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
-               goto fallback;
+       max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
 
-       /* Fall back to PTEs if we're going to COW */
-       if (write && !(vma->vm_flags & VM_SHARED))
-               goto fallback;
-
-       /* If the PMD would extend outside the VMA */
-       if (pmd_addr < vma->vm_start)
-               goto fallback;
-       if ((pmd_addr + PMD_SIZE) > vma->vm_end)
-               goto fallback;
+       trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
 
        if (xas.xa_index >= max_pgoff) {
-               result = VM_FAULT_SIGBUS;
+               ret = VM_FAULT_SIGBUS;
                goto out;
        }
 
-       /* If the PMD would extend beyond the file size */
-       if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
+       if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
                goto fallback;
 
        /*
@@ -1544,7 +1574,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
         */
        entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
        if (xa_is_internal(entry)) {
-               result = xa_to_internal(entry);
+               ret = xa_to_internal(entry);
                goto fallback;
        }
 
@@ -1556,88 +1586,30 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
         */
        if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
                        !pmd_devmap(*vmf->pmd)) {
-               result = 0;
+               ret = 0;
                goto unlock_entry;
        }
 
-       /*
-        * Note that we don't use iomap_apply here.  We aren't doing I/O, only
-        * setting up a mapping, so really we're using iomap_begin() as a way
-        * to look up our filesystem block.
-        */
-       pos = (loff_t)xas.xa_index << PAGE_SHIFT;
-       error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
-                       &srcmap);
-       if (error)
-               goto unlock_entry;
-
-       if (iomap.offset + iomap.length < pos + PMD_SIZE)
-               goto finish_iomap;
-
-       sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
-
-       switch (iomap.type) {
-       case IOMAP_MAPPED:
-               error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
-               if (error < 0)
-                       goto finish_iomap;
-
-               entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
-                                               DAX_PMD, write && !sync);
-
-               /*
-                * If we are doing synchronous page fault and inode needs fsync,
-                * we can insert PMD into page tables only after that happens.
-                * Skip insertion for now and return the pfn so that caller can
-                * insert it after fsync is done.
-                */
-               if (sync) {
-                       if (WARN_ON_ONCE(!pfnp))
-                               goto finish_iomap;
-                       *pfnp = pfn;
-                       result = VM_FAULT_NEEDDSYNC;
-                       goto finish_iomap;
-               }
+       iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
+       while ((error = iomap_iter(&iter, ops)) > 0) {
+               if (iomap_length(&iter) < PMD_SIZE)
+                       continue; /* actually breaks out of the loop */
 
-               trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
-               result = vmf_insert_pfn_pmd(vmf, pfn, write);
-               break;
-       case IOMAP_UNWRITTEN:
-       case IOMAP_HOLE:
-               if (WARN_ON_ONCE(write))
-                       break;
-               result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
-               break;
-       default:
-               WARN_ON_ONCE(1);
-               break;
+               ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
+               if (ret != VM_FAULT_FALLBACK)
+                       iter.processed = PMD_SIZE;
        }
 
- finish_iomap:
-       if (ops->iomap_end) {
-               int copied = PMD_SIZE;
-
-               if (result == VM_FAULT_FALLBACK)
-                       copied = 0;
-               /*
-                * The fault is done by now and there's no way back (other
-                * thread may be already happily using PMD we have installed).
-                * Just ignore error from ->iomap_end since we cannot do much
-                * with it.
-                */
-               ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
-                               &iomap);
-       }
- unlock_entry:
+unlock_entry:
        dax_unlock_entry(&xas, entry);
- fallback:
-       if (result == VM_FAULT_FALLBACK) {
-               split_huge_pmd(vma, vmf->pmd, vmf->address);
+fallback:
+       if (ret == VM_FAULT_FALLBACK) {
+               split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
                count_vm_event(THP_FAULT_FALLBACK);
        }
 out:
-       trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
-       return result;
+       trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
+       return ret;
 }
 #else
 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
index e265b6d..3627dd7 100644 (file)
@@ -25,8 +25,6 @@
 #include <linux/idr.h>
 #include <linux/uio.h>
 
-DEFINE_PER_CPU(int, eventfd_wake_count);
-
 static DEFINE_IDA(eventfd_ida);
 
 struct eventfd_ctx {
@@ -67,21 +65,21 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
         * Deadlock or stack overflow issues can happen if we recurse here
         * through waitqueue wakeup handlers. If the caller uses potentially
         * nested waitqueues with custom wakeup handlers, then it should
-        * check eventfd_signal_count() before calling this function. If
-        * it returns true, the eventfd_signal() call should be deferred to a
+        * check eventfd_signal_allowed() before calling this function. If
+        * it returns false, the eventfd_signal() call should be deferred to a
         * safe context.
         */
-       if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+       if (WARN_ON_ONCE(current->in_eventfd_signal))
                return 0;
 
        spin_lock_irqsave(&ctx->wqh.lock, flags);
-       this_cpu_inc(eventfd_wake_count);
+       current->in_eventfd_signal = 1;
        if (ULLONG_MAX - ctx->count < n)
                n = ULLONG_MAX - ctx->count;
        ctx->count += n;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLIN);
-       this_cpu_dec(eventfd_wake_count);
+       current->in_eventfd_signal = 0;
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
        return n;
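
The updated comment asks callers on potentially nested wakeup paths to check eventfd_signal_allowed() (the helper this series introduces) and defer the signal to a safe context otherwise. A hedged sketch of such a deferral; struct foo and its work item are purely hypothetical, and signal_work is assumed to have been set up with INIT_WORK(&f->signal_work, foo_signal_work).

#include <linux/eventfd.h>
#include <linux/workqueue.h>

struct foo {
	struct eventfd_ctx *eventfd;
	struct work_struct signal_work;
};

static void foo_signal_work(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, signal_work);

	eventfd_signal(f->eventfd, 1);
}

static void foo_signal(struct foo *f)
{
	if (eventfd_signal_allowed())
		eventfd_signal(f->eventfd, 1);
	else
		schedule_work(&f->signal_work);	/* defer to process context */
}
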
index 38f6345..3b78b22 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -2070,10 +2070,8 @@ SYSCALL_DEFINE5(execveat,
                const char __user *const __user *, envp,
                int, flags)
 {
-       int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
-
        return do_execveat(fd,
-                          getname_flags(filename, lookup_flags, NULL),
+                          getname_uflags(filename, flags),
                           argv, envp, flags);
 }
 
@@ -2091,10 +2089,8 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
                       const compat_uptr_t __user *, envp,
                       int,  flags)
 {
-       int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
-
        return compat_do_execveat(fd,
-                                 getname_flags(filename, lookup_flags, NULL),
+                                 getname_uflags(filename, flags),
                                  argv, envp, flags);
 }
 #endif
index 54eec91..1248ff4 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config EXT2_FS
        tristate "Second extended fs support"
+       select FS_IOMAP
        help
          Ext2 is a standard Linux file system for hard disks.
 
index e512630..3be9dd6 100644 (file)
@@ -667,9 +667,6 @@ struct ext2_inode_info {
        struct rw_semaphore xattr_sem;
 #endif
        rwlock_t i_meta_lock;
-#ifdef CONFIG_FS_DAX
-       struct rw_semaphore dax_sem;
-#endif
 
        /*
         * truncate_mutex is for serialising ext2_truncate() against
@@ -685,14 +682,6 @@ struct ext2_inode_info {
 #endif
 };
 
-#ifdef CONFIG_FS_DAX
-#define dax_sem_down_write(ext2_inode) down_write(&(ext2_inode)->dax_sem)
-#define dax_sem_up_write(ext2_inode)   up_write(&(ext2_inode)->dax_sem)
-#else
-#define dax_sem_down_write(ext2_inode)
-#define dax_sem_up_write(ext2_inode)
-#endif
-
 /*
  * Inode dynamic state flags
  */
index f98466a..eb97aa3 100644 (file)
@@ -81,7 +81,7 @@ out_unlock:
  *
  * mmap_lock (MM)
  *   sb_start_pagefault (vfs, freeze)
- *     ext2_inode_info->dax_sem
+ *     address_space->invalidate_lock
  *       address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
  *         ext2_inode_info->truncate_mutex
  *
@@ -91,7 +91,6 @@ out_unlock:
 static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
 {
        struct inode *inode = file_inode(vmf->vma->vm_file);
-       struct ext2_inode_info *ei = EXT2_I(inode);
        vm_fault_t ret;
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
@@ -100,11 +99,11 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
                sb_start_pagefault(inode->i_sb);
                file_update_time(vmf->vma->vm_file);
        }
-       down_read(&ei->dax_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
 
        ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
 
-       up_read(&ei->dax_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
        if (write)
                sb_end_pagefault(inode->i_sb);
        return ret;
index dadb121..333fa62 100644 (file)
@@ -799,7 +799,6 @@ int ext2_get_block(struct inode *inode, sector_t iblock,
 
 }
 
-#ifdef CONFIG_FS_DAX
 static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                unsigned flags, struct iomap *iomap, struct iomap *srcmap)
 {
@@ -852,16 +851,18 @@ const struct iomap_ops ext2_iomap_ops = {
        .iomap_begin            = ext2_iomap_begin,
        .iomap_end              = ext2_iomap_end,
 };
-#else
-/* Define empty ops for !CONFIG_FS_DAX case to avoid ugly ifdefs */
-const struct iomap_ops ext2_iomap_ops;
-#endif /* CONFIG_FS_DAX */
 
 int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
 {
-       return generic_block_fiemap(inode, fieinfo, start, len,
-                                   ext2_get_block);
+       int ret;
+
+       inode_lock(inode);
+       len = min_t(u64, len, i_size_read(inode));
+       ret = iomap_fiemap(inode, fieinfo, start, len, &ext2_iomap_ops);
+       inode_unlock(inode);
+
+       return ret;
 }
 
 static int ext2_writepage(struct page *page, struct writeback_control *wbc)
@@ -1177,7 +1178,7 @@ static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int de
                ext2_free_data(inode, p, q);
 }
 
-/* dax_sem must be held when calling this function */
+/* mapping->invalidate_lock must be held when calling this function */
 static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
 {
        __le32 *i_data = EXT2_I(inode)->i_data;
@@ -1194,7 +1195,7 @@ static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
        iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);
 
 #ifdef CONFIG_FS_DAX
-       WARN_ON(!rwsem_is_locked(&ei->dax_sem));
+       WARN_ON(!rwsem_is_locked(&inode->i_mapping->invalidate_lock));
 #endif
 
        n = ext2_block_to_path(inode, iblock, offsets, NULL);
@@ -1276,9 +1277,9 @@ static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
        if (ext2_inode_is_fast_symlink(inode))
                return;
 
-       dax_sem_down_write(EXT2_I(inode));
+       filemap_invalidate_lock(inode->i_mapping);
        __ext2_truncate_blocks(inode, offset);
-       dax_sem_up_write(EXT2_I(inode));
+       filemap_invalidate_unlock(inode->i_mapping);
 }
 
 static int ext2_setsize(struct inode *inode, loff_t newsize)
@@ -1308,10 +1309,10 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
        if (error)
                return error;
 
-       dax_sem_down_write(EXT2_I(inode));
+       filemap_invalidate_lock(inode->i_mapping);
        truncate_setsize(inode, newsize);
        __ext2_truncate_blocks(inode, newsize);
-       dax_sem_up_write(EXT2_I(inode));
+       filemap_invalidate_unlock(inode->i_mapping);
 
        inode->i_mtime = inode->i_ctime = current_time(inode);
        if (inode_needs_sync(inode)) {
index 21e09fb..987bcf3 100644 (file)
@@ -206,9 +206,6 @@ static void init_once(void *foo)
        init_rwsem(&ei->xattr_sem);
 #endif
        mutex_init(&ei->truncate_mutex);
-#ifdef CONFIG_FS_DAX
-       init_rwsem(&ei->dax_sem);
-#endif
        inode_init_once(&ei->vfs_inode);
 }
 
index 3c51e24..7ebaf66 100644 (file)
@@ -1086,15 +1086,6 @@ struct ext4_inode_info {
         * by other means, so we have i_data_sem.
         */
        struct rw_semaphore i_data_sem;
-       /*
-        * i_mmap_sem is for serializing page faults with truncate / punch hole
-        * operations. We have to make sure that new page cannot be faulted in
-        * a section of the inode that is being punched. We cannot easily use
-        * i_data_sem for this since we need protection for the whole punch
-        * operation and i_data_sem ranks below transaction start so we have
-        * to occasionally drop it.
-        */
-       struct rw_semaphore i_mmap_sem;
        struct inode vfs_inode;
        struct jbd2_inode *jinode;
 
@@ -2972,7 +2963,6 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
 extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
                             loff_t lstart, loff_t lend);
 extern vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf);
-extern vm_fault_t ext4_filemap_fault(struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
 extern int ext4_get_projid(struct inode *inode, kprojid_t *projid);
 extern void ext4_da_release_space(struct inode *inode, int to_free);
index b96ecba..b60f015 100644 (file)
@@ -244,9 +244,6 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
  * "bh" may be NULL: a metadata block may have been freed from memory
  * but there may still be a record of it in the journal, and that record
  * still needs to be revoked.
- *
- * If the handle isn't valid we're not journaling, but we still need to
- * call into ext4_journal_revoke() to put the buffer head.
  */
 int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
                  int is_metadata, struct inode *inode,
index 92ad64b..c33e0a2 100644 (file)
@@ -4474,6 +4474,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
                            loff_t len, int mode)
 {
        struct inode *inode = file_inode(file);
+       struct address_space *mapping = file->f_mapping;
        handle_t *handle = NULL;
        unsigned int max_blocks;
        loff_t new_size = 0;
@@ -4560,17 +4561,17 @@ static long ext4_zero_range(struct file *file, loff_t offset,
                 * Prevent page faults from reinstantiating pages we have
                 * released from page cache.
                 */
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
 
                ret = ext4_break_layouts(inode);
                if (ret) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        goto out_mutex;
                }
 
                ret = ext4_update_disksize_before_punch(inode, offset, len);
                if (ret) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        goto out_mutex;
                }
                /* Now release the pages and zero block aligned part of pages */
@@ -4579,7 +4580,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
                ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
                                             flags);
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
                if (ret)
                        goto out_mutex;
        }
@@ -5221,6 +5222,7 @@ out:
 static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 {
        struct super_block *sb = inode->i_sb;
+       struct address_space *mapping = inode->i_mapping;
        ext4_lblk_t punch_start, punch_stop;
        handle_t *handle;
        unsigned int credits;
@@ -5274,7 +5276,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = ext4_break_layouts(inode);
        if (ret)
@@ -5289,15 +5291,15 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
         * Write tail of the last page before removed range since it will get
         * removed from the page cache below.
         */
-       ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
+       ret = filemap_write_and_wait_range(mapping, ioffset, offset);
        if (ret)
                goto out_mmap;
        /*
         * Write data that will be shifted to preserve them when discarding
         * page cache below. We are also protected from pages becoming dirty
-        * by i_mmap_sem.
+        * by i_rwsem and invalidate_lock.
         */
-       ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
+       ret = filemap_write_and_wait_range(mapping, offset + len,
                                           LLONG_MAX);
        if (ret)
                goto out_mmap;
@@ -5350,7 +5352,7 @@ out_stop:
        ext4_journal_stop(handle);
        ext4_fc_stop_ineligible(sb);
 out_mmap:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 out_mutex:
        inode_unlock(inode);
        return ret;
@@ -5367,6 +5369,7 @@ out_mutex:
 static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 {
        struct super_block *sb = inode->i_sb;
+       struct address_space *mapping = inode->i_mapping;
        handle_t *handle;
        struct ext4_ext_path *path;
        struct ext4_extent *extent;
@@ -5425,7 +5428,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = ext4_break_layouts(inode);
        if (ret)
@@ -5526,7 +5529,7 @@ out_stop:
        ext4_journal_stop(handle);
        ext4_fc_stop_ineligible(sb);
 out_mmap:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 out_mutex:
        inode_unlock(inode);
        return ret;
index 816dedc..d3b4ed9 100644 (file)
@@ -704,22 +704,23 @@ static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
+       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        pfn_t pfn;
 
        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
-               down_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock_shared(mapping);
 retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
-                       up_read(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock_shared(mapping);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
-               down_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock_shared(mapping);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
@@ -731,10 +732,10 @@ retry:
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock_shared(mapping);
                sb_end_pagefault(sb);
        } else {
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock_shared(mapping);
        }
 
        return result;
@@ -756,7 +757,7 @@ static const struct vm_operations_struct ext4_dax_vm_ops = {
 #endif
 
 static const struct vm_operations_struct ext4_file_vm_ops = {
-       .fault          = ext4_filemap_fault,
+       .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
 };
index d8de607..325c038 100644 (file)
@@ -3950,20 +3950,19 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
        return ret;
 }
 
-static void ext4_wait_dax_page(struct ext4_inode_info *ei)
+static void ext4_wait_dax_page(struct inode *inode)
 {
-       up_write(&ei->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        schedule();
-       down_write(&ei->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 }
 
 int ext4_break_layouts(struct inode *inode)
 {
-       struct ext4_inode_info *ei = EXT4_I(inode);
        struct page *page;
        int error;
 
-       if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
+       if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
                return -EINVAL;
 
        do {
@@ -3974,7 +3973,7 @@ int ext4_break_layouts(struct inode *inode)
                error = ___wait_var_event(&page->_refcount,
                                atomic_read(&page->_refcount) == 1,
                                TASK_INTERRUPTIBLE, 0, 0,
-                               ext4_wait_dax_page(ei));
+                               ext4_wait_dax_page(inode));
        } while (error == 0);
 
        return error;
@@ -4005,9 +4004,9 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 
        ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
        if (ext4_has_inline_data(inode)) {
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
                ret = ext4_convert_inline_data(inode);
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
                if (ret)
                        return ret;
        }
@@ -4058,7 +4057,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = ext4_break_layouts(inode);
        if (ret)
@@ -4131,7 +4130,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 out_stop:
        ext4_journal_stop(handle);
 out_dio:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 out_mutex:
        inode_unlock(inode);
        return ret;
@@ -5426,11 +5425,11 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                        inode_dio_wait(inode);
                }
 
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
 
                rc = ext4_break_layouts(inode);
                if (rc) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(inode->i_mapping);
                        goto err_out;
                }
 
@@ -5506,7 +5505,7 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                                error = rc;
                }
 out_mmap_sem:
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
        }
 
        if (!error) {
@@ -5983,10 +5982,10 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
         * data (and journalled aops don't know how to handle these cases).
         */
        if (val) {
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
                err = filemap_write_and_wait(inode->i_mapping);
                if (err < 0) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(inode->i_mapping);
                        return err;
                }
        }
@@ -6019,7 +6018,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
        percpu_up_write(&sbi->s_writepages_rwsem);
 
        if (val)
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
 
        /* Finally we can mark the inode as dirty. */
 
@@ -6063,7 +6062,7 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
        sb_start_pagefault(inode->i_sb);
        file_update_time(vma->vm_file);
 
-       down_read(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock_shared(mapping);
 
        err = ext4_convert_inline_data(inode);
        if (err)
@@ -6176,7 +6175,7 @@ retry_alloc:
 out_ret:
        ret = block_page_mkwrite_return(err);
 out:
-       up_read(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock_shared(mapping);
        sb_end_pagefault(inode->i_sb);
        return ret;
 out_error:
@@ -6184,15 +6183,3 @@ out_error:
        ext4_journal_stop(handle);
        goto out;
 }
-
-vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
-{
-       struct inode *inode = file_inode(vmf->vma->vm_file);
-       vm_fault_t ret;
-
-       down_read(&EXT4_I(inode)->i_mmap_sem);
-       ret = filemap_fault(vmf);
-       up_read(&EXT4_I(inode)->i_mmap_sem);
-
-       return ret;
-}
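
/*
 * Illustrative sketch, not part of the merged patches: the ext2, ext4, f2fs
 * and fuse hunks in this series all follow the same conversion.  The
 * per-filesystem rwsems (dax_sem, i_mmap_sem) that serialized truncate and
 * hole punching against page faults are replaced by the mapping-wide
 * invalidate_lock, taken exclusively on the truncate side and shared on the
 * fault side.  The two functions below are hypothetical examples of that
 * pattern for a generic filesystem.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Truncate-like path: the exclusive lock blocks concurrent page faults. */
static void example_truncate(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;

	filemap_invalidate_lock(mapping);
	truncate_setsize(inode, newsize);
	/* filesystem-specific block freeing would run here */
	filemap_invalidate_unlock(mapping);
}

/* Fault path: the shared lock keeps faults out of ranges being invalidated. */
static vm_fault_t example_dax_fault(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	vm_fault_t ret;

	filemap_invalidate_lock_shared(mapping);
	ret = VM_FAULT_NOPAGE;	/* a real handler would call dax_iomap_fault() here */
	filemap_invalidate_unlock_shared(mapping);
	return ret;
}
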
index 6eed617..4fb5fe0 100644 (file)
@@ -148,7 +148,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
                goto journal_err_out;
        }
 
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
        err = filemap_write_and_wait(inode->i_mapping);
        if (err)
                goto err_out;
@@ -256,7 +256,7 @@ err_out1:
        ext4_double_up_write_data_sem(inode, inode_bl);
 
 err_out:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 journal_err_out:
        unlock_two_nondirectories(inode, inode_bl);
        iput(inode_bl);
index bc364c1..cebea42 100644 (file)
@@ -138,7 +138,7 @@ static int kmmpd(void *data)
        unsigned mmp_check_interval;
        unsigned long last_update_time;
        unsigned long diff;
-       int retval;
+       int retval = 0;
 
        mmp_block = le64_to_cpu(es->s_mmp_block);
        mmp = (struct mmp_struct *)(bh->b_data);
index 5fd56f6..f3bbcd4 100644 (file)
@@ -2517,7 +2517,7 @@ again:
                                goto journal_error;
                        err = ext4_handle_dirty_dx_node(handle, dir,
                                                        frame->bh);
-                       if (err)
+                       if (restart || err)
                                goto journal_error;
                } else {
                        struct dx_root *dxroot;
index dfa09a2..d6df62f 100644 (file)
@@ -90,12 +90,9 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
 /*
  * Lock ordering
  *
- * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
- * i_mmap_rwsem (inode->i_mmap_rwsem)!
- *
  * page fault path:
- * mmap_lock -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
- *   page lock -> i_data_sem (rw)
+ * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start
+ *   -> page lock -> i_data_sem (rw)
  *
  * buffered write path:
  * sb_start_write -> i_mutex -> mmap_lock
@@ -103,8 +100,9 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
  *   i_data_sem (rw)
  *
  * truncate:
- * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
- * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
+ * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) ->
+ *   page lock
+ * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start ->
  *   i_data_sem (rw)
  *
  * direct IO:
@@ -1360,7 +1358,6 @@ static void init_once(void *foo)
        INIT_LIST_HEAD(&ei->i_orphan);
        init_rwsem(&ei->xattr_sem);
        init_rwsem(&ei->i_data_sem);
-       init_rwsem(&ei->i_mmap_sem);
        inode_init_once(&ei->vfs_inode);
        ext4_fc_init_inode(&ei->vfs_inode);
 }
index dd05af9..6910974 100644 (file)
@@ -52,10 +52,20 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
        return paddr;
 }
 
+static int ext4_encrypted_symlink_getattr(struct user_namespace *mnt_userns,
+                                         const struct path *path,
+                                         struct kstat *stat, u32 request_mask,
+                                         unsigned int query_flags)
+{
+       ext4_getattr(mnt_userns, path, stat, request_mask, query_flags);
+
+       return fscrypt_symlink_getattr(path, stat);
+}
+
 const struct inode_operations ext4_encrypted_symlink_inode_operations = {
        .get_link       = ext4_encrypted_get_link,
        .setattr        = ext4_setattr,
-       .getattr        = ext4_getattr,
+       .getattr        = ext4_encrypted_symlink_getattr,
        .listxattr      = ext4_listxattr,
 };
 
index bcbe366..ce84aa2 100644 (file)
  */
 static inline void ext4_truncate_failed_write(struct inode *inode)
 {
+       struct address_space *mapping = inode->i_mapping;
+
        /*
         * We don't need to call ext4_break_layouts() because the blocks we
         * are truncating were never visible to userspace.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
-       truncate_inode_pages(inode->i_mapping, inode->i_size);
+       filemap_invalidate_lock(mapping);
+       truncate_inode_pages(mapping, inode->i_size);
        ext4_truncate(inode);
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 }
 
 /*
index d2cf48c..eb222b3 100644 (file)
@@ -3187,12 +3187,12 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
        /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
        if (to > i_size && !f2fs_verity_in_progress(inode)) {
                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-               down_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
 
                truncate_pagecache(inode, i_size);
                f2fs_truncate_blocks(inode, i_size, true);
 
-               up_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        }
 }
@@ -3852,7 +3852,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
        int ret = 0;
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        set_inode_flag(inode, FI_ALIGNED_WRITE);
 
@@ -3894,7 +3894,7 @@ done:
        clear_inode_flag(inode, FI_DO_DEFRAG);
        clear_inode_flag(inode, FI_ALIGNED_WRITE);
 
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
        return ret;
index ee8eb33..906b2c4 100644 (file)
@@ -754,7 +754,6 @@ struct f2fs_inode_info {
 
        /* avoid racing between foreground op and gc */
        struct rw_semaphore i_gc_rwsem[2];
-       struct rw_semaphore i_mmap_sem;
        struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */
 
        int i_extra_isize;              /* size of extra space located in i_addr */
index 6afd456..1ff3337 100644 (file)
@@ -38,10 +38,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
        struct inode *inode = file_inode(vmf->vma->vm_file);
        vm_fault_t ret;
 
-       down_read(&F2FS_I(inode)->i_mmap_sem);
        ret = filemap_fault(vmf);
-       up_read(&F2FS_I(inode)->i_mmap_sem);
-
        if (!ret)
                f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
                                                        F2FS_BLKSIZE);
@@ -101,7 +98,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
        f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
 
        file_update_time(vmf->vma->vm_file);
-       down_read(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping ||
                        page_offset(page) > i_size_read(inode) ||
@@ -159,7 +156,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 
        trace_f2fs_vm_page_mkwrite(page, DATA);
 out_sem:
-       up_read(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
 
        sb_end_pagefault(inode->i_sb);
 err:
@@ -940,7 +937,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                }
 
                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-               down_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
 
                truncate_setsize(inode, attr->ia_size);
 
@@ -950,7 +947,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                 * do not trim all blocks after i_size if target size is
                 * larger than i_size.
                 */
-               up_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                if (err)
                        return err;
@@ -1095,7 +1092,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
                        blk_end = (loff_t)pg_end << PAGE_SHIFT;
 
                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-                       down_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_lock(mapping);
 
                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);
@@ -1104,7 +1101,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
                        ret = f2fs_truncate_hole(inode, pg_start, pg_end);
                        f2fs_unlock_op(sbi);
 
-                       up_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                }
        }
@@ -1339,7 +1336,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 
        /* avoid gc operation during block exchange */
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        f2fs_lock_op(sbi);
        f2fs_drop_extent_tree(inode);
@@ -1347,7 +1344,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
        ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
        f2fs_unlock_op(sbi);
 
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        return ret;
 }
@@ -1378,13 +1375,13 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
                return ret;
 
        /* write out all moved pages, if possible */
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
        filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        truncate_pagecache(inode, offset);
 
        new_size = i_size_read(inode) - len;
        ret = f2fs_truncate_blocks(inode, new_size, true);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        if (!ret)
                f2fs_i_size_write(inode, new_size);
        return ret;
@@ -1484,7 +1481,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        pgoff_t end;
 
                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-                       down_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_lock(mapping);
 
                        truncate_pagecache_range(inode,
                                (loff_t)index << PAGE_SHIFT,
@@ -1496,7 +1493,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
                        if (ret) {
                                f2fs_unlock_op(sbi);
-                               up_write(&F2FS_I(inode)->i_mmap_sem);
+                               filemap_invalidate_unlock(mapping);
                                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                                goto out;
                        }
@@ -1508,7 +1505,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        f2fs_put_dnode(&dn);
 
                        f2fs_unlock_op(sbi);
-                       up_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
                        f2fs_balance_fs(sbi, dn.node_changed);
@@ -1543,6 +1540,7 @@ out:
 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct address_space *mapping = inode->i_mapping;
        pgoff_t nr, pg_start, pg_end, delta, idx;
        loff_t new_size;
        int ret = 0;
@@ -1565,14 +1563,14 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
        f2fs_balance_fs(sbi, true);
 
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
        ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
        if (ret)
                return ret;
 
        /* write out all dirty pages from offset */
-       ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+       ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
        if (ret)
                return ret;
 
@@ -1583,7 +1581,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
        /* avoid gc operation during block exchange */
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
        truncate_pagecache(inode, offset);
 
        while (!ret && idx > pg_start) {
@@ -1599,14 +1597,14 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
                                        idx + delta, nr, false);
                f2fs_unlock_op(sbi);
        }
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
        /* write out all moved pages, if possible */
-       down_write(&F2FS_I(inode)->i_mmap_sem);
-       filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+       filemap_invalidate_lock(mapping);
+       filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
        truncate_pagecache(inode, offset);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 
        if (!ret)
                f2fs_i_size_write(inode, new_size);
@@ -3440,7 +3438,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
                goto out;
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
@@ -3476,7 +3474,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
        }
 
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 out:
        inode_unlock(inode);
 
@@ -3593,7 +3591,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
        }
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
@@ -3629,7 +3627,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
        }
 
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 
        if (ret >= 0) {
                clear_inode_flag(inode, FI_COMPRESS_RELEASED);
@@ -3748,7 +3746,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
                goto err;
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = filemap_write_and_wait_range(mapping, range.start,
                        to_end ? LLONG_MAX : end_addr - 1);
@@ -3835,7 +3833,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
                ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
                                prev_block, len, range.flags);
 out:
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 err:
        inode_unlock(inode);
@@ -4313,9 +4311,9 @@ write:
                /* if we couldn't write data, we should deallocate blocks. */
                if (preallocated && i_size_read(inode) < target_size) {
                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-                       down_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_lock(inode->i_mapping);
                        f2fs_truncate(inode);
-                       up_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(inode->i_mapping);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                }
 
index e149c8c..9c528e5 100644 (file)
@@ -1323,9 +1323,19 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
        return target;
 }
 
+static int f2fs_encrypted_symlink_getattr(struct user_namespace *mnt_userns,
+                                         const struct path *path,
+                                         struct kstat *stat, u32 request_mask,
+                                         unsigned int query_flags)
+{
+       f2fs_getattr(mnt_userns, path, stat, request_mask, query_flags);
+
+       return fscrypt_symlink_getattr(path, stat);
+}
+
 const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
        .get_link       = f2fs_encrypted_get_link,
-       .getattr        = f2fs_getattr,
+       .getattr        = f2fs_encrypted_symlink_getattr,
        .setattr        = f2fs_setattr,
        .listxattr      = f2fs_listxattr,
 };
index 8fecd30..ce2ab1b 100644 (file)
@@ -1289,7 +1289,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
        mutex_init(&fi->inmem_lock);
        init_rwsem(&fi->i_gc_rwsem[READ]);
        init_rwsem(&fi->i_gc_rwsem[WRITE]);
-       init_rwsem(&fi->i_mmap_sem);
        init_rwsem(&fi->i_xattr_sem);
 
        /* Will be used by directory only */
index 6642246..daad532 100644 (file)
@@ -378,7 +378,7 @@ out:
                ret = kstrtol(name, 10, &data);
                if (ret)
                        return ret;
-               if (data >= IOPRIO_BE_NR || data < 0)
+               if (data >= IOPRIO_NR_LEVELS || data < 0)
                        return -EINVAL;
 
                cprc->ckpt_thread_ioprio = IOPRIO_PRIO_VALUE(class, data);
index 860e884..978ac67 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/blkdev.h>
 #include <linux/sched/signal.h>
+#include <linux/backing-dev-defs.h>
 #include "fat.h"
 
 struct fatent_operations {
index f946bec..68added 100644 (file)
@@ -150,7 +150,8 @@ void f_delown(struct file *filp)
 pid_t f_getown(struct file *filp)
 {
        pid_t pid = 0;
-       read_lock(&filp->f_owner.lock);
+
+       read_lock_irq(&filp->f_owner.lock);
        rcu_read_lock();
        if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
                pid = pid_vnr(filp->f_owner.pid);
@@ -158,7 +159,7 @@ pid_t f_getown(struct file *filp)
                        pid = -pid;
        }
        rcu_read_unlock();
-       read_unlock(&filp->f_owner.lock);
+       read_unlock_irq(&filp->f_owner.lock);
        return pid;
 }
 
@@ -208,7 +209,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
        struct f_owner_ex owner = {};
        int ret = 0;
 
-       read_lock(&filp->f_owner.lock);
+       read_lock_irq(&filp->f_owner.lock);
        rcu_read_lock();
        if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
                owner.pid = pid_vnr(filp->f_owner.pid);
@@ -231,7 +232,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
                ret = -EINVAL;
                break;
        }
-       read_unlock(&filp->f_owner.lock);
+       read_unlock_irq(&filp->f_owner.lock);
 
        if (!ret) {
                ret = copy_to_user(owner_p, &owner, sizeof(owner));
@@ -249,10 +250,10 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
        uid_t src[2];
        int err;
 
-       read_lock(&filp->f_owner.lock);
+       read_lock_irq(&filp->f_owner.lock);
        src[0] = from_kuid(user_ns, filp->f_owner.uid);
        src[1] = from_kuid(user_ns, filp->f_owner.euid);
-       read_unlock(&filp->f_owner.lock);
+       read_unlock_irq(&filp->f_owner.lock);
 
        err  = put_user(src[0], &dst[0]);
        err |= put_user(src[1], &dst[1]);
@@ -1003,13 +1004,14 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
 {
        while (fa) {
                struct fown_struct *fown;
+               unsigned long flags;
 
                if (fa->magic != FASYNC_MAGIC) {
                        printk(KERN_ERR "kill_fasync: bad magic number in "
                               "fasync_struct!\n");
                        return;
                }
-               read_lock(&fa->fa_lock);
+               read_lock_irqsave(&fa->fa_lock, flags);
                if (fa->fa_file) {
                        fown = &fa->fa_file->f_owner;
                        /* Don't send SIGURG to processes which have not set a
@@ -1018,7 +1020,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
                        if (!(sig == SIGURG && fown->signum == 0))
                                send_sigio(fown, fa->fa_fd, band);
                }
-               read_unlock(&fa->fa_lock);
+               read_unlock_irqrestore(&fa->fa_lock, flags);
                fa = rcu_dereference(fa->fa_next);
        }
 }
index 86dc995..d8afa82 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -596,18 +596,32 @@ void fd_install(unsigned int fd, struct file *file)
 
 EXPORT_SYMBOL(fd_install);
 
+/**
+ * pick_file - return file associated with fd
+ * @files: file struct to retrieve file from
+ * @fd: file descriptor to retrieve file for
+ *
+ * If this function returns an EINVAL error pointer, the fd was beyond the
+ * current maximum number of file descriptors for that fdtable.
+ *
+ * Returns: The file associated with @fd, on error returns an error pointer.
+ */
 static struct file *pick_file(struct files_struct *files, unsigned fd)
 {
-       struct file *file = NULL;
+       struct file *file;
        struct fdtable *fdt;
 
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
-       if (fd >= fdt->max_fds)
+       if (fd >= fdt->max_fds) {
+               file = ERR_PTR(-EINVAL);
                goto out_unlock;
+       }
        file = fdt->fd[fd];
-       if (!file)
+       if (!file) {
+               file = ERR_PTR(-EBADF);
                goto out_unlock;
+       }
        rcu_assign_pointer(fdt->fd[fd], NULL);
        __put_unused_fd(files, fd);
 
@@ -622,7 +636,7 @@ int close_fd(unsigned fd)
        struct file *file;
 
        file = pick_file(files, fd);
-       if (!file)
+       if (IS_ERR(file))
                return -EBADF;
 
        return filp_close(file, files);
@@ -663,11 +677,16 @@ static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
                struct file *file;
 
                file = pick_file(cur_fds, fd++);
-               if (!file)
+               if (!IS_ERR(file)) {
+                       /* found a valid file to close */
+                       filp_close(file, cur_fds);
+                       cond_resched();
                        continue;
+               }
 
-               filp_close(file, cur_fds);
-               cond_resched();
+               /* beyond the last fd in that table */
+               if (PTR_ERR(file) == -EINVAL)
+                       return;
        }
 }
 
@@ -682,7 +701,6 @@ static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
  */
 int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
 {
-       unsigned int cur_max;
        struct task_struct *me = current;
        struct files_struct *cur_fds = me->files, *fds = NULL;
 
@@ -692,26 +710,26 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
        if (fd > max_fd)
                return -EINVAL;
 
-       rcu_read_lock();
-       cur_max = files_fdtable(cur_fds)->max_fds;
-       rcu_read_unlock();
-
-       /* cap to last valid index into fdtable */
-       cur_max--;
-
        if (flags & CLOSE_RANGE_UNSHARE) {
                int ret;
                unsigned int max_unshare_fds = NR_OPEN_MAX;
 
                /*
-                * If the requested range is greater than the current maximum,
-                * we're closing everything so only copy all file descriptors
-                * beneath the lowest file descriptor.
-                * If the caller requested all fds to be made cloexec copy all
-                * of the file descriptors since they still want to use them.
+                * If the caller requested all fds to be made cloexec we always
+                * copy all of the file descriptors since they still want to
+                * use them.
                 */
-               if (!(flags & CLOSE_RANGE_CLOEXEC) && (max_fd >= cur_max))
-                       max_unshare_fds = fd;
+               if (!(flags & CLOSE_RANGE_CLOEXEC)) {
+                       /*
+                        * If the requested range is greater than the current
+                        * maximum, we're closing everything so only copy all
+                        * file descriptors beneath the lowest file descriptor.
+                        */
+                       rcu_read_lock();
+                       if (max_fd >= last_fd(files_fdtable(cur_fds)))
+                               max_unshare_fds = fd;
+                       rcu_read_unlock();
+               }
 
                ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
                if (ret)
@@ -725,8 +743,6 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
                        swap(cur_fds, fds);
        }
 
-       max_fd = min(max_fd, cur_max);
-
        if (flags & CLOSE_RANGE_CLOEXEC)
                __range_cloexec(cur_fds, fd, max_fd);
        else
index 4c33705..eb57dad 100644 (file)
@@ -2729,23 +2729,6 @@ int write_inode_now(struct inode *inode, int sync)
 }
 EXPORT_SYMBOL(write_inode_now);
 
-/**
- * sync_inode - write an inode and its pages to disk.
- * @inode: the inode to sync
- * @wbc: controls the writeback mode
- *
- * sync_inode() will write an inode and its pages to disk.  It will also
- * correctly update the inode on its superblock's dirty inode lists and will
- * update inode->i_state.
- *
- * The caller must have a ref on the inode.
- */
-int sync_inode(struct inode *inode, struct writeback_control *wbc)
-{
-       return writeback_single_inode(inode, wbc);
-}
-EXPORT_SYMBOL(sync_inode);
-
 /**
  * sync_inode_metadata - write an inode to disk
  * @inode: the inode to sync
@@ -2762,6 +2745,6 @@ int sync_inode_metadata(struct inode *inode, int wait)
                .nr_to_write = 0, /* metadata-only */
        };
 
-       return sync_inode(inode, &wbc);
+       return writeback_single_inode(inode, &wbc);
 }
 EXPORT_SYMBOL(sync_inode_metadata);
index e557237..281d79f 100644 (file)
@@ -444,12 +444,12 @@ static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
        /*
         * Can't do inline reclaim in fault path. We call
         * dax_layout_busy_page() before we free a range. And
-        * fuse_wait_dax_page() drops fi->i_mmap_sem lock and requires it.
-        * In fault path we enter with fi->i_mmap_sem held and can't drop
-        * it. Also in fault path we hold fi->i_mmap_sem shared and not
-        * exclusive, so that creates further issues with fuse_wait_dax_page().
-        * Hence return -EAGAIN and fuse_dax_fault() will wait for a memory
-        * range to become free and retry.
+        * fuse_wait_dax_page() drops mapping->invalidate_lock and requires it.
+        * In fault path we enter with mapping->invalidate_lock held and can't
+        * drop it. Also in fault path we hold mapping->invalidate_lock shared
+        * and not exclusive, so that creates further issues with
+        * fuse_wait_dax_page().  Hence return -EAGAIN and fuse_dax_fault()
+        * will wait for a memory range to become free and retry.
         */
        if (flags & IOMAP_FAULT) {
                alloc_dmap = alloc_dax_mapping(fcd);
@@ -513,7 +513,7 @@ static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
        down_write(&fi->dax->sem);
        node = interval_tree_iter_first(&fi->dax->tree, idx, idx);
 
-       /* We are holding either inode lock or i_mmap_sem, and that should
+       /* We are holding either inode lock or invalidate_lock, and that should
         * ensure that dmap can't be truncated. We are holding a reference
         * on dmap and that should make sure it can't be reclaimed. So dmap
         * should still be there in tree despite the fact we dropped and
@@ -660,14 +660,12 @@ static const struct iomap_ops fuse_iomap_ops = {
 
 static void fuse_wait_dax_page(struct inode *inode)
 {
-       struct fuse_inode *fi = get_fuse_inode(inode);
-
-       up_write(&fi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        schedule();
-       down_write(&fi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 }
 
-/* Should be called with fi->i_mmap_sem lock held exclusively */
+/* Should be called with mapping->invalidate_lock held exclusively */
 static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
                                    loff_t start, loff_t end)
 {
@@ -813,18 +811,18 @@ retry:
         * we do not want any read/write/mmap to make progress and try
         * to populate page cache or access memory we are trying to free.
         */
-       down_read(&get_fuse_inode(inode)->i_mmap_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
        ret = dax_iomap_fault(vmf, pe_size, &pfn, &error, &fuse_iomap_ops);
        if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
                error = 0;
                retry = true;
-               up_read(&get_fuse_inode(inode)->i_mmap_sem);
+               filemap_invalidate_unlock_shared(inode->i_mapping);
                goto retry;
        }
 
        if (ret & VM_FAULT_NEEDDSYNC)
                ret = dax_finish_sync_fault(vmf, pe_size, pfn);
-       up_read(&get_fuse_inode(inode)->i_mmap_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
 
        if (write)
                sb_end_pagefault(sb);
@@ -960,7 +958,7 @@ inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
        int ret;
        struct interval_tree_node *node;
 
-       down_write(&fi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        /* Lookup a dmap and corresponding file offset to reclaim. */
        down_read(&fi->dax->sem);
@@ -1021,7 +1019,7 @@ inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
 out_write_dmap_sem:
        up_write(&fi->dax->sem);
 out_mmap_sem:
-       up_write(&fi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        return dmap;
 }
 
@@ -1050,10 +1048,10 @@ alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
                 * had a reference or some other temporary failure,
                 * Try again. We want to give up inline reclaim only
                 * if there is no range assigned to this node. Otherwise
-                * if a deadlock is possible if we sleep with fi->i_mmap_sem
-                * held and worker to free memory can't make progress due
-                * to unavailability of fi->i_mmap_sem lock. So sleep
-                * only if fi->dax->nr=0
+                * if a deadlock is possible if we sleep with
+                * mapping->invalidate_lock held and worker to free memory
+                * can't make progress due to unavailability of
+                * mapping->invalidate_lock.  So sleep only if fi->dax->nr=0
                 */
                if (retry)
                        continue;
@@ -1061,8 +1059,8 @@ alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
                 * There are no mappings which can be reclaimed. Wait for one.
                 * We are not holding fi->dax->sem. So it is possible
                 * that range gets added now. But as we are not holding
-                * fi->i_mmap_sem, worker should still be able to free up
-                * a range and wake us up.
+                * mapping->invalidate_lock, worker should still be able to
+                * free up a range and wake us up.
                 */
                if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
                        if (wait_event_killable_exclusive(fcd->range_waitq,
@@ -1108,7 +1106,7 @@ static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
 /*
  * Free a range of memory.
  * Locking:
- * 1. Take fi->i_mmap_sem to block dax faults.
+ * 1. Take mapping->invalidate_lock to block dax faults.
  * 2. Take fi->dax->sem to protect interval tree and also to make sure
  *    read/write can not reuse a dmap which we might be freeing.
  */
@@ -1122,7 +1120,7 @@ static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
        loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
        loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;
 
-       down_write(&fi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
        ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
        if (ret) {
                pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
@@ -1134,7 +1132,7 @@ static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
        ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
        up_write(&fi->dax->sem);
 out_mmap_sem:
-       up_write(&fi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        return ret;
 }
 
@@ -1235,8 +1233,6 @@ void fuse_dax_conn_free(struct fuse_conn *fc)
 static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
 {
        long nr_pages, nr_ranges;
-       void *kaddr;
-       pfn_t pfn;
        struct fuse_dax_mapping *range;
        int ret, id;
        size_t dax_size = -1;
@@ -1248,8 +1244,8 @@ static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
        INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
 
        id = dax_read_lock();
-       nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), &kaddr,
-                                    &pfn);
+       nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), NULL,
+                                    NULL);
        dax_read_unlock(id);
        if (nr_pages < 0) {
                pr_debug("dax_direct_access() returned %ld\n", nr_pages);
index eade6f9..d9b977c 100644 (file)
@@ -1556,6 +1556,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
        struct fuse_mount *fm = get_fuse_mount(inode);
        struct fuse_conn *fc = fm->fc;
        struct fuse_inode *fi = get_fuse_inode(inode);
+       struct address_space *mapping = inode->i_mapping;
        FUSE_ARGS(args);
        struct fuse_setattr_in inarg;
        struct fuse_attr_out outarg;
@@ -1580,11 +1581,11 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
        }
 
        if (FUSE_IS_DAX(inode) && is_truncate) {
-               down_write(&fi->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
                fault_blocked = true;
                err = fuse_dax_break_layouts(inode, 0, 0);
                if (err) {
-                       up_write(&fi->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        return err;
                }
        }
@@ -1694,13 +1695,13 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
        if ((is_truncate || !is_wb) &&
            S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
                truncate_pagecache(inode, outarg.attr.size);
-               invalidate_inode_pages2(inode->i_mapping);
+               invalidate_inode_pages2(mapping);
        }
 
        clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 out:
        if (fault_blocked)
-               up_write(&fi->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
 
        return 0;
 
@@ -1711,7 +1712,7 @@ error:
        clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 
        if (fault_blocked)
-               up_write(&fi->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
        return err;
 }
 
index 97f860c..621a662 100644 (file)
@@ -243,7 +243,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
        }
 
        if (dax_truncate) {
-               down_write(&get_fuse_inode(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
                err = fuse_dax_break_layouts(inode, 0, 0);
                if (err)
                        goto out;
@@ -255,7 +255,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
 
 out:
        if (dax_truncate)
-               up_write(&get_fuse_inode(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
 
        if (is_wb_truncate | dax_truncate) {
                fuse_release_nowrite(inode);
@@ -2920,7 +2920,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
        if (lock_inode) {
                inode_lock(inode);
                if (block_faults) {
-                       down_write(&fi->i_mmap_sem);
+                       filemap_invalidate_lock(inode->i_mapping);
                        err = fuse_dax_break_layouts(inode, 0, 0);
                        if (err)
                                goto out;
@@ -2976,7 +2976,7 @@ out:
                clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 
        if (block_faults)
-               up_write(&fi->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
 
        if (lock_inode)
                inode_unlock(inode);
@@ -3045,7 +3045,7 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
         * modifications.  Yet this does give less guarantees than if the
         * copying was performed with write(2).
         *
-        * To fix this a i_mmap_sem style lock could be used to prevent new
+        * To fix this a mapping->invalidate_lock could be used to prevent new
         * faults while the copy is ongoing.
         */
        err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
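
The fuse hunks above swap the filesystem-private fi->i_mmap_sem for the per-mapping invalidate_lock introduced in this cycle: the write side is held while truncating or breaking DAX layouts, and the fault paths take it shared. A minimal sketch of the resulting truncate-style pattern, where fs_break_layouts() is a placeholder name rather than a real fuse or VFS helper:

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Sketch only: fs_break_layouts() is a stand-in for something like
     * fuse_dax_break_layouts().  Holding invalidate_lock exclusively keeps
     * out fault paths that take it shared while the page cache is cut. */
    static int truncate_blocking_faults(struct inode *inode, loff_t newsize)
    {
            struct address_space *mapping = inode->i_mapping;
            int err;

            filemap_invalidate_lock(mapping);
            err = fs_break_layouts(inode);          /* placeholder helper */
            if (!err)
                    truncate_pagecache(inode, newsize);
            filemap_invalidate_unlock(mapping);
            return err;
    }
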
index 07829ce..6fb639b 100644 (file)
@@ -149,13 +149,6 @@ struct fuse_inode {
        /** Lock to protect write related fields */
        spinlock_t lock;
 
-       /**
-        * Can't take inode lock in fault path (leads to circular dependency).
-        * Introduce another semaphore which can be taken in fault path and
-        * then other filesystem paths can take this to block faults.
-        */
-       struct rw_semaphore i_mmap_sem;
-
 #ifdef CONFIG_FUSE_DAX
        /*
         * Dax specific inode data
index b9beb39..e07e429 100644 (file)
@@ -85,7 +85,6 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        fi->orig_ino = 0;
        fi->state = 0;
        mutex_init(&fi->mutex);
-       init_rwsem(&fi->i_mmap_sem);
        spin_lock_init(&fi->lock);
        fi->forget = fuse_alloc_forget();
        if (!fi->forget)
index 81d8f06..005e920 100644 (file)
@@ -574,10 +574,9 @@ void adjust_fs_space(struct inode *inode)
 {
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
-       struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
-       struct buffer_head *m_bh, *l_bh;
+       struct buffer_head *m_bh;
        u64 fs_total, new_free;
 
        if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
@@ -600,11 +599,7 @@ void adjust_fs_space(struct inode *inode)
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);
 
-       if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
-               goto out2;
-       update_statfs(sdp, m_bh, l_bh);
-       brelse(l_bh);
-out2:
+       update_statfs(sdp, m_bh);
        brelse(m_bh);
 out:
        sdp->sd_rindex_uptodate = 0;
index ed8b67b..5414c2c 100644 (file)
@@ -1002,7 +1002,7 @@ static void gfs2_write_unlock(struct inode *inode)
 }
 
 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
-                                  unsigned len, struct iomap *iomap)
+                                  unsigned len)
 {
        unsigned int blockmask = i_blocksize(inode) - 1;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -1013,8 +1013,7 @@ static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
 }
 
 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
-                                unsigned copied, struct page *page,
-                                struct iomap *iomap)
+                                unsigned copied, struct page *page)
 {
        struct gfs2_trans *tr = current->journal_info;
        struct gfs2_inode *ip = GFS2_I(inode);
index 84ec053..c559827 100644 (file)
@@ -1237,9 +1237,6 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
-
        if (cmd == F_CANCELLK) {
                /* Hack: */
                cmd = F_SETLK;
index 1f3902e..e0eaa9c 100644 (file)
@@ -1494,12 +1494,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 
        list_del_init(&gh->gh_list);
        clear_bit(HIF_HOLDER, &gh->gh_iflags);
-       if (find_first_holder(gl) == NULL) {
-               if (list_empty(&gl->gl_holders) &&
-                   !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
-                   !test_bit(GLF_DEMOTE, &gl->gl_flags))
-                       fast_path = 1;
-       }
+       if (list_empty(&gl->gl_holders) &&
+           !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+           !test_bit(GLF_DEMOTE, &gl->gl_flags))
+               fast_path = 1;
+
        if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
                gfs2_glock_add_to_lru(gl);
 
@@ -2077,8 +2076,6 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
                *p++ = 'H';
        if (test_bit(HIF_WAIT, &iflags))
                *p++ = 'W';
-       if (test_bit(HIF_FIRST, &iflags))
-               *p++ = 'F';
        *p = 0;
        return buf;
 }
index 54d3fbe..79c621c 100644 (file)
@@ -33,16 +33,18 @@ extern struct workqueue_struct *gfs2_control_wq;
 
 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
 {
-       fs_err(gl->gl_name.ln_sbd,
+       struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+       fs_err(sdp,
               "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
               "state 0x%lx\n",
               bh, (unsigned long long)bh->b_blocknr, bh->b_state,
               bh->b_page->mapping, bh->b_page->flags);
-       fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
+       fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
               gl->gl_name.ln_type, gl->gl_name.ln_number,
               gfs2_glock2aspace(gl));
-       gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
-       gfs2_withdraw(gl->gl_name.ln_sbd);
+       gfs2_lm(sdp, "AIL error\n");
+       gfs2_withdraw_delayed(sdp);
 }
 
 /**
@@ -610,16 +612,13 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl)
                j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 
                error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
-               if (error)
-                       gfs2_consist(sdp);
-               if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
-                       gfs2_consist(sdp);
-
-               /*  Initialize some head of the log stuff  */
-               if (!gfs2_withdrawn(sdp)) {
-                       sdp->sd_log_sequence = head.lh_sequence + 1;
-                       gfs2_log_pointers_init(sdp, head.lh_blkno);
-               }
+               if (gfs2_assert_withdraw_delayed(sdp, !error))
+                       return error;
+               if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
+                                                GFS2_LOG_HEAD_UNMOUNT))
+                       return -EIO;
+               sdp->sd_log_sequence = head.lh_sequence + 1;
+               gfs2_log_pointers_init(sdp, head.lh_blkno);
        }
        return 0;
 }
index e6f820f..0fe4977 100644 (file)
@@ -253,7 +253,6 @@ struct gfs2_lkstats {
 enum {
        /* States */
        HIF_HOLDER              = 6,  /* Set for gh that "holds" the glock */
-       HIF_FIRST               = 7,
        HIF_WAIT                = 10,
 };
 
@@ -768,6 +767,7 @@ struct gfs2_sbd {
        struct gfs2_glock *sd_jinode_gl;
 
        struct gfs2_holder sd_sc_gh;
+       struct buffer_head *sd_sc_bh;
        struct gfs2_holder sd_qc_gh;
 
        struct completion sd_journal_ready;
index dac0401..50578f8 100644 (file)
@@ -299,6 +299,11 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
        gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
        gfs2_update_request_times(gl);
 
+       /* don't want to call dlm if we've unmounted the lock protocol */
+       if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+               gfs2_glock_free(gl);
+               return;
+       }
        /* don't want to skip dlm_unlock writing the lvb when lock has one */
 
        if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
index 42c15cf..f0ee3ff 100644 (file)
@@ -594,7 +594,7 @@ void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
 {
        unsigned int blks = tr->tr_reserved;
        unsigned int revokes = tr->tr_revokes;
-       unsigned int revoke_blks = 0;
+       unsigned int revoke_blks;
 
        *extra_revokes = 0;
        if (revokes) {
index 8ee05d2..ca0bb3a 100644 (file)
@@ -761,6 +761,32 @@ static void buf_lo_before_scan(struct gfs2_jdesc *jd,
        jd->jd_replayed_blocks = 0;
 }
 
+#define obsolete_rgrp_replay \
+"Replaying 0x%llx from jid=%d/0x%llx but we already have a bh!\n"
+#define obsolete_rgrp_replay2 \
+"busy:%d, pinned:%d rg_gen:0x%llx, j_gen:0x%llx\n"
+
+static void obsolete_rgrp(struct gfs2_jdesc *jd, struct buffer_head *bh_log,
+                         u64 blkno)
+{
+       struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
+       struct gfs2_rgrpd *rgd;
+       struct gfs2_rgrp *jrgd = (struct gfs2_rgrp *)bh_log->b_data;
+
+       rgd = gfs2_blk2rgrpd(sdp, blkno, false);
+       if (rgd && rgd->rd_addr == blkno &&
+           rgd->rd_bits && rgd->rd_bits->bi_bh) {
+               fs_info(sdp, obsolete_rgrp_replay, (unsigned long long)blkno,
+                       jd->jd_jid, bh_log->b_blocknr);
+               fs_info(sdp, obsolete_rgrp_replay2,
+                       buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
+                       buffer_pinned(rgd->rd_bits->bi_bh),
+                       rgd->rd_igeneration,
+                       be64_to_cpu(jrgd->rg_igeneration));
+               gfs2_dump_glock(NULL, rgd->rd_gl, true);
+       }
+}
+
 static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
                                struct gfs2_log_descriptor *ld, __be64 *ptr,
                                int pass)
@@ -799,21 +825,9 @@ static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
                        struct gfs2_meta_header *mh =
                                (struct gfs2_meta_header *)bh_ip->b_data;
 
-                       if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
-                               struct gfs2_rgrpd *rgd;
-
-                               rgd = gfs2_blk2rgrpd(sdp, blkno, false);
-                               if (rgd && rgd->rd_addr == blkno &&
-                                   rgd->rd_bits && rgd->rd_bits->bi_bh) {
-                                       fs_info(sdp, "Replaying 0x%llx but we "
-                                               "already have a bh!\n",
-                                               (unsigned long long)blkno);
-                                       fs_info(sdp, "busy:%d, pinned:%d\n",
-                                               buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
-                                               buffer_pinned(rgd->rd_bits->bi_bh));
-                                       gfs2_dump_glock(NULL, rgd->rd_gl, true);
-                               }
-                       }
+                       if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG))
+                               obsolete_rgrp(jd, bh_log, blkno);
+
                        mark_buffer_dirty(bh_ip);
                }
                brelse(bh_log);
index 7c96199..72d30a6 100644 (file)
@@ -258,8 +258,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
        struct buffer_head *bh, *bhs[2];
        int num = 0;
 
-       if (unlikely(gfs2_withdrawn(sdp)) &&
-           (!sdp->sd_jdesc || gl != sdp->sd_jinode_gl)) {
+       if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp)) {
                *bhp = NULL;
                return -EIO;
        }
@@ -317,7 +316,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 
 int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
 {
-       if (unlikely(gfs2_withdrawn(sdp)))
+       if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
                return -EIO;
 
        wait_on_buffer(bh);
@@ -328,7 +327,7 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
                        gfs2_io_error_bh_wd(sdp, bh);
                return -EIO;
        }
-       if (unlikely(gfs2_withdrawn(sdp)))
+       if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
                return -EIO;
 
        return 0;
index 5f4504d..7f8410d 100644 (file)
@@ -614,6 +614,7 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
                        break;
                }
 
+               d_mark_dontcache(jd->jd_inode);
                spin_lock(&sdp->sd_jindex_spin);
                jd->jd_jid = sdp->sd_journals++;
                jip = GFS2_I(jd->jd_inode);
@@ -677,6 +678,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
                        error = PTR_ERR(lsi->si_sc_inode);
                        fs_err(sdp, "can't find local \"sc\" file#%u: %d\n",
                               jd->jd_jid, error);
+                       kfree(lsi);
                        goto free_local;
                }
                lsi->si_jid = jd->jd_jid;
@@ -695,8 +697,16 @@ static int init_statfs(struct gfs2_sbd *sdp)
                fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
                goto free_local;
        }
+       /* read in the local statfs buffer - other nodes don't change it. */
+       error = gfs2_meta_inode_buffer(ip, &sdp->sd_sc_bh);
+       if (error) {
+               fs_err(sdp, "Cannot read in local statfs: %d\n", error);
+               goto unlock_sd_gh;
+       }
        return 0;
 
+unlock_sd_gh:
+       gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
 free_local:
        free_local_statfs_inodes(sdp);
        iput(pn);
@@ -710,6 +720,7 @@ out:
 static void uninit_statfs(struct gfs2_sbd *sdp)
 {
        if (!sdp->sd_args.ar_spectator) {
+               brelse(sdp->sd_sc_bh);
                gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
                free_local_statfs_inodes(sdp);
        }
@@ -1088,6 +1099,34 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
        kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
 }
 
+static int init_threads(struct gfs2_sbd *sdp)
+{
+       struct task_struct *p;
+       int error = 0;
+
+       p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
+       if (IS_ERR(p)) {
+               error = PTR_ERR(p);
+               fs_err(sdp, "can't start logd thread: %d\n", error);
+               return error;
+       }
+       sdp->sd_logd_process = p;
+
+       p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
+       if (IS_ERR(p)) {
+               error = PTR_ERR(p);
+               fs_err(sdp, "can't start quotad thread: %d\n", error);
+               goto fail;
+       }
+       sdp->sd_quotad_process = p;
+       return 0;
+
+fail:
+       kthread_stop(sdp->sd_logd_process);
+       sdp->sd_logd_process = NULL;
+       return error;
+}
+
 /**
  * gfs2_fill_super - Read in superblock
  * @sb: The VFS superblock
@@ -1216,6 +1255,14 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
                goto fail_per_node;
        }
 
+       if (!sb_rdonly(sb)) {
+               error = init_threads(sdp);
+               if (error) {
+                       gfs2_withdraw_delayed(sdp);
+                       goto fail_per_node;
+               }
+       }
+
        error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
        if (error)
                goto fail_per_node;
@@ -1225,6 +1272,12 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 
        gfs2_freeze_unlock(&freeze_gh);
        if (error) {
+               if (sdp->sd_quotad_process)
+                       kthread_stop(sdp->sd_quotad_process);
+               sdp->sd_quotad_process = NULL;
+               if (sdp->sd_logd_process)
+                       kthread_stop(sdp->sd_logd_process);
+               sdp->sd_logd_process = NULL;
                fs_err(sdp, "can't make FS RW: %d\n", error);
                goto fail_per_node;
        }
index 4d4ceb0..6e00d15 100644 (file)
@@ -119,34 +119,6 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
        return 0;
 }
 
-static int init_threads(struct gfs2_sbd *sdp)
-{
-       struct task_struct *p;
-       int error = 0;
-
-       p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
-       if (IS_ERR(p)) {
-               error = PTR_ERR(p);
-               fs_err(sdp, "can't start logd thread: %d\n", error);
-               return error;
-       }
-       sdp->sd_logd_process = p;
-
-       p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
-       if (IS_ERR(p)) {
-               error = PTR_ERR(p);
-               fs_err(sdp, "can't start quotad thread: %d\n", error);
-               goto fail;
-       }
-       sdp->sd_quotad_process = p;
-       return 0;
-
-fail:
-       kthread_stop(sdp->sd_logd_process);
-       sdp->sd_logd_process = NULL;
-       return error;
-}
-
 /**
  * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
  * @sdp: the filesystem
@@ -161,26 +133,17 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        struct gfs2_log_header_host head;
        int error;
 
-       error = init_threads(sdp);
-       if (error) {
-               gfs2_withdraw_delayed(sdp);
-               return error;
-       }
-
        j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
-       if (gfs2_withdrawn(sdp)) {
-               error = -EIO;
-               goto fail;
-       }
+       if (gfs2_withdrawn(sdp))
+               return -EIO;
 
        error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
        if (error || gfs2_withdrawn(sdp))
-               goto fail;
+               return error;
 
        if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
                gfs2_consist(sdp);
-               error = -EIO;
-               goto fail;
+               return -EIO;
        }
 
        /*  Initialize some head of the log stuff  */
@@ -188,20 +151,8 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        gfs2_log_pointers_init(sdp, head.lh_blkno);
 
        error = gfs2_quota_init(sdp);
-       if (error || gfs2_withdrawn(sdp))
-               goto fail;
-
-       set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
-
-       return 0;
-
-fail:
-       if (sdp->sd_quotad_process)
-               kthread_stop(sdp->sd_quotad_process);
-       sdp->sd_quotad_process = NULL;
-       if (sdp->sd_logd_process)
-               kthread_stop(sdp->sd_logd_process);
-       sdp->sd_logd_process = NULL;
+       if (!error && !gfs2_withdrawn(sdp))
+               set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
        return error;
 }
 
@@ -227,9 +178,8 @@ int gfs2_statfs_init(struct gfs2_sbd *sdp)
 {
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
-       struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
-       struct buffer_head *m_bh, *l_bh;
+       struct buffer_head *m_bh;
        struct gfs2_holder gh;
        int error;
 
@@ -248,21 +198,15 @@ int gfs2_statfs_init(struct gfs2_sbd *sdp)
                                      sizeof(struct gfs2_dinode));
                spin_unlock(&sdp->sd_statfs_spin);
        } else {
-               error = gfs2_meta_inode_buffer(l_ip, &l_bh);
-               if (error)
-                       goto out_m_bh;
-
                spin_lock(&sdp->sd_statfs_spin);
                gfs2_statfs_change_in(m_sc, m_bh->b_data +
                                      sizeof(struct gfs2_dinode));
-               gfs2_statfs_change_in(l_sc, l_bh->b_data +
+               gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                spin_unlock(&sdp->sd_statfs_spin);
 
-               brelse(l_bh);
        }
 
-out_m_bh:
        brelse(m_bh);
 out:
        gfs2_glock_dq_uninit(&gh);
@@ -275,22 +219,17 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
-       struct buffer_head *l_bh;
        s64 x, y;
        int need_sync = 0;
-       int error;
-
-       error = gfs2_meta_inode_buffer(l_ip, &l_bh);
-       if (error)
-               return;
 
-       gfs2_trans_add_meta(l_ip->i_gl, l_bh);
+       gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
 
        spin_lock(&sdp->sd_statfs_spin);
        l_sc->sc_total += total;
        l_sc->sc_free += free;
        l_sc->sc_dinodes += dinodes;
-       gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
+       gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
+                              sizeof(struct gfs2_dinode));
        if (sdp->sd_args.ar_statfs_percent) {
                x = 100 * l_sc->sc_free;
                y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
@@ -299,20 +238,18 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
        }
        spin_unlock(&sdp->sd_statfs_spin);
 
-       brelse(l_bh);
        if (need_sync)
                gfs2_wake_up_statfs(sdp);
 }
 
-void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
-                  struct buffer_head *l_bh)
+void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
 {
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 
-       gfs2_trans_add_meta(l_ip->i_gl, l_bh);
+       gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
        gfs2_trans_add_meta(m_ip->i_gl, m_bh);
 
        spin_lock(&sdp->sd_statfs_spin);
@@ -320,7 +257,7 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
        m_sc->sc_free += l_sc->sc_free;
        m_sc->sc_dinodes += l_sc->sc_dinodes;
        memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
-       memset(l_bh->b_data + sizeof(struct gfs2_dinode),
+       memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
               0, sizeof(struct gfs2_statfs_change));
        gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
        spin_unlock(&sdp->sd_statfs_spin);
@@ -330,11 +267,10 @@ int gfs2_statfs_sync(struct super_block *sb, int type)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
-       struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct gfs2_holder gh;
-       struct buffer_head *m_bh, *l_bh;
+       struct buffer_head *m_bh;
        int error;
 
        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
@@ -355,21 +291,15 @@ int gfs2_statfs_sync(struct super_block *sb, int type)
        }
        spin_unlock(&sdp->sd_statfs_spin);
 
-       error = gfs2_meta_inode_buffer(l_ip, &l_bh);
-       if (error)
-               goto out_bh;
-
        error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
        if (error)
-               goto out_bh2;
+               goto out_bh;
 
-       update_statfs(sdp, m_bh, l_bh);
+       update_statfs(sdp, m_bh);
        sdp->sd_statfs_force_sync = 0;
 
        gfs2_trans_end(sdp);
 
-out_bh2:
-       brelse(l_bh);
 out_bh:
        brelse(m_bh);
 out_unlock:
@@ -675,6 +605,7 @@ restart:
                        gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
                if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
                        gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
+               brelse(sdp->sd_sc_bh);
                gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
                gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
                free_local_statfs_inodes(sdp);
@@ -1016,7 +947,7 @@ static int gfs2_drop_inode(struct inode *inode)
                gfs2_glock_hold(gl);
                if (!gfs2_queue_delete_work(gl, 0))
                        gfs2_glock_queue_put(gl);
-               return false;
+               return 0;
        }
 
        return generic_drop_inode(inode);
index ec4affb..58d13fd 100644 (file)
@@ -43,8 +43,7 @@ extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
                                  const void *buf);
 extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
                                   void *buf);
-extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
-                         struct buffer_head *l_bh);
+extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
 extern int gfs2_statfs_sync(struct super_block *sb, int type);
 extern void gfs2_freeze_func(struct work_struct *work);
 
index f4325b4..cf345a8 100644 (file)
@@ -278,6 +278,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
                goto skip_recovery;
        }
        sdp->sd_jdesc->jd_inode = inode;
+       d_mark_dontcache(inode);
 
        /*
         * Now wait until recovery is complete.
@@ -295,7 +296,7 @@ skip_recovery:
                fs_warn(sdp, "Journal recovery complete for jid %d.\n",
                        sdp->sd_lockstruct.ls_jid);
        else
-               fs_warn(sdp, "Journal recovery skipped for %d until next "
+               fs_warn(sdp, "Journal recovery skipped for jid %d until next "
                        "mount.\n", sdp->sd_lockstruct.ls_jid);
        fs_warn(sdp, "Glock dequeues delayed: %lu\n", sdp->sd_glock_dqs_held);
        sdp->sd_glock_dqs_held = 0;
index 69e1a0a..78ec190 100644 (file)
@@ -218,6 +218,11 @@ static inline bool gfs2_withdrawing(struct gfs2_sbd *sdp)
               !test_bit(SDF_WITHDRAWN, &sdp->sd_flags);
 }
 
+static inline bool gfs2_withdraw_in_prog(struct gfs2_sbd *sdp)
+{
+       return test_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
+}
+
 #define gfs2_tune_get(sdp, field) \
 gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
 
index 2b36dc6..ec975f4 100644 (file)
@@ -2,6 +2,7 @@
 config HPFS_FS
        tristate "OS/2 HPFS file system support"
        depends on BLOCK
+       select FS_IOMAP
        help
          OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS
          is the file system used for organizing files on OS/2 hard disk
index c3a49aa..fb37f57 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "hpfs_fn.h"
 #include <linux/mpage.h>
+#include <linux/iomap.h>
 #include <linux/fiemap.h>
 
 #define BLOCKS(size) (((size) + 511) >> 9)
@@ -116,6 +117,47 @@ static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_he
        return r;
 }
 
+static int hpfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+               unsigned flags, struct iomap *iomap, struct iomap *srcmap)
+{
+       struct super_block *sb = inode->i_sb;
+       unsigned int blkbits = inode->i_blkbits;
+       unsigned int n_secs;
+       secno s;
+
+       if (WARN_ON_ONCE(flags & (IOMAP_WRITE | IOMAP_ZERO)))
+               return -EINVAL;
+
+       iomap->bdev = inode->i_sb->s_bdev;
+       iomap->offset = offset;
+
+       hpfs_lock(sb);
+       s = hpfs_bmap(inode, offset >> blkbits, &n_secs);
+       if (s) {
+               n_secs = hpfs_search_hotfix_map_for_range(sb, s,
+                               min_t(loff_t, n_secs, length));
+               if (unlikely(!n_secs)) {
+                       s = hpfs_search_hotfix_map(sb, s);
+                       n_secs = 1;
+               }
+               iomap->type = IOMAP_MAPPED;
+               iomap->flags = IOMAP_F_MERGED;
+               iomap->addr = (u64)s << blkbits;
+               iomap->length = (u64)n_secs << blkbits;
+       } else {
+               iomap->type = IOMAP_HOLE;
+               iomap->addr = IOMAP_NULL_ADDR;
+               iomap->length = 1 << blkbits;
+       }
+
+       hpfs_unlock(sb);
+       return 0;
+}
+
+static const struct iomap_ops hpfs_iomap_ops = {
+       .iomap_begin            = hpfs_iomap_begin,
+};
+
 static int hpfs_readpage(struct file *file, struct page *page)
 {
        return mpage_readpage(page, hpfs_get_block);
@@ -192,7 +234,14 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
 
 static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len)
 {
-       return generic_block_fiemap(inode, fieinfo, start, len, hpfs_get_block);
+       int ret;
+
+       inode_lock(inode);
+       len = min_t(u64, len, i_size_read(inode));
+       ret = iomap_fiemap(inode, fieinfo, start, len, &hpfs_iomap_ops);
+       inode_unlock(inode);
+
+       return ret;
 }
 
 const struct address_space_operations hpfs_aops = {
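
With hpfs_fiemap() now routed through iomap_fiemap() and the new hpfs_iomap_ops, the extents reported to userspace come straight from hpfs_iomap_begin() (merged runs carry IOMAP_F_MERGED, holes are simply not reported). A hypothetical userspace check that exercises this path through the FIEMAP ioctl on a file living on an HPFS mount:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>
    #include <linux/fiemap.h>

    /* Ask for the first extent of a file; the kernel side now answers via
     * iomap_fiemap() with the mapping produced by hpfs_iomap_begin(). */
    static int print_first_extent(const char *path)
    {
            char buf[sizeof(struct fiemap) + sizeof(struct fiemap_extent)];
            struct fiemap *fm = (struct fiemap *)buf;
            int fd = open(path, O_RDONLY);

            if (fd < 0)
                    return -1;
            memset(buf, 0, sizeof(buf));
            fm->fm_length = ~0ULL;          /* map the whole file */
            fm->fm_extent_count = 1;
            if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0 && fm->fm_mapped_extents)
                    printf("logical %llu physical %llu length %llu\n",
                           (unsigned long long)fm->fm_extents[0].fe_logical,
                           (unsigned long long)fm->fm_extents[0].fe_physical,
                           (unsigned long long)fm->fm_extents[0].fe_length);
            close(fd);
            return 0;
    }
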
index c93500d..84c528c 100644 (file)
@@ -190,6 +190,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;
+       __init_rwsem(&mapping->invalidate_lock, "mapping.invalidate_lock",
+                    &sb->s_type->invalidate_lock_key);
        inode->i_private = NULL;
        inode->i_mapping = mapping;
        INIT_HLIST_HEAD(&inode->i_dentry);      /* buggered by rcu freeing */
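
The invalidate_lock is initialised here with a per-file_system_type lockdep key (sb->s_type->invalidate_lock_key), so each filesystem gets its own class. As an assumption based on the locking rules this series documents rather than on the hunk itself, the expected nesting is i_rwsem outside invalidate_lock, with page-fault-style readers taking the lock shared:

    /* Assumed ordering: inode_lock() (i_rwsem) is taken first, then
     * filemap_invalidate_lock(); fault-side readers use the shared form. */
    inode_lock(inode);
    filemap_invalidate_lock(inode->i_mapping);
    truncate_pagecache_range(inode, start, end);    /* e.g. a hole punch */
    filemap_invalidate_unlock(inode->i_mapping);
    inode_unlock(inode);

    /* ...while a fault-style reader would bracket its page-cache lookup with
     * filemap_invalidate_lock_shared()/filemap_invalidate_unlock_shared(). */
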
index 82e8eb3..68a2ae0 100644 (file)
@@ -48,8 +48,8 @@ static inline int emergency_thaw_bdev(struct super_block *sb)
 /*
  * buffer.c
  */
-extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
-               get_block_t *get_block, struct iomap *iomap);
+int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+               get_block_t *get_block, const struct iomap *iomap);
 
 /*
  * char_dev.c
@@ -71,11 +71,15 @@ extern int filename_lookup(int dfd, struct filename *name, unsigned flags,
                           struct path *path, struct path *root);
 extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
                           const char *, unsigned int, struct path *);
-long do_rmdir(int dfd, struct filename *name);
-long do_unlinkat(int dfd, struct filename *name);
+int do_rmdir(int dfd, struct filename *name);
+int do_unlinkat(int dfd, struct filename *name);
 int may_linkat(struct user_namespace *mnt_userns, struct path *link);
 int do_renameat2(int olddfd, struct filename *oldname, int newdfd,
                 struct filename *newname, unsigned int flags);
+int do_mkdirat(int dfd, struct filename *name, umode_t mode);
+int do_symlinkat(struct filename *from, int newdfd, struct filename *to);
+int do_linkat(int olddfd, struct filename *old, int newdfd,
+                       struct filename *new, int flags);
 
 /*
  * namespace.c
index cf086b0..cd9bd09 100644 (file)
@@ -51,6 +51,10 @@ struct io_worker {
 
        struct completion ref_done;
 
+       unsigned long create_state;
+       struct callback_head create_work;
+       int create_index;
+
        struct rcu_head rcu;
 };
 
@@ -129,7 +133,8 @@ struct io_cb_cancel_data {
        bool cancel_all;
 };
 
-static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
+static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first);
+static void io_wqe_dec_running(struct io_worker *worker);
 
 static bool io_worker_get(struct io_worker *worker)
 {
@@ -168,27 +173,22 @@ static void io_worker_exit(struct io_worker *worker)
 {
        struct io_wqe *wqe = worker->wqe;
        struct io_wqe_acct *acct = io_wqe_get_acct(worker);
-       unsigned flags;
 
        if (refcount_dec_and_test(&worker->ref))
                complete(&worker->ref_done);
        wait_for_completion(&worker->ref_done);
 
-       preempt_disable();
-       current->flags &= ~PF_IO_WORKER;
-       flags = worker->flags;
-       worker->flags = 0;
-       if (flags & IO_WORKER_F_RUNNING)
-               atomic_dec(&acct->nr_running);
-       worker->flags = 0;
-       preempt_enable();
-
-       raw_spin_lock_irq(&wqe->lock);
-       if (flags & IO_WORKER_F_FREE)
+       raw_spin_lock(&wqe->lock);
+       if (worker->flags & IO_WORKER_F_FREE)
                hlist_nulls_del_rcu(&worker->nulls_node);
        list_del_rcu(&worker->all_list);
        acct->nr_workers--;
-       raw_spin_unlock_irq(&wqe->lock);
+       preempt_disable();
+       io_wqe_dec_running(worker);
+       worker->flags = 0;
+       current->flags &= ~PF_IO_WORKER;
+       preempt_enable();
+       raw_spin_unlock(&wqe->lock);
 
        kfree_rcu(worker, rcu);
        io_worker_ref_put(wqe->wq);
@@ -214,15 +214,19 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
        struct hlist_nulls_node *n;
        struct io_worker *worker;
 
-       n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
-       if (is_a_nulls(n))
-               return false;
-
-       worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
-       if (io_worker_get(worker)) {
-               wake_up_process(worker->task);
+       /*
+        * Iterate free_list and see if we can find an idle worker to
+        * activate. If a given worker is on the free_list but in the process
+        * of exiting, keep trying.
+        */
+       hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
+               if (!io_worker_get(worker))
+                       continue;
+               if (wake_up_process(worker->task)) {
+                       io_worker_release(worker);
+                       return true;
+               }
                io_worker_release(worker);
-               return true;
        }
 
        return false;
@@ -247,10 +251,22 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
        ret = io_wqe_activate_free_worker(wqe);
        rcu_read_unlock();
 
-       if (!ret && acct->nr_workers < acct->max_workers) {
-               atomic_inc(&acct->nr_running);
-               atomic_inc(&wqe->wq->worker_refs);
-               create_io_worker(wqe->wq, wqe, acct->index);
+       if (!ret) {
+               bool do_create = false, first = false;
+
+               raw_spin_lock(&wqe->lock);
+               if (acct->nr_workers < acct->max_workers) {
+                       if (!acct->nr_workers)
+                               first = true;
+                       acct->nr_workers++;
+                       do_create = true;
+               }
+               raw_spin_unlock(&wqe->lock);
+               if (do_create) {
+                       atomic_inc(&acct->nr_running);
+                       atomic_inc(&wqe->wq->worker_refs);
+                       create_io_worker(wqe->wq, wqe, acct->index, first);
+               }
        }
 }
 
@@ -261,42 +277,63 @@ static void io_wqe_inc_running(struct io_worker *worker)
        atomic_inc(&acct->nr_running);
 }
 
-struct create_worker_data {
-       struct callback_head work;
-       struct io_wqe *wqe;
-       int index;
-};
-
 static void create_worker_cb(struct callback_head *cb)
 {
-       struct create_worker_data *cwd;
+       struct io_worker *worker;
        struct io_wq *wq;
-
-       cwd = container_of(cb, struct create_worker_data, work);
-       wq = cwd->wqe->wq;
-       create_io_worker(wq, cwd->wqe, cwd->index);
-       kfree(cwd);
+       struct io_wqe *wqe;
+       struct io_wqe_acct *acct;
+       bool do_create = false, first = false;
+
+       worker = container_of(cb, struct io_worker, create_work);
+       wqe = worker->wqe;
+       wq = wqe->wq;
+       acct = &wqe->acct[worker->create_index];
+       raw_spin_lock(&wqe->lock);
+       if (acct->nr_workers < acct->max_workers) {
+               if (!acct->nr_workers)
+                       first = true;
+               acct->nr_workers++;
+               do_create = true;
+       }
+       raw_spin_unlock(&wqe->lock);
+       if (do_create) {
+               create_io_worker(wq, wqe, worker->create_index, first);
+       } else {
+               atomic_dec(&acct->nr_running);
+               io_worker_ref_put(wq);
+       }
+       clear_bit_unlock(0, &worker->create_state);
+       io_worker_release(worker);
 }
 
-static void io_queue_worker_create(struct io_wqe *wqe, struct io_wqe_acct *acct)
+static void io_queue_worker_create(struct io_wqe *wqe, struct io_worker *worker,
+                                  struct io_wqe_acct *acct)
 {
-       struct create_worker_data *cwd;
        struct io_wq *wq = wqe->wq;
 
        /* raced with exit, just ignore create call */
        if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
                goto fail;
+       if (!io_worker_get(worker))
+               goto fail;
+       /*
+        * create_state manages ownership of create_work/index. We should
+        * only need one entry per worker, as the worker going to sleep
+        * will trigger the condition, and waking will clear it once it
+        * runs the task_work.
+        */
+       if (test_bit(0, &worker->create_state) ||
+           test_and_set_bit_lock(0, &worker->create_state))
+               goto fail_release;
 
-       cwd = kmalloc(sizeof(*cwd), GFP_ATOMIC);
-       if (cwd) {
-               init_task_work(&cwd->work, create_worker_cb);
-               cwd->wqe = wqe;
-               cwd->index = acct->index;
-               if (!task_work_add(wq->task, &cwd->work, TWA_SIGNAL))
-                       return;
-
-               kfree(cwd);
-       }
+       init_task_work(&worker->create_work, create_worker_cb);
+       worker->create_index = acct->index;
+       if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL))
+               return;
+       clear_bit_unlock(0, &worker->create_state);
+fail_release:
+       io_worker_release(worker);
 fail:
        atomic_dec(&acct->nr_running);
        io_worker_ref_put(wq);
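
The create_state bit added above acts as a tiny ownership lock: whichever path wins test_and_set_bit_lock() owns create_work/create_index until the queued task_work runs (or is cancelled) and releases it, so at most one creation request per worker is in flight. A stripped-down sketch of that pattern, with worker_like and owner_task as invented placeholder names:

    /* Placeholder struct/field names; only the bit-lock plus task_work
     * sequencing mirrors the io-wq change above. */
    static bool queue_creation_once(struct worker_like *w)
    {
            if (test_bit(0, &w->create_state) ||
                test_and_set_bit_lock(0, &w->create_state))
                    return false;   /* a creation request is already pending */

            init_task_work(&w->create_work, create_worker_cb);
            if (task_work_add(w->owner_task, &w->create_work, TWA_SIGNAL)) {
                    /* target task is exiting: give ownership back */
                    clear_bit_unlock(0, &w->create_state);
                    return false;
            }
            return true;    /* create_worker_cb() clears the bit when it runs */
    }
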
@@ -314,7 +351,7 @@ static void io_wqe_dec_running(struct io_worker *worker)
        if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe)) {
                atomic_inc(&acct->nr_running);
                atomic_inc(&wqe->wq->worker_refs);
-               io_queue_worker_create(wqe, acct);
+               io_queue_worker_create(wqe, worker, acct);
        }
 }
 
@@ -387,7 +424,28 @@ static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
        spin_unlock(&wq->hash->wait.lock);
 }
 
-static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
+/*
+ * We can always run the work if the worker is currently the same type as
+ * the work (eg both are bound, or both are unbound). If they are not the
+ * same, only allow it if incrementing the worker count would be allowed.
+ */
+static bool io_worker_can_run_work(struct io_worker *worker,
+                                  struct io_wq_work *work)
+{
+       struct io_wqe_acct *acct;
+
+       if (!(worker->flags & IO_WORKER_F_BOUND) !=
+           !(work->flags & IO_WQ_WORK_UNBOUND))
+               return true;
+
+       /* not the same type, check if we'd go over the limit */
+       acct = io_work_get_acct(worker->wqe, work);
+       return acct->nr_workers < acct->max_workers;
+}
+
+static struct io_wq_work *io_get_next_work(struct io_wqe *wqe,
+                                          struct io_worker *worker,
+                                          bool *stalled)
        __must_hold(wqe->lock)
 {
        struct io_wq_work_node *node, *prev;
@@ -399,6 +457,9 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 
                work = container_of(node, struct io_wq_work, list);
 
+               if (!io_worker_can_run_work(worker, work))
+                       break;
+
                /* not hashed, can run anytime */
                if (!io_wq_is_hashed(work)) {
                        wq_list_del(&wqe->work_list, node, prev);
@@ -425,6 +486,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
                raw_spin_unlock(&wqe->lock);
                io_wait_on_hash(wqe, stall_hash);
                raw_spin_lock(&wqe->lock);
+               *stalled = true;
        }
 
        return NULL;
@@ -448,9 +510,9 @@ static void io_assign_current_work(struct io_worker *worker,
                cond_resched();
        }
 
-       spin_lock_irq(&worker->lock);
+       spin_lock(&worker->lock);
        worker->cur_work = work;
-       spin_unlock_irq(&worker->lock);
+       spin_unlock(&worker->lock);
 }
 
 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
@@ -464,6 +526,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 
        do {
                struct io_wq_work *work;
+               bool stalled;
 get_next:
                /*
                 * If we got some work, mark us as busy. If we didn't, but
@@ -472,13 +535,14 @@ get_next:
                 * can't make progress, any work completion or insertion will
                 * clear the stalled flag.
                 */
-               work = io_get_next_work(wqe);
+               stalled = false;
+               work = io_get_next_work(wqe, worker, &stalled);
                if (work)
                        __io_worker_busy(wqe, worker, work);
-               else if (!wq_list_empty(&wqe->work_list))
+               else if (stalled)
                        wqe->flags |= IO_WQE_FLAG_STALLED;
 
-               raw_spin_unlock_irq(&wqe->lock);
+               raw_spin_unlock(&wqe->lock);
                if (!work)
                        break;
                io_assign_current_work(worker, work);
@@ -510,16 +574,16 @@ get_next:
                                clear_bit(hash, &wq->hash->map);
                                if (wq_has_sleeper(&wq->hash->wait))
                                        wake_up(&wq->hash->wait);
-                               raw_spin_lock_irq(&wqe->lock);
+                               raw_spin_lock(&wqe->lock);
                                wqe->flags &= ~IO_WQE_FLAG_STALLED;
                                /* skip unnecessary unlock-lock wqe->lock */
                                if (!work)
                                        goto get_next;
-                               raw_spin_unlock_irq(&wqe->lock);
+                               raw_spin_unlock(&wqe->lock);
                        }
                } while (work);
 
-               raw_spin_lock_irq(&wqe->lock);
+               raw_spin_lock(&wqe->lock);
        } while (1);
 }
 
@@ -540,13 +604,13 @@ static int io_wqe_worker(void *data)
 
                set_current_state(TASK_INTERRUPTIBLE);
 loop:
-               raw_spin_lock_irq(&wqe->lock);
+               raw_spin_lock(&wqe->lock);
                if (io_wqe_run_queue(wqe)) {
                        io_worker_handle_work(worker);
                        goto loop;
                }
                __io_worker_idle(wqe, worker);
-               raw_spin_unlock_irq(&wqe->lock);
+               raw_spin_unlock(&wqe->lock);
                if (io_flush_signals())
                        continue;
                ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
@@ -565,7 +629,7 @@ loop:
        }
 
        if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
-               raw_spin_lock_irq(&wqe->lock);
+               raw_spin_lock(&wqe->lock);
                io_worker_handle_work(worker);
        }
 
@@ -607,12 +671,12 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
 
        worker->flags &= ~IO_WORKER_F_RUNNING;
 
-       raw_spin_lock_irq(&worker->wqe->lock);
+       raw_spin_lock(&worker->wqe->lock);
        io_wqe_dec_running(worker);
-       raw_spin_unlock_irq(&worker->wqe->lock);
+       raw_spin_unlock(&worker->wqe->lock);
 }
 
-static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first)
 {
        struct io_wqe_acct *acct = &wqe->acct[index];
        struct io_worker *worker;
@@ -635,6 +699,9 @@ static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
                kfree(worker);
 fail:
                atomic_dec(&acct->nr_running);
+               raw_spin_lock(&wqe->lock);
+               acct->nr_workers--;
+               raw_spin_unlock(&wqe->lock);
                io_worker_ref_put(wq);
                return;
        }
@@ -644,16 +711,15 @@ fail:
        set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
        tsk->flags |= PF_NO_SETAFFINITY;
 
-       raw_spin_lock_irq(&wqe->lock);
+       raw_spin_lock(&wqe->lock);
        hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
        list_add_tail_rcu(&worker->all_list, &wqe->all_list);
        worker->flags |= IO_WORKER_F_FREE;
        if (index == IO_WQ_ACCT_BOUND)
                worker->flags |= IO_WORKER_F_BOUND;
-       if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
+       if (first && (worker->flags & IO_WORKER_F_BOUND))
                worker->flags |= IO_WORKER_F_FIXED;
-       acct->nr_workers++;
-       raw_spin_unlock_irq(&wqe->lock);
+       raw_spin_unlock(&wqe->lock);
        wake_up_new_task(tsk);
 }
 
@@ -728,8 +794,7 @@ append:
 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 {
        struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
-       int work_flags;
-       unsigned long flags;
+       bool do_wake;
 
        /*
         * If io-wq is exiting for this task, or if the request has explicitly
@@ -741,14 +806,14 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
                return;
        }
 
-       work_flags = work->flags;
-       raw_spin_lock_irqsave(&wqe->lock, flags);
+       raw_spin_lock(&wqe->lock);
        io_wqe_insert_work(wqe, work);
        wqe->flags &= ~IO_WQE_FLAG_STALLED;
-       raw_spin_unlock_irqrestore(&wqe->lock, flags);
+       do_wake = (work->flags & IO_WQ_WORK_CONCURRENT) ||
+                       !atomic_read(&acct->nr_running);
+       raw_spin_unlock(&wqe->lock);
 
-       if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
-           !atomic_read(&acct->nr_running))
+       if (do_wake)
                io_wqe_wake_worker(wqe, acct);
 }
 
@@ -774,19 +839,18 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 {
        struct io_cb_cancel_data *match = data;
-       unsigned long flags;
 
        /*
         * Hold the lock to avoid ->cur_work going out of scope, caller
         * may dereference the passed in work.
         */
-       spin_lock_irqsave(&worker->lock, flags);
+       spin_lock(&worker->lock);
        if (worker->cur_work &&
            match->fn(worker->cur_work, match->data)) {
                set_notify_signal(worker->task);
                match->nr_running++;
        }
-       spin_unlock_irqrestore(&worker->lock, flags);
+       spin_unlock(&worker->lock);
 
        return match->nr_running && !match->cancel_all;
 }
@@ -814,16 +878,15 @@ static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
 {
        struct io_wq_work_node *node, *prev;
        struct io_wq_work *work;
-       unsigned long flags;
 
 retry:
-       raw_spin_lock_irqsave(&wqe->lock, flags);
+       raw_spin_lock(&wqe->lock);
        wq_list_for_each(node, prev, &wqe->work_list) {
                work = container_of(node, struct io_wq_work, list);
                if (!match->fn(work, match->data))
                        continue;
                io_wqe_remove_pending(wqe, work, prev);
-               raw_spin_unlock_irqrestore(&wqe->lock, flags);
+               raw_spin_unlock(&wqe->lock);
                io_run_cancel(work, wqe);
                match->nr_pending++;
                if (!match->cancel_all)
@@ -832,7 +895,7 @@ retry:
                /* not safe to continue after unlock */
                goto retry;
        }
-       raw_spin_unlock_irqrestore(&wqe->lock, flags);
+       raw_spin_unlock(&wqe->lock);
 }
 
 static void io_wqe_cancel_running_work(struct io_wqe *wqe,
@@ -973,12 +1036,12 @@ err_wq:
 
 static bool io_task_work_match(struct callback_head *cb, void *data)
 {
-       struct create_worker_data *cwd;
+       struct io_worker *worker;
 
        if (cb->func != create_worker_cb)
                return false;
-       cwd = container_of(cb, struct create_worker_data, work);
-       return cwd->wqe->wq == data;
+       worker = container_of(cb, struct io_worker, create_work);
+       return worker->wqe->wq == data;
 }
 
 void io_wq_exit_start(struct io_wq *wq)
@@ -995,12 +1058,13 @@ static void io_wq_exit_workers(struct io_wq *wq)
                return;
 
        while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
-               struct create_worker_data *cwd;
+               struct io_worker *worker;
 
-               cwd = container_of(cb, struct create_worker_data, work);
-               atomic_dec(&cwd->wqe->acct[cwd->index].nr_running);
+               worker = container_of(cb, struct io_worker, create_work);
+               atomic_dec(&worker->wqe->acct[worker->create_index].nr_running);
                io_worker_ref_put(wq);
-               kfree(cwd);
+               clear_bit_unlock(0, &worker->create_state);
+               io_worker_release(worker);
        }
 
        rcu_read_lock();
@@ -1112,6 +1176,35 @@ int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
        return 0;
 }
 
+/*
+ * Set max number of unbounded workers, returns old value. If new_count is 0,
+ * then just return the old value.
+ */
+int io_wq_max_workers(struct io_wq *wq, int *new_count)
+{
+       int i, node, prev = 0;
+
+       for (i = 0; i < 2; i++) {
+               if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
+                       new_count[i] = task_rlimit(current, RLIMIT_NPROC);
+       }
+
+       rcu_read_lock();
+       for_each_node(node) {
+               struct io_wqe_acct *acct;
+
+               for (i = 0; i < 2; i++) {
+                       acct = &wq->wqes[node]->acct[i];
+                       prev = max_t(int, acct->max_workers, prev);
+                       if (new_count[i])
+                               acct->max_workers = new_count[i];
+                       new_count[i] = prev;
+               }
+       }
+       rcu_read_unlock();
+       return 0;
+}
+
 static __init int io_wq_init(void)
 {
        int ret;
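
io_wq_max_workers() above clamps the requested counts to RLIMIT_NPROC and, per its comment, reports the previous limits back through the same array, with 0 meaning "leave this slot unchanged". A hypothetical caller-side sketch (tctx_wq is a stand-in for whatever io_wq pointer the caller holds):

    int counts[2] = { 0, 0 };       /* 0 = just read back the current limits */

    io_wq_max_workers(tctx_wq, counts);
    /* the previous bounded/unbounded maximums are reported back in counts[] */

    counts[1] = 64;                 /* cap unbounded workers at 64 */
    io_wq_max_workers(tctx_wq, counts);
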
index 3999ee5..bf5c4c5 100644 (file)
@@ -44,6 +44,7 @@ static inline void wq_list_add_after(struct io_wq_work_node *node,
 static inline void wq_list_add_tail(struct io_wq_work_node *node,
                                    struct io_wq_work_list *list)
 {
+       node->next = NULL;
        if (!list->first) {
                list->last = node;
                WRITE_ONCE(list->first, node);
@@ -51,7 +52,6 @@ static inline void wq_list_add_tail(struct io_wq_work_node *node,
                list->last->next = node;
                list->last = node;
        }
-       node->next = NULL;
 }
 
 static inline void wq_list_cut(struct io_wq_work_list *list,
@@ -128,6 +128,7 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
 void io_wq_hash_work(struct io_wq_work *work, void *val);
 
 int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+int io_wq_max_workers(struct io_wq *wq, int *new_count);
 
 static inline bool io_wq_is_hashed(struct io_wq_work *work)
 {
index bf548af..6f35b12 100644 (file)
@@ -78,6 +78,7 @@
 #include <linux/task_work.h>
 #include <linux/pagemap.h>
 #include <linux/io_uring.h>
+#include <linux/tracehook.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
 #define IORING_MAX_CQ_ENTRIES  (2 * IORING_MAX_ENTRIES)
 #define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
 
-/*
- * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
- */
-#define IORING_FILE_TABLE_SHIFT        9
-#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
-#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
-#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
+/* only define max */
+#define IORING_MAX_FIXED_FILES (1U << 15)
 #define IORING_MAX_RESTRICTIONS        (IORING_RESTRICTION_LAST + \
                                 IORING_REGISTER_LAST + IORING_OP_LAST)
 
-#define IO_RSRC_TAG_TABLE_SHIFT        9
+#define IO_RSRC_TAG_TABLE_SHIFT        (PAGE_SHIFT - 3)
 #define IO_RSRC_TAG_TABLE_MAX  (1U << IO_RSRC_TAG_TABLE_SHIFT)
 #define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1)
 
@@ -234,8 +230,7 @@ struct io_rsrc_put {
 };
 
 struct io_file_table {
-       /* two level table */
-       struct io_fixed_file **files;
+       struct io_fixed_file *files;
 };
 
 struct io_rsrc_node {
@@ -300,18 +295,10 @@ struct io_sq_data {
        struct completion       exited;
 };
 
-#define IO_IOPOLL_BATCH                        8
 #define IO_COMPL_BATCH                 32
 #define IO_REQ_CACHE_SIZE              32
 #define IO_REQ_ALLOC_BATCH             8
 
-struct io_comp_state {
-       struct io_kiocb         *reqs[IO_COMPL_BATCH];
-       unsigned int            nr;
-       /* inline/task_work completion list, under ->uring_lock */
-       struct list_head        free_list;
-};
-
 struct io_submit_link {
        struct io_kiocb         *head;
        struct io_kiocb         *last;
@@ -332,14 +319,11 @@ struct io_submit_state {
        /*
         * Batch completion logic
         */
-       struct io_comp_state    comp;
+       struct io_kiocb         *compl_reqs[IO_COMPL_BATCH];
+       unsigned int            compl_nr;
+       /* inline/task_work completion list, under ->uring_lock */
+       struct list_head        free_list;
 
-       /*
-        * File reference cache
-        */
-       struct file             *file;
-       unsigned int            fd;
-       unsigned int            file_refs;
        unsigned int            ios_left;
 };
 
@@ -391,6 +375,7 @@ struct io_ring_ctx {
 
                struct io_submit_state  submit_state;
                struct list_head        timeout_list;
+               struct list_head        ltimeout_list;
                struct list_head        cq_overflow_list;
                struct xarray           io_buffers;
                struct xarray           personalities;
@@ -425,6 +410,8 @@ struct io_ring_ctx {
        struct {
                spinlock_t              completion_lock;
 
+               spinlock_t              timeout_lock;
+
                /*
                 * ->iopoll_list is protected by the ctx->uring_lock for
                 * io_uring instances that don't use IORING_SETUP_SQPOLL.
@@ -486,8 +473,8 @@ struct io_uring_task {
 
        spinlock_t              task_lock;
        struct io_wq_work_list  task_list;
-       unsigned long           task_state;
        struct callback_head    task_work;
+       bool                    task_running;
 };
 
 /*
@@ -522,6 +509,7 @@ struct io_timeout_data {
        struct hrtimer                  timer;
        struct timespec64               ts;
        enum hrtimer_mode               mode;
+       u32                             flags;
 };
 
 struct io_accept {
@@ -529,6 +517,7 @@ struct io_accept {
        struct sockaddr __user          *addr;
        int __user                      *addr_len;
        int                             flags;
+       u32                             file_slot;
        unsigned long                   nofile;
 };
 
@@ -552,6 +541,8 @@ struct io_timeout {
        struct list_head                list;
        /* head of the link, used by linked timeouts only */
        struct io_kiocb                 *head;
+       /* for linked completions */
+       struct io_kiocb                 *prev;
 };
 
 struct io_timeout_rem {
@@ -561,6 +552,7 @@ struct io_timeout_rem {
        /* timeout update */
        struct timespec64               ts;
        u32                             flags;
+       bool                            ltimeout;
 };
 
 struct io_rw {
@@ -592,6 +584,7 @@ struct io_sr_msg {
 struct io_open {
        struct file                     *file;
        int                             dfd;
+       u32                             file_slot;
        struct filename                 *filename;
        struct open_how                 how;
        unsigned long                   nofile;
@@ -674,9 +667,31 @@ struct io_unlink {
        struct filename                 *filename;
 };
 
+struct io_mkdir {
+       struct file                     *file;
+       int                             dfd;
+       umode_t                         mode;
+       struct filename                 *filename;
+};
+
+struct io_symlink {
+       struct file                     *file;
+       int                             new_dfd;
+       struct filename                 *oldpath;
+       struct filename                 *newpath;
+};
+
+struct io_hardlink {
+       struct file                     *file;
+       int                             old_dfd;
+       int                             new_dfd;
+       struct filename                 *oldpath;
+       struct filename                 *newpath;
+       int                             flags;
+};
+
 struct io_completion {
        struct file                     *file;
-       struct list_head                list;
        u32                             cflags;
 };
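
The new io_mkdir, io_symlink and io_hardlink request structs back the IORING_OP_MKDIRAT, IORING_OP_SYMLINKAT and IORING_OP_LINKAT opcodes added in this cycle. A hypothetical userspace sketch using the liburing prep helper (helper name assumed from liburing 2.1, not taken from this hunk):

    #include <fcntl.h>
    #include <liburing.h>

    /* Create a directory through IORING_OP_MKDIRAT instead of mkdirat(2). */
    static int mkdir_via_uring(const char *path, mode_t mode)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            int ret = io_uring_queue_init(8, &ring, 0);

            if (ret < 0)
                    return ret;
            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_mkdirat(sqe, AT_FDCWD, path, mode);
            io_uring_submit(&ring);
            ret = io_uring_wait_cqe(&ring, &cqe);
            if (!ret) {
                    ret = cqe->res;         /* 0 on success, -errno otherwise */
                    io_uring_cqe_seen(&ring, cqe);
            }
            io_uring_queue_exit(&ring);
            return ret;
    }
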
 
@@ -718,14 +733,15 @@ enum {
        REQ_F_NEED_CLEANUP_BIT,
        REQ_F_POLLED_BIT,
        REQ_F_BUFFER_SELECTED_BIT,
-       REQ_F_LTIMEOUT_ACTIVE_BIT,
        REQ_F_COMPLETE_INLINE_BIT,
        REQ_F_REISSUE_BIT,
        REQ_F_DONT_REISSUE_BIT,
        REQ_F_CREDS_BIT,
+       REQ_F_REFCOUNT_BIT,
+       REQ_F_ARM_LTIMEOUT_BIT,
        /* keep async read/write and isreg together and in order */
-       REQ_F_ASYNC_READ_BIT,
-       REQ_F_ASYNC_WRITE_BIT,
+       REQ_F_NOWAIT_READ_BIT,
+       REQ_F_NOWAIT_WRITE_BIT,
        REQ_F_ISREG_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
@@ -762,8 +778,6 @@ enum {
        REQ_F_POLLED            = BIT(REQ_F_POLLED_BIT),
        /* buffer already selected */
        REQ_F_BUFFER_SELECTED   = BIT(REQ_F_BUFFER_SELECTED_BIT),
-       /* linked timeout is active, i.e. prepared by link's head */
-       REQ_F_LTIMEOUT_ACTIVE   = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
        /* completion is deferred through io_comp_state */
        REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
        /* caller should reissue async */
@@ -771,13 +785,17 @@ enum {
        /* don't attempt request reissue, see io_rw_reissue() */
        REQ_F_DONT_REISSUE      = BIT(REQ_F_DONT_REISSUE_BIT),
        /* supports async reads */
-       REQ_F_ASYNC_READ        = BIT(REQ_F_ASYNC_READ_BIT),
+       REQ_F_NOWAIT_READ       = BIT(REQ_F_NOWAIT_READ_BIT),
        /* supports async writes */
-       REQ_F_ASYNC_WRITE       = BIT(REQ_F_ASYNC_WRITE_BIT),
+       REQ_F_NOWAIT_WRITE      = BIT(REQ_F_NOWAIT_WRITE_BIT),
        /* regular file */
        REQ_F_ISREG             = BIT(REQ_F_ISREG_BIT),
        /* has creds assigned */
        REQ_F_CREDS             = BIT(REQ_F_CREDS_BIT),
+       /* skip refcounting if not set */
+       REQ_F_REFCOUNT          = BIT(REQ_F_REFCOUNT_BIT),
+       /* there is a linked timeout that has to be armed */
+       REQ_F_ARM_LTIMEOUT      = BIT(REQ_F_ARM_LTIMEOUT_BIT),
 };
 
 struct async_poll {
@@ -785,7 +803,7 @@ struct async_poll {
        struct io_poll_iocb     *double_poll;
 };
 
-typedef void (*io_req_tw_func_t)(struct io_kiocb *req);
+typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
 
 struct io_task_work {
        union {
@@ -831,6 +849,9 @@ struct io_kiocb {
                struct io_shutdown      shutdown;
                struct io_rename        rename;
                struct io_unlink        unlink;
+               struct io_mkdir         mkdir;
+               struct io_symlink       symlink;
+               struct io_hardlink      hardlink;
                /* use only after cleaning per-op data, see io_clean_op() */
                struct io_completion    compl;
        };
@@ -1042,39 +1063,43 @@ static const struct io_op_def io_op_defs[] = {
        },
        [IORING_OP_RENAMEAT] = {},
        [IORING_OP_UNLINKAT] = {},
+       [IORING_OP_MKDIRAT] = {},
+       [IORING_OP_SYMLINKAT] = {},
+       [IORING_OP_LINKAT] = {},
 };
 
+/* requests with any of these flags set should undergo io_disarm_next() */
+#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
+
 static bool io_disarm_next(struct io_kiocb *req);
 static void io_uring_del_tctx_node(unsigned long index);
 static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
                                         struct task_struct *task,
                                         bool cancel_all);
 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
-static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
 
 static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
                                 long res, unsigned int cflags);
 static void io_put_req(struct io_kiocb *req);
-static void io_put_req_deferred(struct io_kiocb *req, int nr);
+static void io_put_req_deferred(struct io_kiocb *req);
 static void io_dismantle_req(struct io_kiocb *req);
-static void io_put_task(struct task_struct *task, int nr);
-static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
 static void io_queue_linked_timeout(struct io_kiocb *req);
 static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
                                     struct io_uring_rsrc_update2 *up,
                                     unsigned nr_args);
 static void io_clean_op(struct io_kiocb *req);
-static struct file *io_file_get(struct io_submit_state *state,
+static struct file *io_file_get(struct io_ring_ctx *ctx,
                                struct io_kiocb *req, int fd, bool fixed);
 static void __io_queue_sqe(struct io_kiocb *req);
 static void io_rsrc_put_work(struct work_struct *work);
 
 static void io_req_task_queue(struct io_kiocb *req);
 static void io_submit_flush_completions(struct io_ring_ctx *ctx);
-static bool io_poll_remove_waitqs(struct io_kiocb *req);
 static int io_req_prep_async(struct io_kiocb *req);
 
-static void io_fallback_req_func(struct work_struct *unused);
+static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
+                                unsigned int issue_flags, u32 slot_index);
+static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
 
 static struct kmem_cache *req_cachep;
 
@@ -1093,9 +1118,65 @@ struct sock *io_uring_get_socket(struct file *file)
 }
 EXPORT_SYMBOL(io_uring_get_socket);
 
+static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
+{
+       if (!*locked) {
+               mutex_lock(&ctx->uring_lock);
+               *locked = true;
+       }
+}
+
 #define io_for_each_link(pos, head) \
        for (pos = (head); pos; pos = pos->link)
 
+/*
+ * Shamelessly stolen from the mm implementation of page reference checking,
+ * see commit f958d7b528b1 for details.
+ */
+#define req_ref_zero_or_close_to_overflow(req) \
+       ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
+
+static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
+{
+       WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
+       return atomic_inc_not_zero(&req->refs);
+}
+
+static inline bool req_ref_put_and_test(struct io_kiocb *req)
+{
+       if (likely(!(req->flags & REQ_F_REFCOUNT)))
+               return true;
+
+       WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+       return atomic_dec_and_test(&req->refs);
+}
+
+static inline void req_ref_put(struct io_kiocb *req)
+{
+       WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
+       WARN_ON_ONCE(req_ref_put_and_test(req));
+}
+
+static inline void req_ref_get(struct io_kiocb *req)
+{
+       WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
+       WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+       atomic_inc(&req->refs);
+}
+
+static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
+{
+       if (!(req->flags & REQ_F_REFCOUNT)) {
+               req->flags |= REQ_F_REFCOUNT;
+               atomic_set(&req->refs, nr);
+       }
+}
+
+static inline void io_req_set_refcount(struct io_kiocb *req)
+{
+       __io_req_set_refcount(req, 1);
+}
+
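/*
 * Illustrative userspace sketch (not part of this diff): the "zero or close
 * to overflow" guard that req_ref_zero_or_close_to_overflow() above borrows
 * from the mm page refcount code, written with C11 atomics. The struct and
 * helper names (obj, obj_ref_get, ...) are assumptions made for the example.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj { atomic_uint refs; };

/* true if the counter is 0 or has wrapped into the last 127 values */
static bool ref_zero_or_close_to_overflow(struct obj *o)
{
	return atomic_load(&o->refs) + 127u <= 127u;
}

static void obj_ref_get(struct obj *o)
{
	/* taking a reference on a dead or overflowed object is a bug */
	assert(!ref_zero_or_close_to_overflow(o));
	atomic_fetch_add(&o->refs, 1);
}

static bool obj_ref_put(struct obj *o)
{
	assert(!ref_zero_or_close_to_overflow(o));
	/* returns true when the last reference was dropped */
	return atomic_fetch_sub(&o->refs, 1) == 1;
}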
 static inline void io_req_set_rsrc_node(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
@@ -1140,6 +1221,12 @@ static inline void req_set_fail(struct io_kiocb *req)
        req->flags |= REQ_F_FAIL;
 }
 
+static inline void req_fail_link_node(struct io_kiocb *req, int res)
+{
+       req_set_fail(req);
+       req->result = res;
+}
+
 static void io_ring_ctx_ref_free(struct percpu_ref *ref)
 {
        struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
@@ -1152,6 +1239,27 @@ static inline bool io_is_timeout_noseq(struct io_kiocb *req)
        return !req->timeout.off;
 }
 
+static void io_fallback_req_func(struct work_struct *work)
+{
+       struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
+                                               fallback_work.work);
+       struct llist_node *node = llist_del_all(&ctx->fallback_llist);
+       struct io_kiocb *req, *tmp;
+       bool locked = false;
+
+       percpu_ref_get(&ctx->refs);
+       llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
+               req->io_task_work.func(req, &locked);
+
+       if (locked) {
+               if (ctx->submit_state.compl_nr)
+                       io_submit_flush_completions(ctx);
+               mutex_unlock(&ctx->uring_lock);
+       }
+       percpu_ref_put(&ctx->refs);
+
+}
+
 static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 {
        struct io_ring_ctx *ctx;
@@ -1197,15 +1305,17 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
        mutex_init(&ctx->uring_lock);
        init_waitqueue_head(&ctx->cq_wait);
        spin_lock_init(&ctx->completion_lock);
+       spin_lock_init(&ctx->timeout_lock);
        INIT_LIST_HEAD(&ctx->iopoll_list);
        INIT_LIST_HEAD(&ctx->defer_list);
        INIT_LIST_HEAD(&ctx->timeout_list);
+       INIT_LIST_HEAD(&ctx->ltimeout_list);
        spin_lock_init(&ctx->rsrc_ref_lock);
        INIT_LIST_HEAD(&ctx->rsrc_ref_list);
        INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
        init_llist_head(&ctx->rsrc_put_llist);
        INIT_LIST_HEAD(&ctx->tctx_list);
-       INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
+       INIT_LIST_HEAD(&ctx->submit_state.free_list);
        INIT_LIST_HEAD(&ctx->locked_free_list);
        INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
        return ctx;
@@ -1235,6 +1345,20 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
        return false;
 }
 
+#define FFS_ASYNC_READ         0x1UL
+#define FFS_ASYNC_WRITE                0x2UL
+#ifdef CONFIG_64BIT
+#define FFS_ISREG              0x4UL
+#else
+#define FFS_ISREG              0x0UL
+#endif
+#define FFS_MASK               ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
+
+static inline bool io_req_ffs_set(struct io_kiocb *req)
+{
+       return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
+}
+
 static void io_req_track_inflight(struct io_kiocb *req)
 {
        if (!(req->flags & REQ_F_INFLIGHT)) {
@@ -1243,6 +1367,32 @@ static void io_req_track_inflight(struct io_kiocb *req)
        }
 }
 
+static inline void io_unprep_linked_timeout(struct io_kiocb *req)
+{
+       req->flags &= ~REQ_F_LINK_TIMEOUT;
+}
+
+static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
+{
+       if (WARN_ON_ONCE(!req->link))
+               return NULL;
+
+       req->flags &= ~REQ_F_ARM_LTIMEOUT;
+       req->flags |= REQ_F_LINK_TIMEOUT;
+
+       /* linked timeouts should have two refs once prep'ed */
+       io_req_set_refcount(req);
+       __io_req_set_refcount(req->link, 2);
+       return req->link;
+}
+
+static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
+{
+       if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
+               return NULL;
+       return __io_prep_linked_timeout(req);
+}
+
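/*
 * Userspace sketch (not part of this diff), assuming liburing is available:
 * how an application attaches a linked timeout, which is what the
 * REQ_F_ARM_LTIMEOUT/io_prep_linked_timeout() machinery above services on
 * the kernel side. The fd, buffer and user_data values are placeholders,
 * and error handling (NULL sqe, short submits) is omitted for brevity.
 */
#include <liburing.h>

static int read_with_timeout(struct io_uring *ring, int fd,
			     void *buf, unsigned len)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret = -1;
	int i;

	/* the I/O request, marked as the head of a link */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, buf, len, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);
	sqe->user_data = 1;

	/* the linked timeout cancels the read if it takes longer than ts */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);
	sqe->user_data = 2;

	io_uring_submit(ring);

	/* both the read and the timeout post a CQE; keep the read's result */
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(ring, &cqe))
			break;
		if (cqe->user_data == 1)
			ret = cqe->res;
		io_uring_cqe_seen(ring, cqe);
	}
	return ret;
}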
 static void io_prep_async_work(struct io_kiocb *req)
 {
        const struct io_op_def *def = &io_op_defs[req->opcode];
@@ -1282,22 +1432,25 @@ static void io_prep_async_link(struct io_kiocb *req)
        if (req->flags & REQ_F_LINK_TIMEOUT) {
                struct io_ring_ctx *ctx = req->ctx;
 
-               spin_lock_irq(&ctx->completion_lock);
+               spin_lock(&ctx->completion_lock);
                io_for_each_link(cur, req)
                        io_prep_async_work(cur);
-               spin_unlock_irq(&ctx->completion_lock);
+               spin_unlock(&ctx->completion_lock);
        } else {
                io_for_each_link(cur, req)
                        io_prep_async_work(cur);
        }
 }
 
-static void io_queue_async_work(struct io_kiocb *req)
+static void io_queue_async_work(struct io_kiocb *req, bool *locked)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_kiocb *link = io_prep_linked_timeout(req);
        struct io_uring_task *tctx = req->task->io_uring;
 
+       /* must not take the lock, NULL it as a precaution */
+       locked = NULL;
+
        BUG_ON(!tctx);
        BUG_ON(!tctx->io_wq);
 
@@ -1323,6 +1476,7 @@ static void io_queue_async_work(struct io_kiocb *req)
 
 static void io_kill_timeout(struct io_kiocb *req, int status)
        __must_hold(&req->ctx->completion_lock)
+       __must_hold(&req->ctx->timeout_lock)
 {
        struct io_timeout_data *io = req->async_data;
 
@@ -1331,7 +1485,7 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
                        atomic_read(&req->ctx->cq_timeouts) + 1);
                list_del_init(&req->timeout.list);
                io_cqring_fill_event(req->ctx, req->user_data, status, 0);
-               io_put_req_deferred(req, 1);
+               io_put_req_deferred(req);
        }
 }
 
@@ -1350,9 +1504,11 @@ static void io_queue_deferred(struct io_ring_ctx *ctx)
 }
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
+       __must_hold(&ctx->completion_lock)
 {
        u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 
+       spin_lock_irq(&ctx->timeout_lock);
        while (!list_empty(&ctx->timeout_list)) {
                u32 events_needed, events_got;
                struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
@@ -1377,6 +1533,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
                io_kill_timeout(req, 0);
        }
        ctx->cq_last_tm_flush = seq;
+       spin_unlock_irq(&ctx->timeout_lock);
 }
 
 static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
@@ -1433,13 +1590,22 @@ static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
        return !ctx->eventfd_async || io_wq_current_is_worker();
 }
 
+/*
+ * This should only get called when at least one event has been posted.
+ * Some applications rely on the eventfd notification count only changing
+ * IFF a new CQE has been added to the CQ ring. There's no dependency on a
+ * 1:1 relationship between how many times this function is called (and
+ * hence the eventfd count) and the number of CQEs posted to the CQ ring.
+ */
 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
-       /* see waitqueue_active() comment */
-       smp_mb();
-
-       if (waitqueue_active(&ctx->cq_wait))
-               wake_up(&ctx->cq_wait);
+       /*
+        * wake_up_all() may seem excessive, but io_wake_function() and
+        * io_should_wake() handle the termination of the loop and only
+        * wake as many waiters as we need to.
+        */
+       if (wq_has_sleeper(&ctx->cq_wait))
+               wake_up_all(&ctx->cq_wait);
        if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
                wake_up(&ctx->sq_data->wait);
        if (io_should_trigger_evfd(ctx))
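/*
 * Userspace sketch (not part of this diff), assuming liburing: because the
 * eventfd count is not 1:1 with posted CQEs (see the comment above), a
 * consumer should drain the CQ ring after each eventfd wakeup rather than
 * reading exactly "count" completions. The ring is assumed to have been set
 * up earlier with io_uring_register_eventfd(ring, efd).
 */
#include <liburing.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

static void wait_and_drain(struct io_uring *ring, int efd)
{
	struct io_uring_cqe *cqe;
	uint64_t count;

	/* blocks until at least one completion has been posted */
	if (read(efd, &count, sizeof(count)) != sizeof(count))
		return;

	/* drain everything currently available, however many CQEs that is */
	while (io_uring_peek_cqe(ring, &cqe) == 0) {
		/* handle cqe->res / cqe->user_data here */
		io_uring_cqe_seen(ring, cqe);
	}
}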
@@ -1452,12 +1618,9 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 {
-       /* see waitqueue_active() comment */
-       smp_mb();
-
        if (ctx->flags & IORING_SETUP_SQPOLL) {
-               if (waitqueue_active(&ctx->cq_wait))
-                       wake_up(&ctx->cq_wait);
+               if (wq_has_sleeper(&ctx->cq_wait))
+                       wake_up_all(&ctx->cq_wait);
        }
        if (io_should_trigger_evfd(ctx))
                eventfd_signal(ctx->cq_ev_fd, 1);
@@ -1470,14 +1633,13 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 /* Returns true if there are no backlogged entries after the flush */
 static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 {
-       unsigned long flags;
        bool all_flushed, posted;
 
        if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
                return false;
 
        posted = false;
-       spin_lock_irqsave(&ctx->completion_lock, flags);
+       spin_lock(&ctx->completion_lock);
        while (!list_empty(&ctx->cq_overflow_list)) {
                struct io_uring_cqe *cqe = io_get_cqe(ctx);
                struct io_overflow_cqe *ocqe;
@@ -1499,18 +1661,19 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
        all_flushed = list_empty(&ctx->cq_overflow_list);
        if (all_flushed) {
                clear_bit(0, &ctx->check_cq_overflow);
-               ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
+               WRITE_ONCE(ctx->rings->sq_flags,
+                          ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
        }
 
        if (posted)
                io_commit_cqring(ctx);
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       spin_unlock(&ctx->completion_lock);
        if (posted)
                io_cqring_ev_posted(ctx);
        return all_flushed;
 }
 
-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 {
        bool ret = true;
 
@@ -1518,7 +1681,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
                /* iopoll syncs against uring_lock, not completion_lock */
                if (ctx->flags & IORING_SETUP_IOPOLL)
                        mutex_lock(&ctx->uring_lock);
-               ret = __io_cqring_overflow_flush(ctx, force);
+               ret = __io_cqring_overflow_flush(ctx, false);
                if (ctx->flags & IORING_SETUP_IOPOLL)
                        mutex_unlock(&ctx->uring_lock);
        }
@@ -1526,39 +1689,37 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
        return ret;
 }
 
-/*
- * Shamelessly stolen from the mm implementation of page reference checking,
- * see commit f958d7b528b1 for details.
- */
-#define req_ref_zero_or_close_to_overflow(req) \
-       ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
-
-static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
+/* must be called somewhat shortly after putting a request */
+static inline void io_put_task(struct task_struct *task, int nr)
 {
-       return atomic_inc_not_zero(&req->refs);
-}
+       struct io_uring_task *tctx = task->io_uring;
 
-static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
-{
-       WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
-       return atomic_sub_and_test(refs, &req->refs);
+       if (likely(task == current)) {
+               tctx->cached_refs += nr;
+       } else {
+               percpu_counter_sub(&tctx->inflight, nr);
+               if (unlikely(atomic_read(&tctx->in_idle)))
+                       wake_up(&tctx->wait);
+               put_task_struct_many(task, nr);
+       }
 }
 
-static inline bool req_ref_put_and_test(struct io_kiocb *req)
+static void io_task_refs_refill(struct io_uring_task *tctx)
 {
-       WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
-       return atomic_dec_and_test(&req->refs);
-}
+       unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
 
-static inline void req_ref_put(struct io_kiocb *req)
-{
-       WARN_ON_ONCE(req_ref_put_and_test(req));
+       percpu_counter_add(&tctx->inflight, refill);
+       refcount_add(refill, &current->usage);
+       tctx->cached_refs += refill;
 }
 
-static inline void req_ref_get(struct io_kiocb *req)
+static inline void io_get_task_refs(int nr)
 {
-       WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
-       atomic_inc(&req->refs);
+       struct io_uring_task *tctx = current->io_uring;
+
+       tctx->cached_refs -= nr;
+       if (unlikely(tctx->cached_refs < 0))
+               io_task_refs_refill(tctx);
 }
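/*
 * Illustrative sketch (not part of this diff) of the batching idea behind
 * io_get_task_refs()/io_task_refs_refill() above: take references from a
 * cheap per-thread cache and only touch the shared counter when the cache
 * runs dry. REFS_CACHE_NR and the helper names are assumptions made for
 * the example.
 */
#include <stdatomic.h>

#define REFS_CACHE_NR	128

static atomic_long shared_inflight;		/* expensive, shared counter */
static _Thread_local long cached_refs;		/* cheap, per-thread cache */

static void refs_refill(void)
{
	long refill = -cached_refs + REFS_CACHE_NR;

	atomic_fetch_add(&shared_inflight, refill);
	cached_refs += refill;
}

static void get_refs(int nr)
{
	cached_refs -= nr;
	if (cached_refs < 0)
		refs_refill();
}

static void put_refs(int nr)
{
	/* returning refs just grows the local cache; flushed elsewhere */
	cached_refs += nr;
}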
 
 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
@@ -1578,7 +1739,9 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
        }
        if (list_empty(&ctx->cq_overflow_list)) {
                set_bit(0, &ctx->check_cq_overflow);
-               ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
+               WRITE_ONCE(ctx->rings->sq_flags,
+                          ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
+
        }
        ocqe->cqe.user_data = user_data;
        ocqe->cqe.res = res;
@@ -1620,9 +1783,8 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
                                 unsigned int cflags)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       unsigned long flags;
 
-       spin_lock_irqsave(&ctx->completion_lock, flags);
+       spin_lock(&ctx->completion_lock);
        __io_cqring_fill_event(ctx, req->user_data, res, cflags);
        /*
         * If we're the last reference to this request, add to our locked
@@ -1630,7 +1792,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
         */
        if (req_ref_put_and_test(req)) {
                if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
-                       if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL))
+                       if (req->flags & IO_DISARM_MASK)
                                io_disarm_next(req);
                        if (req->link) {
                                io_req_task_queue(req->link);
@@ -1639,14 +1801,14 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
                }
                io_dismantle_req(req);
                io_put_task(req->task, 1);
-               list_add(&req->compl.list, &ctx->locked_free_list);
+               list_add(&req->inflight_entry, &ctx->locked_free_list);
                ctx->locked_free_nr++;
        } else {
                if (!percpu_ref_tryget(&ctx->refs))
                        req = NULL;
        }
        io_commit_cqring(ctx);
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       spin_unlock(&ctx->completion_lock);
 
        if (req) {
                io_cqring_ev_posted(ctx);
@@ -1686,24 +1848,35 @@ static inline void io_req_complete(struct io_kiocb *req, long res)
 static void io_req_complete_failed(struct io_kiocb *req, long res)
 {
        req_set_fail(req);
-       io_put_req(req);
        io_req_complete_post(req, res, 0);
 }
 
+/*
+ * Don't initialise the fields below on every allocation, but do that in
+ * advance and keep them valid across allocations.
+ */
+static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
+{
+       req->ctx = ctx;
+       req->link = NULL;
+       req->async_data = NULL;
+       /* not necessary, but safer to zero */
+       req->result = 0;
+}
+
 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
-                                       struct io_comp_state *cs)
+                                       struct io_submit_state *state)
 {
-       spin_lock_irq(&ctx->completion_lock);
-       list_splice_init(&ctx->locked_free_list, &cs->free_list);
+       spin_lock(&ctx->completion_lock);
+       list_splice_init(&ctx->locked_free_list, &state->free_list);
        ctx->locked_free_nr = 0;
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
 }
 
 /* Returns true IFF there are requests in the cache */
 static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 {
        struct io_submit_state *state = &ctx->submit_state;
-       struct io_comp_state *cs = &state->comp;
        int nr;
 
        /*
@@ -1712,14 +1885,14 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
         * side cache.
         */
        if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
-               io_flush_cached_locked_reqs(ctx, cs);
+               io_flush_cached_locked_reqs(ctx, state);
 
        nr = state->free_reqs;
-       while (!list_empty(&cs->free_list)) {
-               struct io_kiocb *req = list_first_entry(&cs->free_list,
-                                               struct io_kiocb, compl.list);
+       while (!list_empty(&state->free_list)) {
+               struct io_kiocb *req = list_first_entry(&state->free_list,
+                                       struct io_kiocb, inflight_entry);
 
-               list_del(&req->compl.list);
+               list_del(&req->inflight_entry);
                state->reqs[nr++] = req;
                if (nr == ARRAY_SIZE(state->reqs))
                        break;
@@ -1729,48 +1902,41 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
        return nr != 0;
 }
 
+/*
+ * A request might get retired back into the request caches even before opcode
+ * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
+ * Because of that, io_alloc_req() should be called only under ->uring_lock
+ * and with extra caution to not get a request that is still being worked on.
+ */
 static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
+       __must_hold(&ctx->uring_lock)
 {
        struct io_submit_state *state = &ctx->submit_state;
+       gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+       int ret, i;
 
        BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
 
-       if (!state->free_reqs) {
-               gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
-               int ret, i;
-
-               if (io_flush_cached_reqs(ctx))
-                       goto got_req;
+       if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
+               goto got_req;
 
-               ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
-                                           state->reqs);
+       ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
+                                   state->reqs);
 
-               /*
-                * Bulk alloc is all-or-nothing. If we fail to get a batch,
-                * retry single alloc to be on the safe side.
-                */
-               if (unlikely(ret <= 0)) {
-                       state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
-                       if (!state->reqs[0])
-                               return NULL;
-                       ret = 1;
-               }
-
-               /*
-                * Don't initialise the fields below on every allocation, but
-                * do that in advance and keep valid on free.
-                */
-               for (i = 0; i < ret; i++) {
-                       struct io_kiocb *req = state->reqs[i];
-
-                       req->ctx = ctx;
-                       req->link = NULL;
-                       req->async_data = NULL;
-                       /* not necessary, but safer to zero */
-                       req->result = 0;
-               }
-               state->free_reqs = ret;
+       /*
+        * Bulk alloc is all-or-nothing. If we fail to get a batch,
+        * retry single alloc to be on the safe side.
+        */
+       if (unlikely(ret <= 0)) {
+               state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
+               if (!state->reqs[0])
+                       return NULL;
+               ret = 1;
        }
+
+       for (i = 0; i < ret; i++)
+               io_preinit_req(state->reqs[i], ctx);
+       state->free_reqs = ret;
 got_req:
        state->free_reqs--;
        return state->reqs[state->free_reqs];
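/*
 * Illustrative sketch (not part of this diff) of the allocation strategy in
 * io_alloc_req() above: try to grab a whole batch at once and, because the
 * batch path is all-or-nothing, fall back to a single allocation on failure.
 * alloc_batch(), alloc_req() and the batch size are assumptions made for
 * the example.
 */
#include <stdlib.h>

#define REQ_ALLOC_BATCH	8

struct req { int opcode; };

/* fills reqs[0..n-1] or fails as a whole; returns the number allocated */
static int alloc_batch(struct req **reqs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		reqs[i] = malloc(sizeof(**reqs));
		if (!reqs[i]) {
			while (i--)
				free(reqs[i]);
			return 0;
		}
	}
	return n;
}

static struct req *alloc_req(struct req **cache, int *nr_free)
{
	if (*nr_free == 0) {
		int got = alloc_batch(cache, REQ_ALLOC_BATCH);

		/* batch failed: retry a single allocation to stay safe */
		if (got == 0) {
			cache[0] = malloc(sizeof(*cache[0]));
			if (!cache[0])
				return NULL;
			got = 1;
		}
		*nr_free = got;
	}
	return cache[--(*nr_free)];
}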
@@ -1798,17 +1964,6 @@ static void io_dismantle_req(struct io_kiocb *req)
        }
 }
 
-/* must to be called somewhat shortly after putting a request */
-static inline void io_put_task(struct task_struct *task, int nr)
-{
-       struct io_uring_task *tctx = task->io_uring;
-
-       percpu_counter_sub(&tctx->inflight, nr);
-       if (unlikely(atomic_read(&tctx->in_idle)))
-               wake_up(&tctx->wait);
-       put_task_struct_many(task, nr);
-}
-
 static void __io_free_req(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
@@ -1816,7 +1971,11 @@ static void __io_free_req(struct io_kiocb *req)
        io_dismantle_req(req);
        io_put_task(req->task, 1);
 
-       kmem_cache_free(req_cachep, req);
+       spin_lock(&ctx->completion_lock);
+       list_add(&req->inflight_entry, &ctx->locked_free_list);
+       ctx->locked_free_nr++;
+       spin_unlock(&ctx->completion_lock);
+
        percpu_ref_put(&ctx->refs);
 }
 
@@ -1830,22 +1989,20 @@ static inline void io_remove_next_linked(struct io_kiocb *req)
 
 static bool io_kill_linked_timeout(struct io_kiocb *req)
        __must_hold(&req->ctx->completion_lock)
+       __must_hold(&req->ctx->timeout_lock)
 {
        struct io_kiocb *link = req->link;
 
-       /*
-        * Can happen if a linked timeout fired and link had been like
-        * req -> link t-out -> link t-out [-> ...]
-        */
-       if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
+       if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
                struct io_timeout_data *io = link->async_data;
 
                io_remove_next_linked(req);
                link->timeout.head = NULL;
                if (hrtimer_try_to_cancel(&io->timer) != -1) {
+                       list_del(&link->timeout.list);
                        io_cqring_fill_event(link->ctx, link->user_data,
                                             -ECANCELED, 0);
-                       io_put_req_deferred(link, 1);
+                       io_put_req_deferred(link);
                        return true;
                }
        }
@@ -1859,12 +2016,17 @@ static void io_fail_links(struct io_kiocb *req)
 
        req->link = NULL;
        while (link) {
+               long res = -ECANCELED;
+
+               if (link->flags & REQ_F_FAIL)
+                       res = link->result;
+
                nxt = link->link;
                link->link = NULL;
 
                trace_io_uring_fail_link(req, link);
-               io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
-               io_put_req_deferred(link, 2);
+               io_cqring_fill_event(link->ctx, link->user_data, res, 0);
+               io_put_req_deferred(link);
                link = nxt;
        }
 }
@@ -1874,8 +2036,24 @@ static bool io_disarm_next(struct io_kiocb *req)
 {
        bool posted = false;
 
-       if (likely(req->flags & REQ_F_LINK_TIMEOUT))
+       if (req->flags & REQ_F_ARM_LTIMEOUT) {
+               struct io_kiocb *link = req->link;
+
+               req->flags &= ~REQ_F_ARM_LTIMEOUT;
+               if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
+                       io_remove_next_linked(req);
+                       io_cqring_fill_event(link->ctx, link->user_data,
+                                            -ECANCELED, 0);
+                       io_put_req_deferred(link);
+                       posted = true;
+               }
+       } else if (req->flags & REQ_F_LINK_TIMEOUT) {
+               struct io_ring_ctx *ctx = req->ctx;
+
+               spin_lock_irq(&ctx->timeout_lock);
                posted = io_kill_linked_timeout(req);
+               spin_unlock_irq(&ctx->timeout_lock);
+       }
        if (unlikely((req->flags & REQ_F_FAIL) &&
                     !(req->flags & REQ_F_HARDLINK))) {
                posted |= (req->link != NULL);
@@ -1894,16 +2072,15 @@ static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
         * dependencies to the next request. In case of failure, fail the rest
         * of the chain.
         */
-       if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL)) {
+       if (req->flags & IO_DISARM_MASK) {
                struct io_ring_ctx *ctx = req->ctx;
-               unsigned long flags;
                bool posted;
 
-               spin_lock_irqsave(&ctx->completion_lock, flags);
+               spin_lock(&ctx->completion_lock);
                posted = io_disarm_next(req);
                if (posted)
                        io_commit_cqring(req->ctx);
-               spin_unlock_irqrestore(&ctx->completion_lock, flags);
+               spin_unlock(&ctx->completion_lock);
                if (posted)
                        io_cqring_ev_posted(ctx);
        }
@@ -1919,20 +2096,22 @@ static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
        return __io_req_find_next(req);
 }
 
-static void ctx_flush_and_put(struct io_ring_ctx *ctx)
+static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
 {
        if (!ctx)
                return;
-       if (ctx->submit_state.comp.nr) {
-               mutex_lock(&ctx->uring_lock);
-               io_submit_flush_completions(ctx);
+       if (*locked) {
+               if (ctx->submit_state.compl_nr)
+                       io_submit_flush_completions(ctx);
                mutex_unlock(&ctx->uring_lock);
+               *locked = false;
        }
        percpu_ref_put(&ctx->refs);
 }
 
 static void tctx_task_work(struct callback_head *cb)
 {
+       bool locked = false;
        struct io_ring_ctx *ctx = NULL;
        struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
                                                  task_work);
@@ -1943,37 +2122,32 @@ static void tctx_task_work(struct callback_head *cb)
                spin_lock_irq(&tctx->task_lock);
                node = tctx->task_list.first;
                INIT_WQ_LIST(&tctx->task_list);
+               if (!node)
+                       tctx->task_running = false;
                spin_unlock_irq(&tctx->task_lock);
+               if (!node)
+                       break;
 
-               while (node) {
+               do {
                        struct io_wq_work_node *next = node->next;
                        struct io_kiocb *req = container_of(node, struct io_kiocb,
                                                            io_task_work.node);
 
                        if (req->ctx != ctx) {
-                               ctx_flush_and_put(ctx);
+                               ctx_flush_and_put(ctx, &locked);
                                ctx = req->ctx;
+                               /* if not contended, grab and improve batching */
+                               locked = mutex_trylock(&ctx->uring_lock);
                                percpu_ref_get(&ctx->refs);
                        }
-                       req->io_task_work.func(req);
+                       req->io_task_work.func(req, &locked);
                        node = next;
-               }
-               if (wq_list_empty(&tctx->task_list)) {
-                       spin_lock_irq(&tctx->task_lock);
-                       clear_bit(0, &tctx->task_state);
-                       if (wq_list_empty(&tctx->task_list)) {
-                               spin_unlock_irq(&tctx->task_lock);
-                               break;
-                       }
-                       spin_unlock_irq(&tctx->task_lock);
-                       /* another tctx_task_work() is enqueued, yield */
-                       if (test_and_set_bit(0, &tctx->task_state))
-                               break;
-               }
+               } while (node);
+
                cond_resched();
        }
 
-       ctx_flush_and_put(ctx);
+       ctx_flush_and_put(ctx, &locked);
 }
 
 static void io_req_task_work_add(struct io_kiocb *req)
@@ -1983,19 +2157,20 @@ static void io_req_task_work_add(struct io_kiocb *req)
        enum task_work_notify_mode notify;
        struct io_wq_work_node *node;
        unsigned long flags;
+       bool running;
 
        WARN_ON_ONCE(!tctx);
 
        spin_lock_irqsave(&tctx->task_lock, flags);
        wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
+       running = tctx->task_running;
+       if (!running)
+               tctx->task_running = true;
        spin_unlock_irqrestore(&tctx->task_lock, flags);
 
        /* task_work already pending, we're done */
-       if (test_bit(0, &tctx->task_state) ||
-           test_and_set_bit(0, &tctx->task_state))
+       if (running)
                return;
-       if (unlikely(tsk->flags & PF_EXITING))
-               goto fail;
 
        /*
         * SQPOLL kernel thread doesn't need notification, just a wakeup. For
@@ -2008,9 +2183,9 @@ static void io_req_task_work_add(struct io_kiocb *req)
                wake_up_process(tsk);
                return;
        }
-fail:
-       clear_bit(0, &tctx->task_state);
+
        spin_lock_irqsave(&tctx->task_lock, flags);
+       tctx->task_running = false;
        node = tctx->task_list.first;
        INIT_WQ_LIST(&tctx->task_list);
        spin_unlock_irqrestore(&tctx->task_lock, flags);
@@ -2024,27 +2199,25 @@ fail:
        }
 }
 
-static void io_req_task_cancel(struct io_kiocb *req)
+static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       /* ctx is guaranteed to stay alive while we hold uring_lock */
-       mutex_lock(&ctx->uring_lock);
+       /* not needed for normal modes, but SQPOLL depends on it */
+       io_tw_lock(ctx, locked);
        io_req_complete_failed(req, req->result);
-       mutex_unlock(&ctx->uring_lock);
 }
 
-static void io_req_task_submit(struct io_kiocb *req)
+static void io_req_task_submit(struct io_kiocb *req, bool *locked)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       /* ctx stays valid until unlock, even if we drop all ours ctx->refs */
-       mutex_lock(&ctx->uring_lock);
-       if (!(req->task->flags & PF_EXITING) && !req->task->in_execve)
+       io_tw_lock(ctx, locked);
+       /* req->task == current here, checking PF_EXITING is safe */
+       if (likely(!(req->task->flags & PF_EXITING)))
                __io_queue_sqe(req);
        else
                io_req_complete_failed(req, -EFAULT);
-       mutex_unlock(&ctx->uring_lock);
 }
 
 static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
@@ -2080,6 +2253,11 @@ static void io_free_req(struct io_kiocb *req)
        __io_free_req(req);
 }
 
+static void io_free_req_work(struct io_kiocb *req, bool *locked)
+{
+       io_free_req(req);
+}
+
 struct req_batch {
        struct task_struct      *task;
        int                     task_refs;
@@ -2096,10 +2274,10 @@ static inline void io_init_req_batch(struct req_batch *rb)
 static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
                                     struct req_batch *rb)
 {
-       if (rb->task)
-               io_put_task(rb->task, rb->task_refs);
        if (rb->ctx_refs)
                percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
+       if (rb->task)
+               io_put_task(rb->task, rb->task_refs);
 }
 
 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
@@ -2120,37 +2298,37 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
        if (state->free_reqs != ARRAY_SIZE(state->reqs))
                state->reqs[state->free_reqs++] = req;
        else
-               list_add(&req->compl.list, &state->comp.free_list);
+               list_add(&req->inflight_entry, &state->free_list);
 }
 
 static void io_submit_flush_completions(struct io_ring_ctx *ctx)
+       __must_hold(&ctx->uring_lock)
 {
-       struct io_comp_state *cs = &ctx->submit_state.comp;
-       int i, nr = cs->nr;
+       struct io_submit_state *state = &ctx->submit_state;
+       int i, nr = state->compl_nr;
        struct req_batch rb;
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        for (i = 0; i < nr; i++) {
-               struct io_kiocb *req = cs->reqs[i];
+               struct io_kiocb *req = state->compl_reqs[i];
 
                __io_cqring_fill_event(ctx, req->user_data, req->result,
                                        req->compl.cflags);
        }
        io_commit_cqring(ctx);
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
        io_cqring_ev_posted(ctx);
 
        io_init_req_batch(&rb);
        for (i = 0; i < nr; i++) {
-               struct io_kiocb *req = cs->reqs[i];
+               struct io_kiocb *req = state->compl_reqs[i];
 
-               /* submission and completion refs */
-               if (req_ref_sub_and_test(req, 2))
+               if (req_ref_put_and_test(req))
                        io_req_free_batch(&rb, req, &ctx->submit_state);
        }
 
        io_req_free_batch_finish(ctx, &rb);
-       cs->nr = 0;
+       state->compl_nr = 0;
 }
 
 /*
@@ -2174,16 +2352,12 @@ static inline void io_put_req(struct io_kiocb *req)
                io_free_req(req);
 }
 
-static void io_free_req_deferred(struct io_kiocb *req)
-{
-       req->io_task_work.func = io_free_req;
-       io_req_task_work_add(req);
-}
-
-static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
+static inline void io_put_req_deferred(struct io_kiocb *req)
 {
-       if (req_ref_sub_and_test(req, refs))
-               io_free_req_deferred(req);
+       if (req_ref_put_and_test(req)) {
+               req->io_task_work.func = io_free_req_work;
+               io_req_task_work_add(req);
+       }
 }
 
 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
@@ -2216,15 +2390,17 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
 {
        struct io_buffer *kbuf;
 
+       if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
+               return 0;
        kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
        return io_put_kbuf(req, kbuf);
 }
 
 static inline bool io_run_task_work(void)
 {
-       if (current->task_works) {
+       if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
                __set_current_state(TASK_RUNNING);
-               task_work_run();
+               tracehook_notify_signal();
                return true;
        }
 
@@ -2235,7 +2411,7 @@ static inline bool io_run_task_work(void)
  * Find and free completed poll iocbs
  */
 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
-                              struct list_head *done, bool resubmit)
+                              struct list_head *done)
 {
        struct req_batch rb;
        struct io_kiocb *req;
@@ -2245,23 +2421,18 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 
        io_init_req_batch(&rb);
        while (!list_empty(done)) {
-               int cflags = 0;
-
                req = list_first_entry(done, struct io_kiocb, inflight_entry);
                list_del(&req->inflight_entry);
 
-               if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
+               if (READ_ONCE(req->result) == -EAGAIN &&
                    !(req->flags & REQ_F_DONT_REISSUE)) {
                        req->iopoll_completed = 0;
-                       req_ref_get(req);
                        io_req_task_queue_reissue(req);
                        continue;
                }
 
-               if (req->flags & REQ_F_BUFFER_SELECTED)
-                       cflags = io_put_rw_kbuf(req);
-
-               __io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
+               __io_cqring_fill_event(ctx, req->user_data, req->result,
+                                       io_put_rw_kbuf(req));
                (*nr_events)++;
 
                if (req_ref_put_and_test(req))
@@ -2274,12 +2445,11 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 }
 
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
-                       long min, bool resubmit)
+                       long min)
 {
        struct io_kiocb *req, *tmp;
        LIST_HEAD(done);
        bool spin;
-       int ret;
 
        /*
         * Only spin for completions if we don't have multiple devices hanging
@@ -2287,9 +2457,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
         */
        spin = !ctx->poll_multi_queue && *nr_events < min;
 
-       ret = 0;
        list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
                struct kiocb *kiocb = &req->rw.kiocb;
+               int ret;
 
                /*
                 * Move completed and retryable entries to our local lists.
@@ -2304,22 +2474,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
                        break;
 
                ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
-               if (ret < 0)
-                       break;
+               if (unlikely(ret < 0))
+                       return ret;
+               else if (ret)
+                       spin = false;
 
                /* iopoll may have completed current req */
                if (READ_ONCE(req->iopoll_completed))
                        list_move_tail(&req->inflight_entry, &done);
-
-               if (ret && spin)
-                       spin = false;
-               ret = 0;
        }
 
        if (!list_empty(&done))
-               io_iopoll_complete(ctx, nr_events, &done, resubmit);
+               io_iopoll_complete(ctx, nr_events, &done);
 
-       return ret;
+       return 0;
 }
 
 /*
@@ -2335,7 +2503,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
        while (!list_empty(&ctx->iopoll_list)) {
                unsigned int nr_events = 0;
 
-               io_do_iopoll(ctx, &nr_events, 0, false);
+               io_do_iopoll(ctx, &nr_events, 0);
 
                /* let it sleep and repeat later if can't complete a request */
                if (nr_events == 0)
@@ -2397,7 +2565,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                            list_empty(&ctx->iopoll_list))
                                break;
                }
-               ret = io_do_iopoll(ctx, &nr_events, min, true);
+               ret = io_do_iopoll(ctx, &nr_events, min);
        } while (!ret && nr_events < min && !need_resched());
 out:
        mutex_unlock(&ctx->uring_lock);
@@ -2466,42 +2634,57 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
 }
 #endif
 
-static void io_fallback_req_func(struct work_struct *work)
-{
-       struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
-                                               fallback_work.work);
-       struct llist_node *node = llist_del_all(&ctx->fallback_llist);
-       struct io_kiocb *req, *tmp;
-
-       llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
-               req->io_task_work.func(req);
-}
-
-static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
-                            unsigned int issue_flags)
+static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 {
-       int cflags = 0;
-
        if (req->rw.kiocb.ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
        if (res != req->result) {
                if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
                    io_rw_should_reissue(req)) {
                        req->flags |= REQ_F_REISSUE;
-                       return;
+                       return true;
                }
                req_set_fail(req);
+               req->result = res;
        }
-       if (req->flags & REQ_F_BUFFER_SELECTED)
-               cflags = io_put_rw_kbuf(req);
-       __io_req_complete(req, issue_flags, res, cflags);
+       return false;
+}
+
+static void io_req_task_complete(struct io_kiocb *req, bool *locked)
+{
+       unsigned int cflags = io_put_rw_kbuf(req);
+       long res = req->result;
+
+       if (*locked) {
+               struct io_ring_ctx *ctx = req->ctx;
+               struct io_submit_state *state = &ctx->submit_state;
+
+               io_req_complete_state(req, res, cflags);
+               state->compl_reqs[state->compl_nr++] = req;
+               if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
+                       io_submit_flush_completions(ctx);
+       } else {
+               io_req_complete_post(req, res, cflags);
+       }
+}
+
+static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
+                            unsigned int issue_flags)
+{
+       if (__io_complete_rw_common(req, res))
+               return;
+       __io_req_complete(req, 0, req->result, io_put_rw_kbuf(req));
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
-       __io_complete_rw(req, res, res2, 0);
+       if (__io_complete_rw_common(req, res))
+               return;
+       req->result = res;
+       req->io_task_work.func = io_req_task_complete;
+       io_req_task_work_add(req);
 }
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -2587,40 +2770,6 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
        }
 }
 
-static inline void io_state_file_put(struct io_submit_state *state)
-{
-       if (state->file_refs) {
-               fput_many(state->file, state->file_refs);
-               state->file_refs = 0;
-       }
-}
-
-/*
- * Get as many references to a file as we have IOs left in this submission,
- * assuming most submissions are for one file, or at least that each file
- * has more than one submission.
- */
-static struct file *__io_file_get(struct io_submit_state *state, int fd)
-{
-       if (!state)
-               return fget(fd);
-
-       if (state->file_refs) {
-               if (state->fd == fd) {
-                       state->file_refs--;
-                       return state->file;
-               }
-               io_state_file_put(state);
-       }
-       state->file = fget_many(fd, state->ios_left);
-       if (unlikely(!state->file))
-               return NULL;
-
-       state->fd = fd;
-       state->file_refs = state->ios_left - 1;
-       return state->file;
-}
-
 static bool io_bdev_nowait(struct block_device *bdev)
 {
        return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
@@ -2631,7 +2780,7 @@ static bool io_bdev_nowait(struct block_device *bdev)
  * any file. For now, just ensure that anything potentially problematic is done
  * inline.
  */
-static bool __io_file_supports_async(struct file *file, int rw)
+static bool __io_file_supports_nowait(struct file *file, int rw)
 {
        umode_t mode = file_inode(file)->i_mode;
 
@@ -2664,14 +2813,14 @@ static bool __io_file_supports_async(struct file *file, int rw)
        return file->f_op->write_iter != NULL;
 }
 
-static bool io_file_supports_async(struct io_kiocb *req, int rw)
+static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
 {
-       if (rw == READ && (req->flags & REQ_F_ASYNC_READ))
+       if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
                return true;
-       else if (rw == WRITE && (req->flags & REQ_F_ASYNC_WRITE))
+       else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
                return true;
 
-       return __io_file_supports_async(req->file, rw);
+       return __io_file_supports_nowait(req->file, rw);
 }
 
 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -2682,7 +2831,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        unsigned ioprio;
        int ret;
 
-       if (!(req->flags & REQ_F_ISREG) && S_ISREG(file_inode(file)->i_mode))
+       if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
                req->flags |= REQ_F_ISREG;
 
        kiocb->ki_pos = READ_ONCE(sqe->off);
@@ -2715,7 +2864,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                    !kiocb->ki_filp->f_op->iopoll)
                        return -EOPNOTSUPP;
 
-               kiocb->ki_flags |= IOCB_HIPRI;
+               kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
                kiocb->ki_complete = io_complete_rw_iopoll;
                req->iopoll_completed = 0;
        } else {
@@ -2782,15 +2931,11 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
        if (check_reissue && (req->flags & REQ_F_REISSUE)) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req)) {
-                       req_ref_get(req);
                        io_req_task_queue_reissue(req);
                } else {
-                       int cflags = 0;
-
                        req_set_fail(req);
-                       if (req->flags & REQ_F_BUFFER_SELECTED)
-                               cflags = io_put_rw_kbuf(req);
-                       __io_req_complete(req, issue_flags, ret, cflags);
+                       __io_req_complete(req, issue_flags, ret,
+                                         io_put_rw_kbuf(req));
                }
        }
 }
@@ -3208,9 +3353,6 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 
        req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
        list_del_init(&wait->entry);
-
-       /* submit ref gets dropped, acquire a new one */
-       req_ref_get(req);
        io_req_task_queue(req);
        return 1;
 }
@@ -3295,7 +3437,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                kiocb->ki_flags |= IOCB_NOWAIT;
 
        /* If the file doesn't support async, just async punt */
-       if (force_nonblock && !io_file_supports_async(req, READ)) {
+       if (force_nonblock && !io_file_supports_nowait(req, READ)) {
                ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
                return ret ?: -EAGAIN;
        }
@@ -3400,7 +3542,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
                kiocb->ki_flags |= IOCB_NOWAIT;
 
        /* If the file doesn't support async, just async punt */
-       if (force_nonblock && !io_file_supports_async(req, WRITE))
+       if (force_nonblock && !io_file_supports_nowait(req, WRITE))
                goto copy_iov;
 
        /* file path doesn't support NOWAIT for non-direct_IO */
@@ -3433,83 +3575,224 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        else
                ret2 = -EINVAL;
 
-       if (req->flags & REQ_F_REISSUE) {
-               req->flags &= ~REQ_F_REISSUE;
-               ret2 = -EAGAIN;
-       }
+       if (req->flags & REQ_F_REISSUE) {
+               req->flags &= ~REQ_F_REISSUE;
+               ret2 = -EAGAIN;
+       }
+
+       /*
+        * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
+        * retry them without IOCB_NOWAIT.
+        */
+       if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
+               ret2 = -EAGAIN;
+       /* no retry on NONBLOCK nor RWF_NOWAIT */
+       if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
+               goto done;
+       if (!force_nonblock || ret2 != -EAGAIN) {
+               /* IOPOLL retry should happen for io-wq threads */
+               if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
+                       goto copy_iov;
+done:
+               kiocb_done(kiocb, ret2, issue_flags);
+       } else {
+copy_iov:
+               /* some cases will consume bytes even on error returns */
+               iov_iter_revert(iter, io_size - iov_iter_count(iter));
+               ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
+               return ret ?: -EAGAIN;
+       }
+out_free:
+       /* it's reportedly faster than delegating the null check to kfree() */
+       if (iovec)
+               kfree(iovec);
+       return ret;
+}
+
+static int io_renameat_prep(struct io_kiocb *req,
+                           const struct io_uring_sqe *sqe)
+{
+       struct io_rename *ren = &req->rename;
+       const char __user *oldf, *newf;
+
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
+               return -EINVAL;
+       if (unlikely(req->flags & REQ_F_FIXED_FILE))
+               return -EBADF;
+
+       ren->old_dfd = READ_ONCE(sqe->fd);
+       oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+       ren->new_dfd = READ_ONCE(sqe->len);
+       ren->flags = READ_ONCE(sqe->rename_flags);
+
+       ren->oldpath = getname(oldf);
+       if (IS_ERR(ren->oldpath))
+               return PTR_ERR(ren->oldpath);
+
+       ren->newpath = getname(newf);
+       if (IS_ERR(ren->newpath)) {
+               putname(ren->oldpath);
+               return PTR_ERR(ren->newpath);
+       }
+
+       req->flags |= REQ_F_NEED_CLEANUP;
+       return 0;
+}
+
+static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
+{
+       struct io_rename *ren = &req->rename;
+       int ret;
+
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               return -EAGAIN;
+
+       ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
+                               ren->newpath, ren->flags);
+
+       req->flags &= ~REQ_F_NEED_CLEANUP;
+       if (ret < 0)
+               req_set_fail(req);
+       io_req_complete(req, ret);
+       return 0;
+}
+
+static int io_unlinkat_prep(struct io_kiocb *req,
+                           const struct io_uring_sqe *sqe)
+{
+       struct io_unlink *un = &req->unlink;
+       const char __user *fname;
+
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
+           sqe->splice_fd_in)
+               return -EINVAL;
+       if (unlikely(req->flags & REQ_F_FIXED_FILE))
+               return -EBADF;
+
+       un->dfd = READ_ONCE(sqe->fd);
+
+       un->flags = READ_ONCE(sqe->unlink_flags);
+       if (un->flags & ~AT_REMOVEDIR)
+               return -EINVAL;
+
+       fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       un->filename = getname(fname);
+       if (IS_ERR(un->filename))
+               return PTR_ERR(un->filename);
+
+       req->flags |= REQ_F_NEED_CLEANUP;
+       return 0;
+}
+
+static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
+{
+       struct io_unlink *un = &req->unlink;
+       int ret;
+
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               return -EAGAIN;
+
+       if (un->flags & AT_REMOVEDIR)
+               ret = do_rmdir(un->dfd, un->filename);
+       else
+               ret = do_unlinkat(un->dfd, un->filename);
+
+       req->flags &= ~REQ_F_NEED_CLEANUP;
+       if (ret < 0)
+               req_set_fail(req);
+       io_req_complete(req, ret);
+       return 0;
+}
+
+static int io_mkdirat_prep(struct io_kiocb *req,
+                           const struct io_uring_sqe *sqe)
+{
+       struct io_mkdir *mkd = &req->mkdir;
+       const char __user *fname;
+
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
+           sqe->splice_fd_in)
+               return -EINVAL;
+       if (unlikely(req->flags & REQ_F_FIXED_FILE))
+               return -EBADF;
+
+       mkd->dfd = READ_ONCE(sqe->fd);
+       mkd->mode = READ_ONCE(sqe->len);
+
+       fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       mkd->filename = getname(fname);
+       if (IS_ERR(mkd->filename))
+               return PTR_ERR(mkd->filename);
+
+       req->flags |= REQ_F_NEED_CLEANUP;
+       return 0;
+}
+
+static int io_mkdirat(struct io_kiocb *req, int issue_flags)
+{
+       struct io_mkdir *mkd = &req->mkdir;
+       int ret;
+
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               return -EAGAIN;
+
+       ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
 
-       /*
-        * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
-        * retry them without IOCB_NOWAIT.
-        */
-       if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
-               ret2 = -EAGAIN;
-       /* no retry on NONBLOCK nor RWF_NOWAIT */
-       if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
-               goto done;
-       if (!force_nonblock || ret2 != -EAGAIN) {
-               /* IOPOLL retry should happen for io-wq threads */
-               if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
-                       goto copy_iov;
-done:
-               kiocb_done(kiocb, ret2, issue_flags);
-       } else {
-copy_iov:
-               /* some cases will consume bytes even on error returns */
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
-               ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
-               return ret ?: -EAGAIN;
-       }
-out_free:
-       /* it's reportedly faster than delegating the null check to kfree() */
-       if (iovec)
-               kfree(iovec);
-       return ret;
+       req->flags &= ~REQ_F_NEED_CLEANUP;
+       if (ret < 0)
+               req_set_fail(req);
+       io_req_complete(req, ret);
+       return 0;
 }
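
The io_mkdirat_prep() added above maps IORING_OP_MKDIRAT onto mkdirat(2): the directory fd comes from sqe->fd, the pathname pointer from sqe->addr, and the mode from sqe->len. A minimal userspace sketch of the corresponding submission, assuming a liburing-managed ring and a uapi header that already defines the new opcode (everything other than the sqe fields read above is illustrative):

    #include <string.h>
    #include <sys/types.h>
    #include <liburing.h>

    /* Sketch only: queue a mkdirat request by filling the raw sqe fields that
     * io_mkdirat_prep() reads; the kernel resolves the path relative to dfd. */
    static int queue_mkdirat(struct io_uring *ring, int dfd, const char *path, mode_t mode)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            if (!sqe)
                    return -EAGAIN;                 /* submission queue full */
            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_MKDIRAT;
            sqe->fd = dfd;                          /* mkd->dfd */
            sqe->addr = (unsigned long)path;        /* mkd->filename */
            sqe->len = mode;                        /* mkd->mode */
            return io_uring_submit(ring);
    }
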
 
-static int io_renameat_prep(struct io_kiocb *req,
+static int io_symlinkat_prep(struct io_kiocb *req,
                            const struct io_uring_sqe *sqe)
 {
-       struct io_rename *ren = &req->rename;
-       const char __user *oldf, *newf;
+       struct io_symlink *sl = &req->symlink;
+       const char __user *oldpath, *newpath;
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index)
+       if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
+           sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
 
-       ren->old_dfd = READ_ONCE(sqe->fd);
-       oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
-       newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
-       ren->new_dfd = READ_ONCE(sqe->len);
-       ren->flags = READ_ONCE(sqe->rename_flags);
+       sl->new_dfd = READ_ONCE(sqe->fd);
+       oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
 
-       ren->oldpath = getname(oldf);
-       if (IS_ERR(ren->oldpath))
-               return PTR_ERR(ren->oldpath);
+       sl->oldpath = getname(oldpath);
+       if (IS_ERR(sl->oldpath))
+               return PTR_ERR(sl->oldpath);
 
-       ren->newpath = getname(newf);
-       if (IS_ERR(ren->newpath)) {
-               putname(ren->oldpath);
-               return PTR_ERR(ren->newpath);
+       sl->newpath = getname(newpath);
+       if (IS_ERR(sl->newpath)) {
+               putname(sl->oldpath);
+               return PTR_ERR(sl->newpath);
        }
 
        req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
 }
 
-static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
+static int io_symlinkat(struct io_kiocb *req, int issue_flags)
 {
-       struct io_rename *ren = &req->rename;
+       struct io_symlink *sl = &req->symlink;
        int ret;
 
        if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
 
-       ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
-                               ren->newpath, ren->flags);
+       ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
 
        req->flags &= ~REQ_F_NEED_CLEANUP;
        if (ret < 0)
@@ -3518,46 +3801,49 @@ static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
        return 0;
 }
 
-static int io_unlinkat_prep(struct io_kiocb *req,
+static int io_linkat_prep(struct io_kiocb *req,
                            const struct io_uring_sqe *sqe)
 {
-       struct io_unlink *un = &req->unlink;
-       const char __user *fname;
+       struct io_hardlink *lnk = &req->hardlink;
+       const char __user *oldf, *newf;
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
+       if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
 
-       un->dfd = READ_ONCE(sqe->fd);
+       lnk->old_dfd = READ_ONCE(sqe->fd);
+       lnk->new_dfd = READ_ONCE(sqe->len);
+       oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+       lnk->flags = READ_ONCE(sqe->hardlink_flags);
 
-       un->flags = READ_ONCE(sqe->unlink_flags);
-       if (un->flags & ~AT_REMOVEDIR)
-               return -EINVAL;
+       lnk->oldpath = getname(oldf);
+       if (IS_ERR(lnk->oldpath))
+               return PTR_ERR(lnk->oldpath);
 
-       fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
-       un->filename = getname(fname);
-       if (IS_ERR(un->filename))
-               return PTR_ERR(un->filename);
+       lnk->newpath = getname(newf);
+       if (IS_ERR(lnk->newpath)) {
+               putname(lnk->oldpath);
+               return PTR_ERR(lnk->newpath);
+       }
 
        req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
 }
 
-static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
+static int io_linkat(struct io_kiocb *req, int issue_flags)
 {
-       struct io_unlink *un = &req->unlink;
+       struct io_hardlink *lnk = &req->hardlink;
        int ret;
 
        if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
 
-       if (un->flags & AT_REMOVEDIR)
-               ret = do_rmdir(un->dfd, un->filename);
-       else
-               ret = do_unlinkat(un->dfd, un->filename);
+       ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
+                               lnk->newpath, lnk->flags);
 
        req->flags &= ~REQ_F_NEED_CLEANUP;
        if (ret < 0)
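
The SYMLINKAT and LINKAT opcodes follow the same pattern: io_symlinkat_prep() takes the new directory fd from sqe->fd and the two paths from sqe->addr/sqe->addr2, while io_linkat_prep() additionally carries the new dfd in sqe->len and the AT_* flags in sqe->hardlink_flags. A hedged sketch of the matching sqe setup (helper names are illustrative; the field names are the uapi ones used by the prep code above):

    #include <string.h>
    #include <fcntl.h>              /* AT_* flags */
    #include <liburing.h>

    static void prep_symlinkat(struct io_uring_sqe *sqe, const char *target,
                               int newdirfd, const char *linkpath)
    {
            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_SYMLINKAT;
            sqe->fd = newdirfd;                     /* sl->new_dfd */
            sqe->addr = (unsigned long)target;      /* sl->oldpath */
            sqe->addr2 = (unsigned long)linkpath;   /* sl->newpath */
    }

    static void prep_linkat(struct io_uring_sqe *sqe, int olddfd, const char *oldpath,
                            int newdfd, const char *newpath, int flags)
    {
            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_LINKAT;
            sqe->fd = olddfd;                       /* lnk->old_dfd */
            sqe->len = newdfd;                      /* lnk->new_dfd */
            sqe->addr = (unsigned long)oldpath;
            sqe->addr2 = (unsigned long)newpath;
            sqe->hardlink_flags = flags;            /* e.g. AT_SYMLINK_FOLLOW */
    }
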
@@ -3572,8 +3858,8 @@ static int io_shutdown_prep(struct io_kiocb *req,
 #if defined(CONFIG_NET)
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
-           sqe->buf_index)
+       if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
+                    sqe->buf_index || sqe->splice_fd_in))
                return -EINVAL;
 
        req->shutdown.how = READ_ONCE(sqe->len);
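
This and many of the following prep hunks also start rejecting a non-zero sqe->splice_fd_in, so garbage that used to be silently ignored in that field now fails with -EINVAL. Zero-initializing the sqe keeps existing submission code working; a minimal sketch using the shutdown opcode shown just above (the assumption that recent liburing prep helpers clear the unused fields as well is not taken from this diff):

    #include <string.h>
    #include <sys/socket.h>
    #include <liburing.h>

    /* Sketch: zero the sqe first, then set only what the opcode uses. */
    static void prep_shutdown(struct io_uring_sqe *sqe, int sockfd)
    {
            memset(sqe, 0, sizeof(*sqe));   /* clears splice_fd_in and the other must-be-zero fields */
            sqe->opcode = IORING_OP_SHUTDOWN;
            sqe->fd = sockfd;
            sqe->len = SHUT_RDWR;           /* "how", read from sqe->len above */
    }
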
@@ -3622,7 +3908,7 @@ static int __io_splice_prep(struct io_kiocb *req,
        if (unlikely(sp->flags & ~valid_flags))
                return -EINVAL;
 
-       sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
+       sp->file_in = io_file_get(req->ctx, req, READ_ONCE(sqe->splice_fd_in),
                                  (sp->flags & SPLICE_F_FD_IN_FIXED));
        if (!sp->file_in)
                return -EBADF;
@@ -3721,7 +4007,8 @@ static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
+       if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
+                    sqe->splice_fd_in))
                return -EINVAL;
 
        req->sync.flags = READ_ONCE(sqe->fsync_flags);
@@ -3754,7 +4041,8 @@ static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
 static int io_fallocate_prep(struct io_kiocb *req,
                             const struct io_uring_sqe *sqe)
 {
-       if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
+       if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
+           sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -3785,6 +4073,8 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
        const char __user *fname;
        int ret;
 
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
        if (unlikely(sqe->ioprio || sqe->buf_index))
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
@@ -3802,6 +4092,11 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
                req->open.filename = NULL;
                return ret;
        }
+
+       req->open.file_slot = READ_ONCE(sqe->file_index);
+       if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
+               return -EINVAL;
+
        req->open.nofile = rlimit(RLIMIT_NOFILE);
        req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
@@ -3809,12 +4104,9 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 
 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       u64 flags, mode;
+       u64 mode = READ_ONCE(sqe->len);
+       u64 flags = READ_ONCE(sqe->open_flags);
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       mode = READ_ONCE(sqe->len);
-       flags = READ_ONCE(sqe->open_flags);
        req->open.how = build_open_how(flags, mode);
        return __io_openat_prep(req, sqe);
 }
@@ -3825,8 +4117,6 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        size_t len;
        int ret;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
        how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        len = READ_ONCE(sqe->len);
        if (len < OPEN_HOW_SIZE_VER0)
@@ -3844,8 +4134,8 @@ static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct open_flags op;
        struct file *file;
-       bool nonblock_set;
-       bool resolve_nonblock;
+       bool resolve_nonblock, nonblock_set;
+       bool fixed = !!req->open.file_slot;
        int ret;
 
        ret = build_open_flags(&req->open.how, &op);
@@ -3864,9 +4154,11 @@ static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
                op.open_flag |= O_NONBLOCK;
        }
 
-       ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
-       if (ret < 0)
-               goto err;
+       if (!fixed) {
+               ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
+               if (ret < 0)
+                       goto err;
+       }
 
        file = do_filp_open(req->open.dfd, req->open.filename, &op);
        if (IS_ERR(file)) {
@@ -3875,7 +4167,8 @@ static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
                 * marginal gain for something that is now known to be a slower
                 * path. So just put it, and we'll get a new one when we retry.
                 */
-               put_unused_fd(ret);
+               if (!fixed)
+                       put_unused_fd(ret);
 
                ret = PTR_ERR(file);
                /* only retry if RESOLVE_CACHED wasn't already set by application */
@@ -3888,7 +4181,12 @@ static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
        if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
                file->f_flags &= ~O_NONBLOCK;
        fsnotify_open(file);
-       fd_install(ret, file);
+
+       if (!fixed)
+               fd_install(ret, file);
+       else
+               ret = io_install_fixed_file(req, file, issue_flags,
+                                           req->open.file_slot - 1);
 err:
        putname(req->open.filename);
        req->flags &= ~REQ_F_NEED_CLEANUP;
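
With the __io_openat_prep()/io_openat2() changes above, OPENAT and OPENAT2 can install the opened file straight into the fixed-file table instead of allocating a normal fd: sqe->file_index selects the slot plus one (zero keeps the old behaviour), and O_CLOEXEC is refused for fixed slots. A hedged sketch, assuming the table was registered beforehand (e.g. with io_uring_register_files()) and that the installed uapi header exposes file_index:

    #include <string.h>
    #include <fcntl.h>
    #include <liburing.h>

    /* Sketch: open `path` read-only directly into fixed slot `slot`. */
    static void prep_openat_direct(struct io_uring_sqe *sqe, int dfd,
                                   const char *path, unsigned slot)
    {
            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_OPENAT;
            sqe->fd = dfd;
            sqe->addr = (unsigned long)path;
            sqe->open_flags = O_RDONLY;             /* O_CLOEXEC would be -EINVAL here */
            sqe->len = 0;                           /* mode, unused without O_CREAT */
            sqe->file_index = slot + 1;             /* 1-based; 0 means "allocate a real fd" */
    }

The CQE result is then the install status rather than a file descriptor, and later requests reference the slot with IOSQE_FIXED_FILE.
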
@@ -3909,7 +4207,8 @@ static int io_remove_buffers_prep(struct io_kiocb *req,
        struct io_provide_buf *p = &req->pbuf;
        u64 tmp;
 
-       if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
+       if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
+           sqe->splice_fd_in)
                return -EINVAL;
 
        tmp = READ_ONCE(sqe->fd);
@@ -3980,7 +4279,7 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
        struct io_provide_buf *p = &req->pbuf;
        u64 tmp;
 
-       if (sqe->ioprio || sqe->rw_flags)
+       if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;
 
        tmp = READ_ONCE(sqe->fd);
@@ -4067,7 +4366,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
                             const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_EPOLL)
-       if (sqe->ioprio || sqe->buf_index)
+       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -4113,7 +4412,7 @@ static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
-       if (sqe->ioprio || sqe->buf_index || sqe->off)
+       if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -4148,7 +4447,7 @@ static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       if (sqe->ioprio || sqe->buf_index || sqe->addr)
+       if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -4186,7 +4485,7 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index)
+       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (req->flags & REQ_F_FIXED_FILE)
                return -EBADF;
@@ -4222,7 +4521,7 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
-           sqe->rw_flags || sqe->buf_index)
+           sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (req->flags & REQ_F_FIXED_FILE)
                return -EBADF;
@@ -4283,7 +4582,8 @@ static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
+       if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
+                    sqe->splice_fd_in))
                return -EINVAL;
 
        req->sync.off = READ_ONCE(sqe->off);
@@ -4717,6 +5017,15 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        accept->flags = READ_ONCE(sqe->accept_flags);
        accept->nofile = rlimit(RLIMIT_NOFILE);
+
+       accept->file_slot = READ_ONCE(sqe->file_index);
+       if (accept->file_slot && ((req->open.how.flags & O_CLOEXEC) ||
+                                 (accept->flags & SOCK_CLOEXEC)))
+               return -EINVAL;
+       if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+               return -EINVAL;
+       if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
+               accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
        return 0;
 }
 
@@ -4725,20 +5034,35 @@ static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
        struct io_accept *accept = &req->accept;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
-       int ret;
+       bool fixed = !!accept->file_slot;
+       struct file *file;
+       int ret, fd;
 
        if (req->file->f_flags & O_NONBLOCK)
                req->flags |= REQ_F_NOWAIT;
 
-       ret = __sys_accept4_file(req->file, file_flags, accept->addr,
-                                       accept->addr_len, accept->flags,
-                                       accept->nofile);
-       if (ret == -EAGAIN && force_nonblock)
-               return -EAGAIN;
-       if (ret < 0) {
+       if (!fixed) {
+               fd = __get_unused_fd_flags(accept->flags, accept->nofile);
+               if (unlikely(fd < 0))
+                       return fd;
+       }
+       file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
+                        accept->flags);
+       if (IS_ERR(file)) {
+               if (!fixed)
+                       put_unused_fd(fd);
+               ret = PTR_ERR(file);
+               if (ret == -EAGAIN && force_nonblock)
+                       return -EAGAIN;
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
+       } else if (!fixed) {
+               fd_install(fd, file);
+               ret = fd;
+       } else {
+               ret = io_install_fixed_file(req, file, issue_flags,
+                                           accept->file_slot - 1);
        }
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
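
io_accept() is reworked around do_accept(): the resulting struct file is either installed into an unused fd as before or, when sqe->file_index is set, placed into that fixed-file slot; SOCK_CLOEXEC is rejected for fixed slots and the flags are now validated against SOCK_CLOEXEC | SOCK_NONBLOCK. A hedged userspace sketch mirroring those fields (identifiers outside the sqe layout are illustrative):

    #include <string.h>
    #include <sys/socket.h>
    #include <liburing.h>

    /* Sketch: accept on `listen_fd` and install the connection into fixed slot `slot`. */
    static void prep_accept_direct(struct io_uring_sqe *sqe, int listen_fd,
                                   struct sockaddr *addr, socklen_t *addrlen,
                                   unsigned slot)
    {
            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_ACCEPT;
            sqe->fd = listen_fd;
            sqe->addr = (unsigned long)addr;        /* accept->addr */
            sqe->addr2 = (unsigned long)addrlen;    /* accept->addr_len */
            sqe->accept_flags = SOCK_NONBLOCK;      /* SOCK_CLOEXEC is -EINVAL with a fixed slot */
            sqe->file_index = slot + 1;
    }
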
@@ -4758,7 +5082,8 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
+       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
+           sqe->splice_fd_in)
                return -EINVAL;
 
        conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -4871,6 +5196,7 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
+       /* req->task == current here, checking PF_EXITING is safe */
        if (unlikely(req->task->flags & PF_EXITING))
                WRITE_ONCE(poll->canceled, true);
 
@@ -4880,7 +5206,7 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
                req->result = vfs_poll(req->file, &pt) & poll->events;
        }
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        if (!req->result && !READ_ONCE(poll->canceled)) {
                add_wait_queue(poll->head, &poll->wait);
                return true;
@@ -4914,12 +5240,12 @@ static void io_poll_remove_double(struct io_kiocb *req)
        if (poll && poll->head) {
                struct wait_queue_head *head = poll->head;
 
-               spin_lock(&head->lock);
+               spin_lock_irq(&head->lock);
                list_del_init(&poll->wait.entry);
                if (poll->wait.private)
                        req_ref_put(req);
                poll->head = NULL;
-               spin_unlock(&head->lock);
+               spin_unlock_irq(&head->lock);
        }
 }
 
@@ -4949,13 +5275,13 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
        return !(flags & IORING_CQE_F_MORE);
 }
 
-static void io_poll_task_func(struct io_kiocb *req)
+static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_kiocb *nxt;
 
        if (io_poll_rewait(req, &req->poll)) {
-               spin_unlock_irq(&ctx->completion_lock);
+               spin_unlock(&ctx->completion_lock);
        } else {
                bool done;
 
@@ -4967,13 +5293,13 @@ static void io_poll_task_func(struct io_kiocb *req)
                        req->result = 0;
                        add_wait_queue(req->poll.head, &req->poll.wait);
                }
-               spin_unlock_irq(&ctx->completion_lock);
+               spin_unlock(&ctx->completion_lock);
                io_cqring_ev_posted(ctx);
 
                if (done) {
                        nxt = io_put_req_find_next(req);
                        if (nxt)
-                               io_req_task_submit(nxt);
+                               io_req_task_submit(nxt, locked);
                }
        }
 }
@@ -4984,6 +5310,7 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
        struct io_kiocb *req = wait->private;
        struct io_poll_iocb *poll = io_poll_get_single(req);
        __poll_t mask = key_to_poll(key);
+       unsigned long flags;
 
        /* for instances that support it check for an event match first: */
        if (mask && !(mask & poll->events))
@@ -4996,13 +5323,13 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
        if (poll->head) {
                bool done;
 
-               spin_lock(&poll->head->lock);
+               spin_lock_irqsave(&poll->head->lock, flags);
                done = list_empty(&poll->wait.entry);
                if (!done)
                        list_del_init(&poll->wait.entry);
                /* make sure double remove sees this as being gone */
                wait->private = NULL;
-               spin_unlock(&poll->head->lock);
+               spin_unlock_irqrestore(&poll->head->lock, flags);
                if (!done) {
                        /* use wait func handler, so it matches the rq type */
                        poll->wait.func(&poll->wait, mode, sync, key);
@@ -5039,8 +5366,13 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
        if (unlikely(pt->nr_entries)) {
                struct io_poll_iocb *poll_one = poll;
 
+               /* double add on the same waitqueue head, ignore */
+               if (poll_one->head == head)
+                       return;
                /* already have a 2nd entry, fail a third attempt */
                if (*poll_ptr) {
+                       if ((*poll_ptr)->head == head)
+                               return;
                        pt->error = -EINVAL;
                        return;
                }
@@ -5050,9 +5382,6 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                 */
                if (!(poll_one->events & EPOLLONESHOT))
                        poll_one->events |= EPOLLONESHOT;
-               /* double add on the same waitqueue head, ignore */
-               if (poll_one->head == head)
-                       return;
                poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
                if (!poll) {
                        pt->error = -ENOMEM;
@@ -5082,7 +5411,7 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
        __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
 }
 
-static void io_async_task_func(struct io_kiocb *req)
+static void io_async_task_func(struct io_kiocb *req, bool *locked)
 {
        struct async_poll *apoll = req->apoll;
        struct io_ring_ctx *ctx = req->ctx;
@@ -5090,16 +5419,16 @@ static void io_async_task_func(struct io_kiocb *req)
        trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
 
        if (io_poll_rewait(req, &apoll->poll)) {
-               spin_unlock_irq(&ctx->completion_lock);
+               spin_unlock(&ctx->completion_lock);
                return;
        }
 
        hash_del(&req->hash_node);
        io_poll_remove_double(req);
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
 
        if (!READ_ONCE(apoll->poll.canceled))
-               io_req_task_submit(req);
+               io_req_task_submit(req, locked);
        else
                io_req_complete_failed(req, -ECANCELED);
 }
@@ -5148,11 +5477,11 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
        if (unlikely(!ipt->nr_entries) && !ipt->error)
                ipt->error = -EINVAL;
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
                io_poll_remove_double(req);
        if (likely(poll->head)) {
-               spin_lock(&poll->head->lock);
+               spin_lock_irq(&poll->head->lock);
                if (unlikely(list_empty(&poll->wait.entry))) {
                        if (ipt->error)
                                cancel = true;
@@ -5165,7 +5494,7 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
                        WRITE_ONCE(poll->canceled, true);
                else if (!poll->done) /* actually waiting for an event */
                        io_poll_req_insert(req);
-               spin_unlock(&poll->head->lock);
+               spin_unlock_irq(&poll->head->lock);
        }
 
        return mask;
@@ -5207,7 +5536,7 @@ static int io_arm_poll_handler(struct io_kiocb *req)
        }
 
        /* if we can't nonblock try, then no point in arming a poll handler */
-       if (!io_file_supports_async(req, rw))
+       if (!io_file_supports_nowait(req, rw))
                return IO_APOLL_ABORTED;
 
        apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
@@ -5217,16 +5546,14 @@ static int io_arm_poll_handler(struct io_kiocb *req)
        req->apoll = apoll;
        req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;
+       io_req_set_refcount(req);
 
        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
                                        io_async_wake);
-       if (ret || ipt.error) {
-               spin_unlock_irq(&ctx->completion_lock);
-               if (ret)
-                       return IO_APOLL_READY;
-               return IO_APOLL_ABORTED;
-       }
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
+       if (ret || ipt.error)
+               return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
+
        trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
                                mask, apoll->poll.events);
        return IO_APOLL_OK;
@@ -5240,19 +5567,19 @@ static bool __io_poll_remove_one(struct io_kiocb *req,
 
        if (!poll->head)
                return false;
-       spin_lock(&poll->head->lock);
+       spin_lock_irq(&poll->head->lock);
        if (do_cancel)
                WRITE_ONCE(poll->canceled, true);
        if (!list_empty(&poll->wait.entry)) {
                list_del_init(&poll->wait.entry);
                do_complete = true;
        }
-       spin_unlock(&poll->head->lock);
+       spin_unlock_irq(&poll->head->lock);
        hash_del(&req->hash_node);
        return do_complete;
 }
 
-static bool io_poll_remove_waitqs(struct io_kiocb *req)
+static bool io_poll_remove_one(struct io_kiocb *req)
        __must_hold(&req->ctx->completion_lock)
 {
        bool do_complete;
@@ -5260,26 +5587,12 @@ static bool io_poll_remove_waitqs(struct io_kiocb *req)
        io_poll_remove_double(req);
        do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
 
-       if (req->opcode != IORING_OP_POLL_ADD && do_complete) {
-               /* non-poll requests have submit ref still */
-               req_ref_put(req);
-       }
-       return do_complete;
-}
-
-static bool io_poll_remove_one(struct io_kiocb *req)
-       __must_hold(&req->ctx->completion_lock)
-{
-       bool do_complete;
-
-       do_complete = io_poll_remove_waitqs(req);
        if (do_complete) {
                io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
                io_commit_cqring(req->ctx);
                req_set_fail(req);
-               io_put_req_deferred(req, 1);
+               io_put_req_deferred(req);
        }
-
        return do_complete;
 }
 
@@ -5293,7 +5606,7 @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
        struct io_kiocb *req;
        int posted = 0, i;
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
                struct hlist_head *list;
 
@@ -5303,7 +5616,7 @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
                                posted += io_poll_remove_one(req);
                }
        }
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
 
        if (posted)
                io_cqring_ev_posted(ctx);
@@ -5366,7 +5679,7 @@ static int io_poll_update_prep(struct io_kiocb *req,
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index)
+       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
@@ -5421,6 +5734,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
        if (flags & ~IORING_POLL_ADD_MULTI)
                return -EINVAL;
 
+       io_req_set_refcount(req);
        poll->events = io_poll_parse_events(sqe, flags);
        return 0;
 }
@@ -5441,7 +5755,7 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
                ipt.error = 0;
                io_poll_complete(req, mask);
        }
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
 
        if (mask) {
                io_cqring_ev_posted(ctx);
@@ -5458,7 +5772,7 @@ static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
        bool completing;
        int ret;
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
        if (!preq) {
                ret = -ENOENT;
@@ -5485,7 +5799,7 @@ static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
        ret = 0;
 err:
        if (ret < 0) {
-               spin_unlock_irq(&ctx->completion_lock);
+               spin_unlock(&ctx->completion_lock);
                req_set_fail(req);
                io_req_complete(req, ret);
                return 0;
@@ -5498,7 +5812,7 @@ err:
        }
        if (req->poll_update.update_user_data)
                preq->user_data = req->poll_update.new_user_data;
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
 
        /* complete update request, we're done with it */
        io_req_complete(req, ret);
@@ -5513,6 +5827,12 @@ err:
        return 0;
 }
 
+static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
+{
+       req_set_fail(req);
+       io_req_complete_post(req, -ETIME, 0);
+}
+
 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 {
        struct io_timeout_data *data = container_of(timer,
@@ -5521,24 +5841,20 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;
 
-       spin_lock_irqsave(&ctx->completion_lock, flags);
+       spin_lock_irqsave(&ctx->timeout_lock, flags);
        list_del_init(&req->timeout.list);
        atomic_set(&req->ctx->cq_timeouts,
                atomic_read(&req->ctx->cq_timeouts) + 1);
+       spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
-       io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
-       io_commit_cqring(ctx);
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
-
-       io_cqring_ev_posted(ctx);
-       req_set_fail(req);
-       io_put_req(req);
+       req->io_task_work.func = io_req_task_timeout;
+       io_req_task_work_add(req);
        return HRTIMER_NORESTART;
 }
 
 static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
                                           __u64 user_data)
-       __must_hold(&ctx->completion_lock)
+       __must_hold(&ctx->timeout_lock)
 {
        struct io_timeout_data *io;
        struct io_kiocb *req;
@@ -5561,6 +5877,7 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
 
 static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
        __must_hold(&ctx->completion_lock)
+       __must_hold(&ctx->timeout_lock)
 {
        struct io_kiocb *req = io_timeout_extract(ctx, user_data);
 
@@ -5569,13 +5886,54 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 
        req_set_fail(req);
        io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
-       io_put_req_deferred(req, 1);
+       io_put_req_deferred(req);
+       return 0;
+}
+
+static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
+{
+       switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
+       case IORING_TIMEOUT_BOOTTIME:
+               return CLOCK_BOOTTIME;
+       case IORING_TIMEOUT_REALTIME:
+               return CLOCK_REALTIME;
+       default:
+               /* can't happen, vetted at prep time */
+               WARN_ON_ONCE(1);
+               fallthrough;
+       case 0:
+               return CLOCK_MONOTONIC;
+       }
+}
+
+static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
+                                   struct timespec64 *ts, enum hrtimer_mode mode)
+       __must_hold(&ctx->timeout_lock)
+{
+       struct io_timeout_data *io;
+       struct io_kiocb *req;
+       bool found = false;
+
+       list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
+               found = user_data == req->user_data;
+               if (found)
+                       break;
+       }
+       if (!found)
+               return -ENOENT;
+
+       io = req->async_data;
+       if (hrtimer_try_to_cancel(&io->timer) == -1)
+               return -EALREADY;
+       hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
+       io->timer.function = io_link_timeout_fn;
+       hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
        return 0;
 }
 
 static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
                             struct timespec64 *ts, enum hrtimer_mode mode)
-       __must_hold(&ctx->completion_lock)
+       __must_hold(&ctx->timeout_lock)
 {
        struct io_kiocb *req = io_timeout_extract(ctx, user_data);
        struct io_timeout_data *data;
@@ -5586,7 +5944,7 @@ static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
        req->timeout.off = 0; /* noseq */
        data = req->async_data;
        list_add_tail(&req->timeout.list, &ctx->timeout_list);
-       hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
+       hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
        data->timer.function = io_timeout_fn;
        hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
        return 0;
@@ -5601,13 +5959,18 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
                return -EINVAL;
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index || sqe->len)
+       if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
                return -EINVAL;
 
+       tr->ltimeout = false;
        tr->addr = READ_ONCE(sqe->addr);
        tr->flags = READ_ONCE(sqe->timeout_flags);
-       if (tr->flags & IORING_TIMEOUT_UPDATE) {
-               if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
+       if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
+               if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
+                       return -EINVAL;
+               if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
+                       tr->ltimeout = true;
+               if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
                        return -EINVAL;
                if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
                        return -EFAULT;
@@ -5634,20 +5997,26 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
-       spin_lock_irq(&ctx->completion_lock);
-       if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
+       if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
+               spin_lock(&ctx->completion_lock);
+               spin_lock_irq(&ctx->timeout_lock);
                ret = io_timeout_cancel(ctx, tr->addr);
-       else
-               ret = io_timeout_update(ctx, tr->addr, &tr->ts,
-                                       io_translate_timeout_mode(tr->flags));
+               spin_unlock_irq(&ctx->timeout_lock);
+               spin_unlock(&ctx->completion_lock);
+       } else {
+               enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
+
+               spin_lock_irq(&ctx->timeout_lock);
+               if (tr->ltimeout)
+                       ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
+               else
+                       ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
+               spin_unlock_irq(&ctx->timeout_lock);
+       }
 
-       io_cqring_fill_event(ctx, req->user_data, ret, 0);
-       io_commit_cqring(ctx);
-       spin_unlock_irq(&ctx->completion_lock);
-       io_cqring_ev_posted(ctx);
        if (ret < 0)
                req_set_fail(req);
-       io_put_req(req);
+       io_req_complete_post(req, ret, 0);
        return 0;
 }
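
With io_linked_timeout_update() and the IORING_LINK_TIMEOUT_UPDATE handling above, the expiry of an already-armed linked timeout can be changed by its user_data. A hedged sketch that sets IORING_TIMEOUT_UPDATE together with IORING_LINK_TIMEOUT_UPDATE so the request follows the update branch of io_timeout_remove() as written above; treat the exact flag combination as an assumption rather than documented ABI:

    #include <string.h>
    #include <liburing.h>

    /* Sketch: move the expiry of the linked timeout whose user_data is `target`. */
    static void prep_ltimeout_update(struct io_uring_sqe *sqe, __u64 target,
                                     const struct __kernel_timespec *new_ts)
    {
            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_TIMEOUT_REMOVE;
            sqe->addr = target;                     /* tr->addr: which request to update */
            sqe->addr2 = (unsigned long)new_ts;     /* new expiry, fetched via get_timespec64() */
            sqe->timeout_flags = IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE;
    }
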
 
@@ -5660,14 +6029,19 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
+       if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
+           sqe->splice_fd_in)
                return -EINVAL;
        if (off && is_timeout_link)
                return -EINVAL;
        flags = READ_ONCE(sqe->timeout_flags);
-       if (flags & ~IORING_TIMEOUT_ABS)
+       if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK))
+               return -EINVAL;
+       /* more than one clock specified is invalid, obviously */
+       if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
                return -EINVAL;
 
+       INIT_LIST_HEAD(&req->timeout.list);
        req->timeout.off = off;
        if (unlikely(off && !req->ctx->off_timeout_used))
                req->ctx->off_timeout_used = true;
@@ -5677,14 +6051,24 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
        data = req->async_data;
        data->req = req;
+       data->flags = flags;
 
        if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
                return -EFAULT;
 
        data->mode = io_translate_timeout_mode(flags);
-       hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
-       if (is_timeout_link)
-               io_req_track_inflight(req);
+       hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
+
+       if (is_timeout_link) {
+               struct io_submit_link *link = &req->ctx->submit_state.link;
+
+               if (!link->head)
+                       return -EINVAL;
+               if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
+                       return -EINVAL;
+               req->timeout.head = link->last;
+               link->last->flags |= REQ_F_ARM_LTIMEOUT;
+       }
        return 0;
 }
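
io_timeout_get_clock() and the IORING_TIMEOUT_CLOCK_MASK validation in io_timeout_prep() let a timeout run on CLOCK_BOOTTIME or CLOCK_REALTIME instead of the default CLOCK_MONOTONIC, with at most one clock flag per request. A minimal sketch, assuming the uapi header on the build machine already defines IORING_TIMEOUT_BOOTTIME:

    #include <string.h>
    #include <liburing.h>

    /* Sketch: a relative 5 s timeout measured on CLOCK_BOOTTIME (which keeps
     * ticking across suspend), using the flag checks added in io_timeout_prep(). */
    static void prep_boottime_timeout(struct io_uring_sqe *sqe,
                                      struct __kernel_timespec *ts)
    {
            ts->tv_sec = 5;
            ts->tv_nsec = 0;
            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_TIMEOUT;
            sqe->addr = (unsigned long)ts;          /* struct __kernel_timespec */
            sqe->len = 1;                           /* prep insists on len == 1 */
            sqe->timeout_flags = IORING_TIMEOUT_BOOTTIME;
    }
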
 
@@ -5695,7 +6079,7 @@ static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
        struct list_head *entry;
        u32 tail, off = req->timeout.off;
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock_irq(&ctx->timeout_lock);
 
        /*
         * sqe->off holds how many events that need to occur for this
@@ -5734,7 +6118,7 @@ add:
        list_add(&req->timeout.list, entry);
        data->timer.function = io_timeout_fn;
        hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock_irq(&ctx->timeout_lock);
        return 0;
 }
 
@@ -5777,31 +6161,27 @@ static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
        return ret;
 }
 
-static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
-                                    struct io_kiocb *req, __u64 sqe_addr,
-                                    int success_ret)
+static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
 {
-       unsigned long flags;
+       struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
+       WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
+
        ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
-       spin_lock_irqsave(&ctx->completion_lock, flags);
        if (ret != -ENOENT)
-               goto done;
+               return ret;
+
+       spin_lock(&ctx->completion_lock);
+       spin_lock_irq(&ctx->timeout_lock);
        ret = io_timeout_cancel(ctx, sqe_addr);
+       spin_unlock_irq(&ctx->timeout_lock);
        if (ret != -ENOENT)
-               goto done;
+               goto out;
        ret = io_poll_cancel(ctx, sqe_addr, false);
-done:
-       if (!ret)
-               ret = success_ret;
-       io_cqring_fill_event(ctx, req->user_data, ret, 0);
-       io_commit_cqring(ctx);
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
-       io_cqring_ev_posted(ctx);
-
-       if (ret < 0)
-               req_set_fail(req);
+out:
+       spin_unlock(&ctx->completion_lock);
+       return ret;
 }
 
 static int io_async_cancel_prep(struct io_kiocb *req,
@@ -5811,7 +6191,8 @@ static int io_async_cancel_prep(struct io_kiocb *req,
                return -EINVAL;
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
+       if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
+           sqe->splice_fd_in)
                return -EINVAL;
 
        req->cancel.addr = READ_ONCE(sqe->addr);
@@ -5825,18 +6206,9 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
        struct io_tctx_node *node;
        int ret;
 
-       /* tasks should wait for their io-wq threads, so safe w/o sync */
-       ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
-       spin_lock_irq(&ctx->completion_lock);
-       if (ret != -ENOENT)
-               goto done;
-       ret = io_timeout_cancel(ctx, sqe_addr);
-       if (ret != -ENOENT)
-               goto done;
-       ret = io_poll_cancel(ctx, sqe_addr, false);
+       ret = io_try_cancel_userdata(req, sqe_addr);
        if (ret != -ENOENT)
                goto done;
-       spin_unlock_irq(&ctx->completion_lock);
 
        /* slow path, try all io-wq's */
        io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
@@ -5849,17 +6221,10 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
                        break;
        }
        io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
-
-       spin_lock_irq(&ctx->completion_lock);
 done:
-       io_cqring_fill_event(ctx, req->user_data, ret, 0);
-       io_commit_cqring(ctx);
-       spin_unlock_irq(&ctx->completion_lock);
-       io_cqring_ev_posted(ctx);
-
        if (ret < 0)
                req_set_fail(req);
-       io_put_req(req);
+       io_req_complete_post(req, ret, 0);
        return 0;
 }
 
@@ -5868,7 +6233,7 @@ static int io_rsrc_update_prep(struct io_kiocb *req,
 {
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
-       if (sqe->ioprio || sqe->rw_flags)
+       if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;
 
        req->rsrc_update.offset = READ_ONCE(sqe->off);
@@ -5976,6 +6341,12 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                return io_renameat_prep(req, sqe);
        case IORING_OP_UNLINKAT:
                return io_unlinkat_prep(req, sqe);
+       case IORING_OP_MKDIRAT:
+               return io_mkdirat_prep(req, sqe);
+       case IORING_OP_SYMLINKAT:
+               return io_symlinkat_prep(req, sqe);
+       case IORING_OP_LINKAT:
+               return io_linkat_prep(req, sqe);
        }
 
        printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
@@ -6070,11 +6441,11 @@ fail:
                return true;
        }
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
-               spin_unlock_irq(&ctx->completion_lock);
+               spin_unlock(&ctx->completion_lock);
                kfree(de);
-               io_queue_async_work(req);
+               io_queue_async_work(req, NULL);
                return true;
        }
 
@@ -6082,7 +6453,7 @@ fail:
        de->req = req;
        de->seq = seq;
        list_add_tail(&de->list, &ctx->defer_list);
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
        return true;
 }
 
@@ -6139,6 +6510,17 @@ static void io_clean_op(struct io_kiocb *req)
                case IORING_OP_UNLINKAT:
                        putname(req->unlink.filename);
                        break;
+               case IORING_OP_MKDIRAT:
+                       putname(req->mkdir.filename);
+                       break;
+               case IORING_OP_SYMLINKAT:
+                       putname(req->symlink.oldpath);
+                       putname(req->symlink.newpath);
+                       break;
+               case IORING_OP_LINKAT:
+                       putname(req->hardlink.oldpath);
+                       putname(req->hardlink.newpath);
+                       break;
                }
        }
        if ((req->flags & REQ_F_POLLED) && req->apoll) {
@@ -6267,6 +6649,15 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
        case IORING_OP_UNLINKAT:
                ret = io_unlinkat(req, issue_flags);
                break;
+       case IORING_OP_MKDIRAT:
+               ret = io_mkdirat(req, issue_flags);
+               break;
+       case IORING_OP_SYMLINKAT:
+               ret = io_symlinkat(req, issue_flags);
+               break;
+       case IORING_OP_LINKAT:
+               ret = io_linkat(req, issue_flags);
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -6283,16 +6674,31 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
        return 0;
 }
 
+static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
+{
+       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+       req = io_put_req_find_next(req);
+       return req ? &req->work : NULL;
+}
+
 static void io_wq_submit_work(struct io_wq_work *work)
 {
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
        struct io_kiocb *timeout;
        int ret = 0;
 
+       /* one will be dropped by ->io_free_work() after returning to io-wq */
+       if (!(req->flags & REQ_F_REFCOUNT))
+               __io_req_set_refcount(req, 2);
+       else
+               req_ref_get(req);
+
        timeout = io_prep_linked_timeout(req);
        if (timeout)
                io_queue_linked_timeout(timeout);
 
+       /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
        if (work->flags & IO_WQ_WORK_CANCEL)
                ret = -ECANCELED;
 
@@ -6311,29 +6717,14 @@ static void io_wq_submit_work(struct io_wq_work *work)
        }
 
        /* avoid locking problems by failing it from a clean context */
-       if (ret) {
-               /* io-wq is going to take one down */
-               req_ref_get(req);
+       if (ret)
                io_req_task_queue_fail(req, ret);
-       }
 }
 
-#define FFS_ASYNC_READ         0x1UL
-#define FFS_ASYNC_WRITE                0x2UL
-#ifdef CONFIG_64BIT
-#define FFS_ISREG              0x4UL
-#else
-#define FFS_ISREG              0x0UL
-#endif
-#define FFS_MASK               ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
-
 static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
-                                                     unsigned i)
+                                                      unsigned i)
 {
-       struct io_fixed_file *table_l2;
-
-       table_l2 = table->files[i >> IORING_FILE_TABLE_SHIFT];
-       return &table_l2[i & IORING_FILE_TABLE_MASK];
+       return &table->files[i];
 }
 
 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
@@ -6348,43 +6739,67 @@ static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file
 {
        unsigned long file_ptr = (unsigned long) file;
 
-       if (__io_file_supports_async(file, READ))
+       if (__io_file_supports_nowait(file, READ))
                file_ptr |= FFS_ASYNC_READ;
-       if (__io_file_supports_async(file, WRITE))
+       if (__io_file_supports_nowait(file, WRITE))
                file_ptr |= FFS_ASYNC_WRITE;
        if (S_ISREG(file_inode(file)->i_mode))
                file_ptr |= FFS_ISREG;
        file_slot->file_ptr = file_ptr;
 }
 
-static struct file *io_file_get(struct io_submit_state *state,
-                               struct io_kiocb *req, int fd, bool fixed)
+static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
+                                            struct io_kiocb *req, int fd)
 {
-       struct io_ring_ctx *ctx = req->ctx;
        struct file *file;
+       unsigned long file_ptr;
+
+       if (unlikely((unsigned int)fd >= ctx->nr_user_files))
+               return NULL;
+       fd = array_index_nospec(fd, ctx->nr_user_files);
+       file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
+       file = (struct file *) (file_ptr & FFS_MASK);
+       file_ptr &= ~FFS_MASK;
+       /* mask in overlapping REQ_F and FFS bits */
+       req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT);
+       io_req_set_rsrc_node(req);
+       return file;
+}
+
+static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
+                                      struct io_kiocb *req, int fd)
+{
+       struct file *file = fget(fd);
+
+       trace_io_uring_file_get(ctx, fd);
+
+       /* we don't allow fixed io_uring files */
+       if (file && unlikely(file->f_op == &io_uring_fops))
+               io_req_track_inflight(req);
+       return file;
+}
+
+static inline struct file *io_file_get(struct io_ring_ctx *ctx,
+                                      struct io_kiocb *req, int fd, bool fixed)
+{
+       if (fixed)
+               return io_file_get_fixed(ctx, req, fd);
+       else
+               return io_file_get_normal(ctx, req, fd);
+}
 
-       if (fixed) {
-               unsigned long file_ptr;
+static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
+{
+       struct io_kiocb *prev = req->timeout.prev;
+       int ret;
 
-               if (unlikely((unsigned int)fd >= ctx->nr_user_files))
-                       return NULL;
-               fd = array_index_nospec(fd, ctx->nr_user_files);
-               file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
-               file = (struct file *) (file_ptr & FFS_MASK);
-               file_ptr &= ~FFS_MASK;
-               /* mask in overlapping REQ_F and FFS bits */
-               req->flags |= (file_ptr << REQ_F_ASYNC_READ_BIT);
-               io_req_set_rsrc_node(req);
+       if (prev) {
+               ret = io_try_cancel_userdata(req, prev->user_data);
+               io_req_complete_post(req, ret ?: -ETIME, 0);
+               io_put_req(prev);
        } else {
-               trace_io_uring_file_get(ctx, fd);
-               file = __io_file_get(state, fd);
-
-               /* we don't allow fixed io_uring files */
-               if (file && unlikely(file->f_op == &io_uring_fops))
-                       io_req_track_inflight(req);
+               io_req_complete_post(req, -ETIME, 0);
        }
-
-       return file;
 }
 
 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
@@ -6395,7 +6810,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;
 
-       spin_lock_irqsave(&ctx->completion_lock, flags);
+       spin_lock_irqsave(&ctx->timeout_lock, flags);
        prev = req->timeout.head;
        req->timeout.head = NULL;
 
@@ -6408,15 +6823,12 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
                if (!req_ref_inc_not_zero(prev))
                        prev = NULL;
        }
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       list_del(&req->timeout.list);
+       req->timeout.prev = prev;
+       spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
-       if (prev) {
-               io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
-               io_put_req_deferred(prev, 1);
-               io_put_req_deferred(req, 1);
-       } else {
-               io_req_complete_post(req, -ETIME, 0);
-       }
+       req->io_task_work.func = io_req_task_link_timeout;
+       io_req_task_work_add(req);
        return HRTIMER_NORESTART;
 }
 
@@ -6424,7 +6836,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock_irq(&ctx->timeout_lock);
        /*
         * If the back reference is NULL, then our linked request finished
         * before we got a chance to setup the timer
@@ -6435,29 +6847,17 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
                data->timer.function = io_link_timeout_fn;
                hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
                                data->mode);
+               list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
        }
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock_irq(&ctx->timeout_lock);
        /* drop submission reference */
        io_put_req(req);
 }
 
-static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
-{
-       struct io_kiocb *nxt = req->link;
-
-       if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
-           nxt->opcode != IORING_OP_LINK_TIMEOUT)
-               return NULL;
-
-       nxt->timeout.head = req;
-       nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
-       req->flags |= REQ_F_LINK_TIMEOUT;
-       return nxt;
-}
-
 static void __io_queue_sqe(struct io_kiocb *req)
+       __must_hold(&req->ctx->uring_lock)
 {
-       struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
+       struct io_kiocb *linked_timeout;
        int ret;
 
 issue_sqe:
@@ -6468,50 +6868,60 @@ issue_sqe:
         * doesn't support non-blocking read/write attempts
         */
        if (likely(!ret)) {
-               /* drop submission reference */
                if (req->flags & REQ_F_COMPLETE_INLINE) {
                        struct io_ring_ctx *ctx = req->ctx;
-                       struct io_comp_state *cs = &ctx->submit_state.comp;
+                       struct io_submit_state *state = &ctx->submit_state;
 
-                       cs->reqs[cs->nr++] = req;
-                       if (cs->nr == ARRAY_SIZE(cs->reqs))
+                       state->compl_reqs[state->compl_nr++] = req;
+                       if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
                                io_submit_flush_completions(ctx);
-               } else {
-                       io_put_req(req);
+                       return;
                }
+
+               linked_timeout = io_prep_linked_timeout(req);
+               if (linked_timeout)
+                       io_queue_linked_timeout(linked_timeout);
        } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+               linked_timeout = io_prep_linked_timeout(req);
+
                switch (io_arm_poll_handler(req)) {
                case IO_APOLL_READY:
+                       if (linked_timeout)
+                               io_unprep_linked_timeout(req);
                        goto issue_sqe;
                case IO_APOLL_ABORTED:
                        /*
                         * Queued up for async execution, worker will release
                         * submit reference when the iocb is actually submitted.
                         */
-                       io_queue_async_work(req);
+                       io_queue_async_work(req, NULL);
                        break;
                }
+
+               if (linked_timeout)
+                       io_queue_linked_timeout(linked_timeout);
        } else {
                io_req_complete_failed(req, ret);
        }
-       if (linked_timeout)
-               io_queue_linked_timeout(linked_timeout);
 }
 
 static inline void io_queue_sqe(struct io_kiocb *req)
+       __must_hold(&req->ctx->uring_lock)
 {
        if (unlikely(req->ctx->drain_active) && io_drain_req(req))
                return;
 
-       if (likely(!(req->flags & REQ_F_FORCE_ASYNC))) {
+       if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) {
                __io_queue_sqe(req);
+       } else if (req->flags & REQ_F_FAIL) {
+               io_req_complete_failed(req, req->result);
        } else {
                int ret = io_req_prep_async(req);
 
                if (unlikely(ret))
                        io_req_complete_failed(req, ret);
                else
-                       io_queue_async_work(req);
+                       io_queue_async_work(req, NULL);
        }
 }
 
@@ -6543,19 +6953,19 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx,
 
 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                       const struct io_uring_sqe *sqe)
+       __must_hold(&ctx->uring_lock)
 {
        struct io_submit_state *state;
        unsigned int sqe_flags;
        int personality, ret = 0;
 
+       /* req is partially pre-initialised, see io_preinit_req() */
        req->opcode = READ_ONCE(sqe->opcode);
        /* same numerical values with corresponding REQ_F_*, safe to copy */
        req->flags = sqe_flags = READ_ONCE(sqe->flags);
        req->user_data = READ_ONCE(sqe->user_data);
        req->file = NULL;
        req->fixed_rsrc_refs = NULL;
-       /* one is dropped after submission, the other at completion */
-       atomic_set(&req->refs, 2);
        req->task = current;
 
        /* enforce forwards compatibility on users */
@@ -6593,9 +7003,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
        }
 
        if (io_op_defs[req->opcode].needs_file) {
-               bool fixed = req->flags & REQ_F_FIXED_FILE;
-
-               req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
+               req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
+                                       (sqe_flags & IOSQE_FIXED_FILE));
                if (unlikely(!req->file))
                        ret = -EBADF;
        }
@@ -6606,6 +7015,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
 static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
                         const struct io_uring_sqe *sqe)
+       __must_hold(&ctx->uring_lock)
 {
        struct io_submit_link *link = &ctx->submit_state.link;
        int ret;
@@ -6613,20 +7023,34 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
        ret = io_init_req(ctx, req, sqe);
        if (unlikely(ret)) {
 fail_req:
+               /* fail even hard links since we don't submit */
                if (link->head) {
-                       /* fail even hard links since we don't submit */
-                       req_set_fail(link->head);
-                       io_req_complete_failed(link->head, -ECANCELED);
-                       link->head = NULL;
+                       /*
+                        * We can tell whether a link req failed or was cancelled
+                        * by checking REQ_F_FAIL, but the head is an exception:
+                        * it may have REQ_F_FAIL set because some other req in the
+                        * chain failed. Leverage req->result to distinguish a head
+                        * that failed itself from one failed by another req, so
+                        * that the correct ret code can be set for it. Init the
+                        * result here to avoid affecting the normal path.
+                        */
+                       if (!(link->head->flags & REQ_F_FAIL))
+                               req_fail_link_node(link->head, -ECANCELED);
+               } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
+                       /*
+                        * the current req is a normal req; return the error
+                        * and thus break the submission loop.
+                        */
+                       io_req_complete_failed(req, ret);
+                       return ret;
                }
-               io_req_complete_failed(req, ret);
-               return ret;
+               req_fail_link_node(req, ret);
+       } else {
+               ret = io_req_prep(req, sqe);
+               if (unlikely(ret))
+                       goto fail_req;
        }
 
-       ret = io_req_prep(req, sqe);
-       if (unlikely(ret))
-               goto fail_req;
-
        /* don't need @sqe from now on */
        trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
                                  req->flags, true,
@@ -6642,9 +7066,14 @@ fail_req:
        if (link->head) {
                struct io_kiocb *head = link->head;
 
-               ret = io_req_prep_async(req);
-               if (unlikely(ret))
-                       goto fail_req;
+               if (!(req->flags & REQ_F_FAIL)) {
+                       ret = io_req_prep_async(req);
+                       if (unlikely(ret)) {
+                               req_fail_link_node(req, ret);
+                               if (!(head->flags & REQ_F_FAIL))
+                                       req_fail_link_node(head, -ECANCELED);
+                       }
+               }
                trace_io_uring_link(ctx, req, head);
                link->last->link = req;
                link->last = req;
@@ -6674,11 +7103,10 @@ static void io_submit_state_end(struct io_submit_state *state,
 {
        if (state->link.head)
                io_queue_sqe(state->link.head);
-       if (state->comp.nr)
+       if (state->compl_nr)
                io_submit_flush_completions(ctx);
        if (state->plug_started)
                blk_finish_plug(&state->plug);
-       io_state_file_put(state);
 }
 
 /*
@@ -6738,26 +7166,17 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
 }
 
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
+       __must_hold(&ctx->uring_lock)
 {
-       struct io_uring_task *tctx;
        int submitted = 0;
 
        /* make sure SQ entry isn't read before tail */
        nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
        if (!percpu_ref_tryget_many(&ctx->refs, nr))
                return -EAGAIN;
+       io_get_task_refs(nr);
 
-       tctx = current->io_uring;
-       tctx->cached_refs -= nr;
-       if (unlikely(tctx->cached_refs < 0)) {
-               unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
-
-               percpu_counter_add(&tctx->inflight, refill);
-               refcount_add(refill, &current->usage);
-               tctx->cached_refs += refill;
-       }
        io_submit_state_start(&ctx->submit_state, nr);
-
        while (submitted < nr) {
                const struct io_uring_sqe *sqe;
                struct io_kiocb *req;
@@ -6770,7 +7189,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
                }
                sqe = io_get_sqe(ctx);
                if (unlikely(!sqe)) {
-                       kmem_cache_free(req_cachep, req);
+                       list_add(&req->inflight_entry, &ctx->submit_state.free_list);
                        break;
                }
                /* will complete beyond this point, count as submitted */
@@ -6802,16 +7221,18 @@ static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
 static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
 {
        /* Tell userspace we may need a wakeup call */
-       spin_lock_irq(&ctx->completion_lock);
-       ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
+       WRITE_ONCE(ctx->rings->sq_flags,
+                  ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
+       spin_unlock(&ctx->completion_lock);
 }
 
 static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
 {
-       spin_lock_irq(&ctx->completion_lock);
-       ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
+       WRITE_ONCE(ctx->rings->sq_flags,
+                  ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
+       spin_unlock(&ctx->completion_lock);
 }
 
 static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
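
As a side note on the two helpers above: sq_flags lives in the ring area shared with userspace, which is why the kernel now publishes it with WRITE_ONCE(). The userspace half of that protocol only needs to call io_uring_enter(2) with IORING_ENTER_SQ_WAKEUP when it observes IORING_SQ_NEED_WAKEUP; the raw six-argument syscall form and the atomic builtin in the sketch below are assumptions of this sketch, not taken from the patch (liburing wraps the same check).

        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/io_uring.h>

        /* Userspace sketch of the SQPOLL wakeup check: read the shared
         * sq_flags word and kick the SQ thread only when it asked for it. */
        static void sqpoll_kick_if_needed(int ring_fd, const unsigned *sq_flags)
        {
                if (__atomic_load_n(sq_flags, __ATOMIC_ACQUIRE) & IORING_SQ_NEED_WAKEUP)
                        syscall(__NR_io_uring_enter, ring_fd, 0, 0,
                                IORING_ENTER_SQ_WAKEUP, NULL, 0);
        }
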
@@ -6833,7 +7254,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 
                mutex_lock(&ctx->uring_lock);
                if (!list_empty(&ctx->iopoll_list))
-                       io_do_iopoll(ctx, &nr_events, 0, true);
+                       io_do_iopoll(ctx, &nr_events, 0);
 
                /*
                 * Don't submit if refs are dying, good for io_uring_register(),
@@ -6968,21 +7389,21 @@ static int io_sq_thread(void *data)
 struct io_wait_queue {
        struct wait_queue_entry wq;
        struct io_ring_ctx *ctx;
-       unsigned to_wait;
+       unsigned cq_tail;
        unsigned nr_timeouts;
 };
 
 static inline bool io_should_wake(struct io_wait_queue *iowq)
 {
        struct io_ring_ctx *ctx = iowq->ctx;
+       int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
 
        /*
         * Wake up if we have enough events, or if a timeout occurred since we
         * started waiting. For timeouts, we always want to return to userspace,
         * regardless of event count.
         */
-       return io_cqring_events(ctx) >= iowq->to_wait ||
-                       atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+       return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
 }
 
 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
@@ -7038,21 +7459,13 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                          const sigset_t __user *sig, size_t sigsz,
                          struct __kernel_timespec __user *uts)
 {
-       struct io_wait_queue iowq = {
-               .wq = {
-                       .private        = current,
-                       .func           = io_wake_function,
-                       .entry          = LIST_HEAD_INIT(iowq.wq.entry),
-               },
-               .ctx            = ctx,
-               .to_wait        = min_events,
-       };
+       struct io_wait_queue iowq;
        struct io_rings *rings = ctx->rings;
        signed long timeout = MAX_SCHEDULE_TIMEOUT;
        int ret;
 
        do {
-               io_cqring_overflow_flush(ctx, false);
+               io_cqring_overflow_flush(ctx);
                if (io_cqring_events(ctx) >= min_events)
                        return 0;
                if (!io_run_task_work())
@@ -7080,11 +7493,17 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                timeout = timespec64_to_jiffies(&ts);
        }
 
+       init_waitqueue_func_entry(&iowq.wq, io_wake_function);
+       iowq.wq.private = current;
+       INIT_LIST_HEAD(&iowq.wq.entry);
+       iowq.ctx = ctx;
        iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+       iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
+
        trace_io_uring_cqring_wait(ctx, min_events);
        do {
                /* if we can't even flush overflow, don't wait for more */
-               if (!io_cqring_overflow_flush(ctx, false)) {
+               if (!io_cqring_overflow_flush(ctx)) {
                        ret = -EBUSY;
                        break;
                }
@@ -7115,14 +7534,14 @@ static void **io_alloc_page_table(size_t size)
        size_t init_size = size;
        void **table;
 
-       table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL);
+       table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
        if (!table)
                return NULL;
 
        for (i = 0; i < nr_tables; i++) {
                unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
 
-               table[i] = kzalloc(this_size, GFP_KERNEL);
+               table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
                if (!table[i]) {
                        io_free_page_table(table, init_size);
                        return NULL;
@@ -7132,20 +7551,54 @@ static void **io_alloc_page_table(size_t size)
        return table;
 }
 
-static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
+static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
 {
-       spin_lock_bh(&ctx->rsrc_ref_lock);
+       percpu_ref_exit(&ref_node->refs);
+       kfree(ref_node);
 }
 
-static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
+static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
 {
-       spin_unlock_bh(&ctx->rsrc_ref_lock);
+       struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
+       struct io_ring_ctx *ctx = node->rsrc_data->ctx;
+       unsigned long flags;
+       bool first_add = false;
+
+       spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
+       node->done = true;
+
+       while (!list_empty(&ctx->rsrc_ref_list)) {
+               node = list_first_entry(&ctx->rsrc_ref_list,
+                                           struct io_rsrc_node, node);
+               /* recycle ref nodes in order */
+               if (!node->done)
+                       break;
+               list_del(&node->node);
+               first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
+       }
+       spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
+
+       if (first_add)
+               mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
 }
 
-static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
+static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
 {
-       percpu_ref_exit(&ref_node->refs);
-       kfree(ref_node);
+       struct io_rsrc_node *ref_node;
+
+       ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
+       if (!ref_node)
+               return NULL;
+
+       if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
+                           0, GFP_KERNEL)) {
+               kfree(ref_node);
+               return NULL;
+       }
+       INIT_LIST_HEAD(&ref_node->node);
+       INIT_LIST_HEAD(&ref_node->rsrc_list);
+       ref_node->done = false;
+       return ref_node;
 }
 
 static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
@@ -7158,9 +7611,9 @@ static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
                struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
 
                rsrc_node->rsrc_data = data_to_kill;
-               io_rsrc_ref_lock(ctx);
+               spin_lock_irq(&ctx->rsrc_ref_lock);
                list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
-               io_rsrc_ref_unlock(ctx);
+               spin_unlock_irq(&ctx->rsrc_ref_lock);
 
                atomic_inc(&data_to_kill->refs);
                percpu_ref_kill(&rsrc_node->refs);
@@ -7199,17 +7652,19 @@ static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ct
                /* kill initial ref, already quiesced if zero */
                if (atomic_dec_and_test(&data->refs))
                        break;
+               mutex_unlock(&ctx->uring_lock);
                flush_delayed_work(&ctx->rsrc_put_work);
                ret = wait_for_completion_interruptible(&data->done);
-               if (!ret)
+               if (!ret) {
+                       mutex_lock(&ctx->uring_lock);
                        break;
+               }
 
                atomic_inc(&data->refs);
                /* wait for all works potentially completing data->done */
                flush_delayed_work(&ctx->rsrc_put_work);
                reinit_completion(&data->done);
 
-               mutex_unlock(&ctx->uring_lock);
                ret = io_run_task_work_sig();
                mutex_lock(&ctx->uring_lock);
        } while (ret >= 0);
@@ -7277,17 +7732,14 @@ fail:
 
 static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
 {
-       size_t size = nr_files * sizeof(struct io_fixed_file);
-
-       table->files = (struct io_fixed_file **)io_alloc_page_table(size);
+       table->files = kvcalloc(nr_files, sizeof(table->files[0]),
+                               GFP_KERNEL_ACCOUNT);
        return !!table->files;
 }
 
-static void io_free_file_tables(struct io_file_table *table, unsigned nr_files)
+static void io_free_file_tables(struct io_file_table *table)
 {
-       size_t size = nr_files * sizeof(struct io_fixed_file);
-
-       io_free_page_table((void **)table->files, size);
+       kvfree(table->files);
        table->files = NULL;
 }
 
@@ -7312,7 +7764,7 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
                        fput(file);
        }
 #endif
-       io_free_file_tables(&ctx->file_table, ctx->nr_user_files);
+       io_free_file_tables(&ctx->file_table);
        io_rsrc_data_free(ctx->file_data);
        ctx->file_data = NULL;
        ctx->nr_user_files = 0;
@@ -7628,11 +8080,11 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
                        bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
 
                        io_ring_submit_lock(ctx, lock_ring);
-                       spin_lock_irq(&ctx->completion_lock);
+                       spin_lock(&ctx->completion_lock);
                        io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
                        ctx->cq_extra++;
                        io_commit_cqring(ctx);
-                       spin_unlock_irq(&ctx->completion_lock);
+                       spin_unlock(&ctx->completion_lock);
                        io_cqring_ev_posted(ctx);
                        io_ring_submit_unlock(ctx, lock_ring);
                }
@@ -7664,49 +8116,6 @@ static void io_rsrc_put_work(struct work_struct *work)
        }
 }
 
-static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
-{
-       struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
-       struct io_ring_ctx *ctx = node->rsrc_data->ctx;
-       bool first_add = false;
-
-       io_rsrc_ref_lock(ctx);
-       node->done = true;
-
-       while (!list_empty(&ctx->rsrc_ref_list)) {
-               node = list_first_entry(&ctx->rsrc_ref_list,
-                                           struct io_rsrc_node, node);
-               /* recycle ref nodes in order */
-               if (!node->done)
-                       break;
-               list_del(&node->node);
-               first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
-       }
-       io_rsrc_ref_unlock(ctx);
-
-       if (first_add)
-               mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
-}
-
-static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
-{
-       struct io_rsrc_node *ref_node;
-
-       ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
-       if (!ref_node)
-               return NULL;
-
-       if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
-                           0, GFP_KERNEL)) {
-               kfree(ref_node);
-               return NULL;
-       }
-       INIT_LIST_HEAD(&ref_node->node);
-       INIT_LIST_HEAD(&ref_node->rsrc_list);
-       ref_node->done = false;
-       return ref_node;
-}
-
 static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                                 unsigned nr_args, u64 __user *tags)
 {
@@ -7721,6 +8130,8 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                return -EINVAL;
        if (nr_args > IORING_MAX_FIXED_FILES)
                return -EMFILE;
+       if (nr_args > rlimit(RLIMIT_NOFILE))
+               return -EMFILE;
        ret = io_rsrc_node_switch_start(ctx);
        if (ret)
                return ret;
@@ -7779,7 +8190,7 @@ out_fput:
                if (file)
                        fput(file);
        }
-       io_free_file_tables(&ctx->file_table, nr_args);
+       io_free_file_tables(&ctx->file_table);
        ctx->nr_user_files = 0;
 out_free:
        io_rsrc_data_free(ctx->file_data);
@@ -7830,6 +8241,46 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
 #endif
 }
 
+static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
+                                unsigned int issue_flags, u32 slot_index)
+{
+       struct io_ring_ctx *ctx = req->ctx;
+       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       struct io_fixed_file *file_slot;
+       int ret = -EBADF;
+
+       io_ring_submit_lock(ctx, !force_nonblock);
+       if (file->f_op == &io_uring_fops)
+               goto err;
+       ret = -ENXIO;
+       if (!ctx->file_data)
+               goto err;
+       ret = -EINVAL;
+       if (slot_index >= ctx->nr_user_files)
+               goto err;
+
+       slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
+       file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
+       ret = -EBADF;
+       if (file_slot->file_ptr)
+               goto err;
+
+       *io_get_tag_slot(ctx->file_data, slot_index) = 0;
+       io_fixed_file_set(file_slot, file);
+       ret = io_sqe_file_register(ctx, file, slot_index);
+       if (ret) {
+               file_slot->file_ptr = 0;
+               goto err;
+       }
+
+       ret = 0;
+err:
+       io_ring_submit_unlock(ctx, !force_nonblock);
+       if (ret)
+               fput(file);
+       return ret;
+}
+
 static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
                                 struct io_rsrc_node *node, void *rsrc)
 {
@@ -7925,14 +8376,6 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
        return done ? done : err;
 }
 
-static struct io_wq_work *io_free_work(struct io_wq_work *work)
-{
-       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
-
-       req = io_put_req_find_next(req);
-       return req ? &req->work : NULL;
-}
-
 static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
                                        struct task_struct *task)
 {
@@ -7956,7 +8399,7 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
 
        data.hash = hash;
        data.task = task;
-       data.free_work = io_free_work;
+       data.free_work = io_wq_free_work;
        data.do_work = io_wq_submit_work;
 
        /* Do QD, or 4 * CPUS, whatever is smallest */
@@ -8623,43 +9066,36 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
                __io_remove_buffers(ctx, buf, index, -1U);
 }
 
-static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
+static void io_req_cache_free(struct list_head *list)
 {
        struct io_kiocb *req, *nxt;
 
-       list_for_each_entry_safe(req, nxt, list, compl.list) {
-               if (tsk && req->task != tsk)
-                       continue;
-               list_del(&req->compl.list);
+       list_for_each_entry_safe(req, nxt, list, inflight_entry) {
+               list_del(&req->inflight_entry);
                kmem_cache_free(req_cachep, req);
        }
 }
 
 static void io_req_caches_free(struct io_ring_ctx *ctx)
 {
-       struct io_submit_state *submit_state = &ctx->submit_state;
-       struct io_comp_state *cs = &ctx->submit_state.comp;
+       struct io_submit_state *state = &ctx->submit_state;
 
        mutex_lock(&ctx->uring_lock);
 
-       if (submit_state->free_reqs) {
-               kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
-                                    submit_state->reqs);
-               submit_state->free_reqs = 0;
+       if (state->free_reqs) {
+               kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
+               state->free_reqs = 0;
        }
 
-       io_flush_cached_locked_reqs(ctx, cs);
-       io_req_cache_free(&cs->free_list, NULL);
+       io_flush_cached_locked_reqs(ctx, state);
+       io_req_cache_free(&state->free_list);
        mutex_unlock(&ctx->uring_lock);
 }
 
-static bool io_wait_rsrc_data(struct io_rsrc_data *data)
+static void io_wait_rsrc_data(struct io_rsrc_data *data)
 {
-       if (!data)
-               return false;
-       if (!atomic_dec_and_test(&data->refs))
+       if (data && !atomic_dec_and_test(&data->refs))
                wait_for_completion(&data->done);
-       return true;
 }
 
 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
@@ -8671,10 +9107,14 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
                ctx->mm_account = NULL;
        }
 
+       /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
+       io_wait_rsrc_data(ctx->buf_data);
+       io_wait_rsrc_data(ctx->file_data);
+
        mutex_lock(&ctx->uring_lock);
-       if (io_wait_rsrc_data(ctx->buf_data))
+       if (ctx->buf_data)
                __io_sqe_buffers_unregister(ctx);
-       if (io_wait_rsrc_data(ctx->file_data))
+       if (ctx->file_data)
                __io_sqe_files_unregister(ctx);
        if (ctx->rings)
                __io_cqring_overflow_flush(ctx, true);
@@ -8700,6 +9140,7 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
                sock_release(ctx->ring_sock);
        }
 #endif
+       WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
 
        io_mem_free(ctx->rings);
        io_mem_free(ctx->sq_sqes);
@@ -8799,6 +9240,7 @@ static void io_ring_exit_work(struct work_struct *work)
 {
        struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
        unsigned long timeout = jiffies + HZ * 60 * 5;
+       unsigned long interval = HZ / 20;
        struct io_tctx_exit exit;
        struct io_tctx_node *node;
        int ret;
@@ -8823,8 +9265,11 @@ static void io_ring_exit_work(struct work_struct *work)
                        io_sq_thread_unpark(sqd);
                }
 
-               WARN_ON_ONCE(time_after(jiffies, timeout));
-       } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
+               if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
+                       /* there is little hope left, don't run it too often */
+                       interval = HZ * 60;
+               }
+       } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
 
        init_completion(&exit.completion);
        init_task_work(&exit.task_work, io_tctx_exit_cb);
@@ -8853,8 +9298,8 @@ static void io_ring_exit_work(struct work_struct *work)
                mutex_lock(&ctx->uring_lock);
        }
        mutex_unlock(&ctx->uring_lock);
-       spin_lock_irq(&ctx->completion_lock);
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
 
        io_ring_ctx_free(ctx);
 }
@@ -8866,16 +9311,18 @@ static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
        struct io_kiocb *req, *tmp;
        int canceled = 0;
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
+       spin_lock_irq(&ctx->timeout_lock);
        list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
                if (io_match_task(req, tsk, cancel_all)) {
                        io_kill_timeout(req, -ECANCELED);
                        canceled++;
                }
        }
+       spin_unlock_irq(&ctx->timeout_lock);
        if (canceled != 0)
                io_commit_cqring(ctx);
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
        if (canceled != 0)
                io_cqring_ev_posted(ctx);
        return canceled != 0;
@@ -8931,13 +9378,12 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
        bool ret;
 
        if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
-               unsigned long flags;
                struct io_ring_ctx *ctx = req->ctx;
 
                /* protect against races with linked timeouts */
-               spin_lock_irqsave(&ctx->completion_lock, flags);
+               spin_lock(&ctx->completion_lock);
                ret = io_match_task(req, cancel->task, cancel->all);
-               spin_unlock_irqrestore(&ctx->completion_lock, flags);
+               spin_unlock(&ctx->completion_lock);
        } else {
                ret = io_match_task(req, cancel->task, cancel->all);
        }
@@ -8950,14 +9396,14 @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
        struct io_defer_entry *de;
        LIST_HEAD(list);
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        list_for_each_entry_reverse(de, &ctx->defer_list, list) {
                if (io_match_task(de->req, task, cancel_all)) {
                        list_cut_position(&list, &ctx->defer_list, &de->list);
                        break;
                }
        }
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
        if (list_empty(&list))
                return false;
 
@@ -9122,8 +9568,8 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
                 * Must be after io_uring_del_task_file() (removes nodes under
                 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
                 */
-               tctx->io_wq = NULL;
                io_wq_put_and_exit(wq);
+               tctx->io_wq = NULL;
        }
 }
 
@@ -9139,9 +9585,11 @@ static void io_uring_drop_tctx_refs(struct task_struct *task)
        struct io_uring_task *tctx = task->io_uring;
        unsigned int refs = tctx->cached_refs;
 
-       tctx->cached_refs = 0;
-       percpu_counter_sub(&tctx->inflight, refs);
-       put_task_struct_many(task, refs);
+       if (refs) {
+               tctx->cached_refs = 0;
+               percpu_counter_sub(&tctx->inflight, refs);
+               put_task_struct_many(task, refs);
+       }
 }
 
 /*
@@ -9162,9 +9610,9 @@ static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
        if (tctx->io_wq)
                io_wq_exit_start(tctx->io_wq);
 
-       io_uring_drop_tctx_refs(current);
        atomic_inc(&tctx->in_idle);
        do {
+               io_uring_drop_tctx_refs(current);
                /* read completions before cancelations */
                inflight = tctx_inflight(tctx, !cancel_all);
                if (!inflight)
@@ -9188,6 +9636,7 @@ static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
                }
 
                prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+               io_uring_drop_tctx_refs(current);
                /*
                 * If we've seen completions, retry without waiting. This
                 * avoids a race where a completion comes in before we did
@@ -9206,9 +9655,9 @@ static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
        }
 }
 
-void __io_uring_cancel(struct files_struct *files)
+void __io_uring_cancel(bool cancel_all)
 {
-       io_uring_cancel_generic(!files, NULL);
+       io_uring_cancel_generic(cancel_all, NULL);
 }
 
 static void *io_uring_validate_mmap_request(struct file *file,
@@ -9368,11 +9817,12 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
         */
        ret = 0;
        if (ctx->flags & IORING_SETUP_SQPOLL) {
-               io_cqring_overflow_flush(ctx, false);
+               io_cqring_overflow_flush(ctx);
 
-               ret = -EOWNERDEAD;
-               if (unlikely(ctx->sq_data->thread == NULL))
+               if (unlikely(ctx->sq_data->thread == NULL)) {
+                       ret = -EOWNERDEAD;
                        goto out;
+               }
                if (flags & IORING_ENTER_SQ_WAKEUP)
                        wake_up(&ctx->sq_data->wait);
                if (flags & IORING_ENTER_SQ_WAIT) {
@@ -9503,7 +9953,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
                        io_uring_show_cred(m, index, cred);
        }
        seq_printf(m, "PollList:\n");
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
                struct hlist_head *list = &ctx->cancel_hash[i];
                struct io_kiocb *req;
@@ -9512,7 +9962,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
                        seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
                                        req->task->task_works != NULL);
        }
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
        if (has_lock)
                mutex_unlock(&ctx->uring_lock);
 }
@@ -9840,10 +10290,11 @@ static int io_register_personality(struct io_ring_ctx *ctx)
 
        ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
                        XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
-       if (!ret)
-               return id;
-       put_cred(creds);
-       return ret;
+       if (ret < 0) {
+               put_cred(creds);
+               return ret;
+       }
+       return id;
 }
 
 static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
@@ -10044,6 +10495,31 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
        return io_wq_cpu_affinity(tctx->io_wq, NULL);
 }
 
+static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+                                       void __user *arg)
+{
+       struct io_uring_task *tctx = current->io_uring;
+       __u32 new_count[2];
+       int i, ret;
+
+       if (!tctx || !tctx->io_wq)
+               return -EINVAL;
+       if (copy_from_user(new_count, arg, sizeof(new_count)))
+               return -EFAULT;
+       for (i = 0; i < ARRAY_SIZE(new_count); i++)
+               if (new_count[i] > INT_MAX)
+                       return -EINVAL;
+
+       ret = io_wq_max_workers(tctx->io_wq, new_count);
+       if (ret)
+               return ret;
+
+       if (copy_to_user(arg, new_count, sizeof(new_count)))
+               return -EFAULT;
+
+       return 0;
+}
+
 static bool io_register_op_must_quiesce(int op)
 {
        switch (op) {
@@ -10061,12 +10537,40 @@ static bool io_register_op_must_quiesce(int op)
        case IORING_REGISTER_BUFFERS_UPDATE:
        case IORING_REGISTER_IOWQ_AFF:
        case IORING_UNREGISTER_IOWQ_AFF:
+       case IORING_REGISTER_IOWQ_MAX_WORKERS:
                return false;
        default:
                return true;
        }
 }
 
+static int io_ctx_quiesce(struct io_ring_ctx *ctx)
+{
+       long ret;
+
+       percpu_ref_kill(&ctx->refs);
+
+       /*
+        * Drop uring mutex before waiting for references to exit. If another
+        * thread is currently inside io_uring_enter() it might need to grab the
+        * uring_lock to make progress. If we hold it here across the drain
+        * wait, then we can deadlock. It's safe to drop the mutex here, since
+        * no new references will come in after we've killed the percpu ref.
+        */
+       mutex_unlock(&ctx->uring_lock);
+       do {
+               ret = wait_for_completion_interruptible(&ctx->ref_comp);
+               if (!ret)
+                       break;
+               ret = io_run_task_work_sig();
+       } while (ret >= 0);
+       mutex_lock(&ctx->uring_lock);
+
+       if (ret)
+               io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
+       return ret;
+}
+
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                               void __user *arg, unsigned nr_args)
        __releases(ctx->uring_lock)
@@ -10091,31 +10595,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
        }
 
        if (io_register_op_must_quiesce(opcode)) {
-               percpu_ref_kill(&ctx->refs);
-
-               /*
-                * Drop uring mutex before waiting for references to exit. If
-                * another thread is currently inside io_uring_enter() it might
-                * need to grab the uring_lock to make progress. If we hold it
-                * here across the drain wait, then we can deadlock. It's safe
-                * to drop the mutex here, since no new references will come in
-                * after we've killed the percpu ref.
-                */
-               mutex_unlock(&ctx->uring_lock);
-               do {
-                       ret = wait_for_completion_interruptible(&ctx->ref_comp);
-                       if (!ret)
-                               break;
-                       ret = io_run_task_work_sig();
-                       if (ret < 0)
-                               break;
-               } while (1);
-               mutex_lock(&ctx->uring_lock);
-
-               if (ret) {
-                       io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
+               ret = io_ctx_quiesce(ctx);
+               if (ret)
                        return ret;
-               }
        }
 
        switch (opcode) {
@@ -10212,6 +10694,12 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                        break;
                ret = io_unregister_iowq_aff(ctx);
                break;
+       case IORING_REGISTER_IOWQ_MAX_WORKERS:
+               ret = -EINVAL;
+               if (!arg || nr_args != 2)
+                       break;
+               ret = io_register_iowq_max_workers(ctx, arg);
+               break;
        default:
                ret = -EINVAL;
                break;
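
To illustrate the registration opcode wired up above: from userspace it is driven through io_uring_register(2) with an array of two __u32 counts (bounded io-wq workers first, unbounded second), and the register call must pass nr_args == 2 per the new case above. The sketch below uses the raw syscall; the ring_fd value, the chosen limits, and the detail that a zero entry leaves that limit untouched while the previous values are copied back are assumptions to verify against the 5.15 uapi headers and man pages.

        #include <stdio.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/io_uring.h>

        /* Cap a ring's io-wq pools; counts[0] = bounded, counts[1] = unbounded.
         * The kernel writes values back into counts[] on success. */
        static int set_iowq_limits(int ring_fd)
        {
                __u32 counts[2] = { 4, 8 };     /* illustrative limits */

                if (syscall(__NR_io_uring_register, ring_fd,
                            IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2) < 0) {
                        perror("IORING_REGISTER_IOWQ_MAX_WORKERS");
                        return -1;
                }
                printf("reported limits: bounded=%u unbounded=%u\n",
                       counts[0], counts[1]);
                return 0;
        }
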
@@ -10293,11 +10781,16 @@ static int __init io_uring_init(void)
        BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
        BUILD_BUG_SQE_ELEM(42, __u16,  personality);
        BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
+       BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
 
        BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
                     sizeof(struct io_uring_rsrc_update));
        BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
                     sizeof(struct io_uring_rsrc_update2));
+
+       /* ->buf_index is u16 */
+       BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
+
        /* should fit into one byte */
        BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
 
index 1e2204f..504e695 100644 (file)
@@ -263,209 +263,6 @@ static long ioctl_file_clone_range(struct file *file,
                                args.src_length, args.dest_offset);
 }
 
-#ifdef CONFIG_BLOCK
-
-static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
-{
-       return (offset >> inode->i_blkbits);
-}
-
-static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
-{
-       return (blk << inode->i_blkbits);
-}
-
-/**
- * __generic_block_fiemap - FIEMAP for block based inodes (no locking)
- * @inode: the inode to map
- * @fieinfo: the fiemap info struct that will be passed back to userspace
- * @start: where to start mapping in the inode
- * @len: how much space to map
- * @get_block: the fs's get_block function
- *
- * This does FIEMAP for block based inodes.  Basically it will just loop
- * through get_block until we hit the number of extents we want to map, or we
- * go past the end of the file and hit a hole.
- *
- * If it is possible to have data blocks beyond a hole past @inode->i_size, then
- * please do not use this function, it will stop at the first unmapped block
- * beyond i_size.
- *
- * If you use this function directly, you need to do your own locking. Use
- * generic_block_fiemap if you want the locking done for you.
- */
-static int __generic_block_fiemap(struct inode *inode,
-                          struct fiemap_extent_info *fieinfo, loff_t start,
-                          loff_t len, get_block_t *get_block)
-{
-       struct buffer_head map_bh;
-       sector_t start_blk, last_blk;
-       loff_t isize = i_size_read(inode);
-       u64 logical = 0, phys = 0, size = 0;
-       u32 flags = FIEMAP_EXTENT_MERGED;
-       bool past_eof = false, whole_file = false;
-       int ret = 0;
-
-       ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_SYNC);
-       if (ret)
-               return ret;
-
-       /*
-        * Either the i_mutex or other appropriate locking needs to be held
-        * since we expect isize to not change at all through the duration of
-        * this call.
-        */
-       if (len >= isize) {
-               whole_file = true;
-               len = isize;
-       }
-
-       /*
-        * Some filesystems can't deal with being asked to map less than
-        * blocksize, so make sure our len is at least block length.
-        */
-       if (logical_to_blk(inode, len) == 0)
-               len = blk_to_logical(inode, 1);
-
-       start_blk = logical_to_blk(inode, start);
-       last_blk = logical_to_blk(inode, start + len - 1);
-
-       do {
-               /*
-                * we set b_size to the total size we want so it will map as
-                * many contiguous blocks as possible at once
-                */
-               memset(&map_bh, 0, sizeof(struct buffer_head));
-               map_bh.b_size = len;
-
-               ret = get_block(inode, start_blk, &map_bh, 0);
-               if (ret)
-                       break;
-
-               /* HOLE */
-               if (!buffer_mapped(&map_bh)) {
-                       start_blk++;
-
-                       /*
-                        * We want to handle the case where there is an
-                        * allocated block at the front of the file, and then
-                        * nothing but holes up to the end of the file properly,
-                        * to make sure that extent at the front gets properly
-                        * marked with FIEMAP_EXTENT_LAST
-                        */
-                       if (!past_eof &&
-                           blk_to_logical(inode, start_blk) >= isize)
-                               past_eof = 1;
-
-                       /*
-                        * First hole after going past the EOF, this is our
-                        * last extent
-                        */
-                       if (past_eof && size) {
-                               flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size,
-                                                             flags);
-                       } else if (size) {
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size, flags);
-                               size = 0;
-                       }
-
-                       /* if we have holes up to/past EOF then we're done */
-                       if (start_blk > last_blk || past_eof || ret)
-                               break;
-               } else {
-                       /*
-                        * We have gone over the length of what we wanted to
-                        * map, and it wasn't the entire file, so add the extent
-                        * we got last time and exit.
-                        *
-                        * This is for the case where say we want to map all the
-                        * way up to the second to the last block in a file, but
-                        * the last block is a hole, making the second to last
-                        * block FIEMAP_EXTENT_LAST.  In this case we want to
-                        * see if there is a hole after the second to last block
-                        * so we can mark it properly.  If we found data after
-                        * we exceeded the length we were requesting, then we
-                        * are good to go, just add the extent to the fieinfo
-                        * and break
-                        */
-                       if (start_blk > last_blk && !whole_file) {
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size,
-                                                             flags);
-                               break;
-                       }
-
-                       /*
-                        * if size != 0 then we know we already have an extent
-                        * to add, so add it.
-                        */
-                       if (size) {
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size,
-                                                             flags);
-                               if (ret)
-                                       break;
-                       }
-
-                       logical = blk_to_logical(inode, start_blk);
-                       phys = blk_to_logical(inode, map_bh.b_blocknr);
-                       size = map_bh.b_size;
-                       flags = FIEMAP_EXTENT_MERGED;
-
-                       start_blk += logical_to_blk(inode, size);
-
-                       /*
-                        * If we are past the EOF, then we need to make sure as
-                        * soon as we find a hole that the last extent we found
-                        * is marked with FIEMAP_EXTENT_LAST
-                        */
-                       if (!past_eof && logical + size >= isize)
-                               past_eof = true;
-               }
-               cond_resched();
-               if (fatal_signal_pending(current)) {
-                       ret = -EINTR;
-                       break;
-               }
-
-       } while (1);
-
-       /* If ret is 1 then we just hit the end of the extent array */
-       if (ret == 1)
-               ret = 0;
-
-       return ret;
-}
-
-/**
- * generic_block_fiemap - FIEMAP for block based inodes
- * @inode: The inode to map
- * @fieinfo: The mapping information
- * @start: The initial block to map
- * @len: The length of the extect to attempt to map
- * @get_block: The block mapping function for the fs
- *
- * Calls __generic_block_fiemap to map the inode, after taking
- * the inode's mutex lock.
- */
-
-int generic_block_fiemap(struct inode *inode,
-                        struct fiemap_extent_info *fieinfo, u64 start,
-                        u64 len, get_block_t *get_block)
-{
-       int ret;
-       inode_lock(inode);
-       ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
-       inode_unlock(inode);
-       return ret;
-}
-EXPORT_SYMBOL(generic_block_fiemap);
-
-#endif  /*  CONFIG_BLOCK  */
-
 /*
  * This provides compatibility with legacy XFS pre-allocation ioctls
  * which predate the fallocate syscall.
@@ -817,6 +614,14 @@ static int fileattr_set_prepare(struct inode *inode,
                if ((old_ma->fsx_xflags ^ fa->fsx_xflags) &
                                FS_XFLAG_PROJINHERIT)
                        return -EINVAL;
+       } else {
+               /*
+                * Caller is allowed to change the project ID. If it is being
+                * changed, make sure that the new value is valid.
+                */
+               if (old_ma->fsx_projid != fa->fsx_projid &&
+                   !projid_valid(make_kprojid(&init_user_ns, fa->fsx_projid)))
+                       return -EINVAL;
        }
 
        /* Check extent size hints. */
index eef2722..4143a3f 100644 (file)
@@ -9,9 +9,9 @@ ccflags-y += -I $(srctree)/$(src)               # needed for trace events
 obj-$(CONFIG_FS_IOMAP)         += iomap.o
 
 iomap-y                                += trace.o \
-                                  apply.o \
                                   buffered-io.o \
                                   direct-io.o \
                                   fiemap.o \
+                                  iter.o \
                                   seek.o
 iomap-$(CONFIG_SWAP)           += swapfile.o
diff --git a/fs/iomap/apply.c b/fs/iomap/apply.c
deleted file mode 100644 (file)
index 26ab656..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016-2018 Christoph Hellwig.
- */
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
-#include <linux/iomap.h>
-#include "trace.h"
-
-/*
- * Execute a iomap write on a segment of the mapping that spans a
- * contiguous range of pages that have identical block mapping state.
- *
- * This avoids the need to map pages individually, do individual allocations
- * for each page and most importantly avoid the need for filesystem specific
- * locking per page. Instead, all the operations are amortised over the entire
- * range of pages. It is assumed that the filesystems will lock whatever
- * resources they require in the iomap_begin call, and release them in the
- * iomap_end call.
- */
-loff_t
-iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
-               const struct iomap_ops *ops, void *data, iomap_actor_t actor)
-{
-       struct iomap iomap = { .type = IOMAP_HOLE };
-       struct iomap srcmap = { .type = IOMAP_HOLE };
-       loff_t written = 0, ret;
-       u64 end;
-
-       trace_iomap_apply(inode, pos, length, flags, ops, actor, _RET_IP_);
-
-       /*
-        * Need to map a range from start position for length bytes. This can
-        * span multiple pages - it is only guaranteed to return a range of a
-        * single type of pages (e.g. all into a hole, all mapped or all
-        * unwritten). Failure at this point has nothing to undo.
-        *
-        * If allocation is required for this range, reserve the space now so
-        * that the allocation is guaranteed to succeed later on. Once we copy
-        * the data into the page cache pages, then we cannot fail otherwise we
-        * expose transient stale data. If the reserve fails, we can safely
-        * back out at this point as there is nothing to undo.
-        */
-       ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
-       if (ret)
-               return ret;
-       if (WARN_ON(iomap.offset > pos)) {
-               written = -EIO;
-               goto out;
-       }
-       if (WARN_ON(iomap.length == 0)) {
-               written = -EIO;
-               goto out;
-       }
-
-       trace_iomap_apply_dstmap(inode, &iomap);
-       if (srcmap.type != IOMAP_HOLE)
-               trace_iomap_apply_srcmap(inode, &srcmap);
-
-       /*
-        * Cut down the length to the one actually provided by the filesystem,
-        * as it might not be able to give us the whole size that we requested.
-        */
-       end = iomap.offset + iomap.length;
-       if (srcmap.type != IOMAP_HOLE)
-               end = min(end, srcmap.offset + srcmap.length);
-       if (pos + length > end)
-               length = end - pos;
-
-       /*
-        * Now that we have guaranteed that the space allocation will succeed,
-        * we can do the copy-in page by page without having to worry about
-        * failures exposing transient data.
-        *
-        * To support COW operations, we read in data for partially blocks from
-        * the srcmap if the file system filled it in.  In that case we the
-        * length needs to be limited to the earlier of the ends of the iomaps.
-        * If the file system did not provide a srcmap we pass in the normal
-        * iomap into the actors so that they don't need to have special
-        * handling for the two cases.
-        */
-       written = actor(inode, pos, length, data, &iomap,
-                       srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);
-
-out:
-       /*
-        * Now the data has been copied, commit the range we've copied.  This
-        * should not fail unless the filesystem has had a fatal error.
-        */
-       if (ops->iomap_end) {
-               ret = ops->iomap_end(inode, pos, length,
-                                    written > 0 ? written : 0,
-                                    flags, &iomap);
-       }
-
-       return written ? written : ret;
-}
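
The deletion of fs/iomap/apply.c pairs with the new iter.o object added to the Makefile above: the actor-callback model of iomap_apply() is replaced by a caller-driven loop over struct iomap_iter. Going by the 5.15 iomap_iter API and only as a sketch of the conversion pattern (do_op() is a placeholder, and the flags depend on the operation), a converted caller looks something like:

        /* Sketch of the iomap_iter() loop that replaces iomap_apply().
         * iomap_iter() calls ->iomap_begin()/->iomap_end() and advances
         * iter.pos by iter.processed on each pass; do_op() is a placeholder
         * returning bytes processed or a negative errno. */
        static loff_t do_op(struct iomap_iter *iter);

        static int example_iomap_op(struct inode *inode, loff_t pos, u64 len,
                                    const struct iomap_ops *ops)
        {
                struct iomap_iter iter = {
                        .inode  = inode,
                        .pos    = pos,
                        .len    = len,
                        .flags  = IOMAP_WRITE,
                };
                int ret;

                while ((ret = iomap_iter(&iter, ops)) > 0)
                        iter.processed = do_op(&iter);
                return ret;
        }
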
index 87ccb34..9cc5798 100644 (file)
@@ -36,7 +36,7 @@ static inline struct iomap_page *to_iomap_page(struct page *page)
 {
        /*
         * per-block data is stored in the head page.  Callers should
-        * not be dealing with tail pages (and if they are, they can
+        * not be dealing with tail pages, and if they are, they can
         * call thp_head() first.
         */
        VM_BUG_ON_PGFLAGS(PageTail(page), page);
@@ -98,7 +98,7 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
        unsigned last = (poff + plen - 1) >> block_bits;
 
        /*
-        * If the block size is smaller than the page size we need to check the
+        * If the block size is smaller than the page size, we need to check the
         * per-block uptodate status and adjust the offset and length if needed
         * to avoid reading in already uptodate ranges.
         */
@@ -126,7 +126,7 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
        }
 
        /*
-        * If the extent spans the block that contains the i_size we need to
+        * If the extent spans the block that contains the i_size, we need to
         * handle both halves separately so that we properly zero data in the
         * page cache for blocks that are entirely outside of i_size.
         */
@@ -205,60 +205,67 @@ struct iomap_readpage_ctx {
        struct readahead_control *rac;
 };
 
-static void
-iomap_read_inline_data(struct inode *inode, struct page *page,
-               struct iomap *iomap)
+static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
+               struct page *page)
 {
-       size_t size = i_size_read(inode);
+       const struct iomap *iomap = iomap_iter_srcmap(iter);
+       size_t size = i_size_read(iter->inode) - iomap->offset;
+       size_t poff = offset_in_page(iomap->offset);
        void *addr;
 
        if (PageUptodate(page))
-               return;
-
-       BUG_ON(page_has_private(page));
-       BUG_ON(page->index);
-       BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
-
-       addr = kmap_atomic(page);
+               return PAGE_SIZE - poff;
+
+       if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
+               return -EIO;
+       if (WARN_ON_ONCE(size > PAGE_SIZE -
+                        offset_in_page(iomap->inline_data)))
+               return -EIO;
+       if (WARN_ON_ONCE(size > iomap->length))
+               return -EIO;
+       if (poff > 0)
+               iomap_page_create(iter->inode, page);
+
+       addr = kmap_local_page(page) + poff;
        memcpy(addr, iomap->inline_data, size);
-       memset(addr + size, 0, PAGE_SIZE - size);
-       kunmap_atomic(addr);
-       SetPageUptodate(page);
+       memset(addr + size, 0, PAGE_SIZE - poff - size);
+       kunmap_local(addr);
+       iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
+       return PAGE_SIZE - poff;
 }
 
-static inline bool iomap_block_needs_zeroing(struct inode *inode,
-               struct iomap *iomap, loff_t pos)
+static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
+               loff_t pos)
 {
-       return iomap->type != IOMAP_MAPPED ||
-               (iomap->flags & IOMAP_F_NEW) ||
-               pos >= i_size_read(inode);
+       const struct iomap *srcmap = iomap_iter_srcmap(iter);
+
+       return srcmap->type != IOMAP_MAPPED ||
+               (srcmap->flags & IOMAP_F_NEW) ||
+               pos >= i_size_read(iter->inode);
 }
 
-static loff_t
-iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
-               struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
+               struct iomap_readpage_ctx *ctx, loff_t offset)
 {
-       struct iomap_readpage_ctx *ctx = data;
+       const struct iomap *iomap = &iter->iomap;
+       loff_t pos = iter->pos + offset;
+       loff_t length = iomap_length(iter) - offset;
        struct page *page = ctx->cur_page;
        struct iomap_page *iop;
-       bool same_page = false, is_contig = false;
        loff_t orig_pos = pos;
        unsigned poff, plen;
        sector_t sector;
 
-       if (iomap->type == IOMAP_INLINE) {
-               WARN_ON_ONCE(pos);
-               iomap_read_inline_data(inode, page, iomap);
-               return PAGE_SIZE;
-       }
+       if (iomap->type == IOMAP_INLINE)
+               return min(iomap_read_inline_data(iter, page), length);
 
        /* zero post-eof blocks as the page may be mapped */
-       iop = iomap_page_create(inode, page);
-       iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
+       iop = iomap_page_create(iter->inode, page);
+       iomap_adjust_read_range(iter->inode, iop, &pos, length, &poff, &plen);
        if (plen == 0)
                goto done;
 
-       if (iomap_block_needs_zeroing(inode, iomap, pos)) {
+       if (iomap_block_needs_zeroing(iter, pos)) {
                zero_user(page, poff, plen);
                iomap_set_range_uptodate(page, poff, plen);
                goto done;
@@ -268,16 +275,10 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
        if (iop)
                atomic_add(plen, &iop->read_bytes_pending);
 
-       /* Try to merge into a previous segment if we can */
        sector = iomap_sector(iomap, pos);
-       if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
-               if (__bio_try_merge_page(ctx->bio, page, plen, poff,
-                               &same_page))
-                       goto done;
-               is_contig = true;
-       }
-
-       if (!is_contig || bio_full(ctx->bio, plen)) {
+       if (!ctx->bio ||
+           bio_end_sector(ctx->bio) != sector ||
+           bio_add_page(ctx->bio, page, plen, poff) != plen) {
                gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
                gfp_t orig_gfp = gfp;
                unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -301,13 +302,12 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                ctx->bio->bi_iter.bi_sector = sector;
                bio_set_dev(ctx->bio, iomap->bdev);
                ctx->bio->bi_end_io = iomap_read_end_io;
+               __bio_add_page(ctx->bio, page, plen, poff);
        }
-
-       bio_add_page(ctx->bio, page, plen, poff);
 done:
        /*
         * Move the caller beyond our range so that it keeps making progress.
-        * For that we have to include any leading non-uptodate ranges, but
+        * For that, we have to include any leading non-uptodate ranges, but
         * we can skip trailing ones as they will be handled in the next
         * iteration.
         */
@@ -317,23 +317,23 @@ done:
 int
 iomap_readpage(struct page *page, const struct iomap_ops *ops)
 {
-       struct iomap_readpage_ctx ctx = { .cur_page = page };
-       struct inode *inode = page->mapping->host;
-       unsigned poff;
-       loff_t ret;
+       struct iomap_iter iter = {
+               .inode          = page->mapping->host,
+               .pos            = page_offset(page),
+               .len            = PAGE_SIZE,
+       };
+       struct iomap_readpage_ctx ctx = {
+               .cur_page       = page,
+       };
+       int ret;
 
        trace_iomap_readpage(page->mapping->host, 1);
 
-       for (poff = 0; poff < PAGE_SIZE; poff += ret) {
-               ret = iomap_apply(inode, page_offset(page) + poff,
-                               PAGE_SIZE - poff, 0, ops, &ctx,
-                               iomap_readpage_actor);
-               if (ret <= 0) {
-                       WARN_ON_ONCE(ret == 0);
-                       SetPageError(page);
-                       break;
-               }
-       }
+       while ((ret = iomap_iter(&iter, ops)) > 0)
+               iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
+
+       if (ret < 0)
+               SetPageError(page);
 
        if (ctx.bio) {
                submit_bio(ctx.bio);
@@ -344,23 +344,22 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
        }
 
        /*
-        * Just like mpage_readahead and block_read_full_page we always
+        * Just like mpage_readahead and block_read_full_page, we always
         * return 0 and just mark the page as PageError on errors.  This
-        * should be cleaned up all through the stack eventually.
+        * should be cleaned up throughout the stack eventually.
         */
        return 0;
 }
 EXPORT_SYMBOL_GPL(iomap_readpage);
 
-static loff_t
-iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
-               void *data, struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
+               struct iomap_readpage_ctx *ctx)
 {
-       struct iomap_readpage_ctx *ctx = data;
+       loff_t length = iomap_length(iter);
        loff_t done, ret;
 
        for (done = 0; done < length; done += ret) {
-               if (ctx->cur_page && offset_in_page(pos + done) == 0) {
+               if (ctx->cur_page && offset_in_page(iter->pos + done) == 0) {
                        if (!ctx->cur_page_in_bio)
                                unlock_page(ctx->cur_page);
                        put_page(ctx->cur_page);
@@ -370,8 +369,7 @@ iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
                        ctx->cur_page = readahead_page(ctx->rac);
                        ctx->cur_page_in_bio = false;
                }
-               ret = iomap_readpage_actor(inode, pos + done, length - done,
-                               ctx, iomap, srcmap);
+               ret = iomap_readpage_iter(iter, ctx, done);
        }
 
        return done;
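
iomap_readpage() above is the first caller converted from the iomap_apply()/actor style to the new iteration model; every other conversion in the hunks below has the same shape. A minimal sketch of that calling convention, with example_iter_user() and its zero flags as placeholders, assuming kernel context:

#include <linux/fs.h>
#include <linux/iomap.h>

static int example_iter_user(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops)
{
        struct iomap_iter iter = {
                .inode  = inode,
                .pos    = pos,
                .len    = len,
                .flags  = 0,    /* IOMAP_WRITE, IOMAP_ZERO, ... as appropriate */
        };
        int ret;

        /*
         * iomap_iter() asks the filesystem to map the current (pos, len)
         * window and returns > 0 while there is more to do.  The loop body
         * records how much it handled in iter.processed, and the next
         * iomap_iter() call advances the window by that amount.
         */
        while ((ret = iomap_iter(&iter, ops)) > 0)
                iter.processed = iomap_length(&iter);   /* claim the whole extent */

        return ret;
}
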
@@ -394,25 +392,19 @@ iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
  */
 void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 {
-       struct inode *inode = rac->mapping->host;
-       loff_t pos = readahead_pos(rac);
-       size_t length = readahead_length(rac);
+       struct iomap_iter iter = {
+               .inode  = rac->mapping->host,
+               .pos    = readahead_pos(rac),
+               .len    = readahead_length(rac),
+       };
        struct iomap_readpage_ctx ctx = {
                .rac    = rac,
        };
 
-       trace_iomap_readahead(inode, readahead_count(rac));
+       trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
 
-       while (length > 0) {
-               ssize_t ret = iomap_apply(inode, pos, length, 0, ops,
-                               &ctx, iomap_readahead_actor);
-               if (ret <= 0) {
-                       WARN_ON_ONCE(ret == 0);
-                       break;
-               }
-               pos += ret;
-               length -= ret;
-       }
+       while (iomap_iter(&iter, ops) > 0)
+               iter.processed = iomap_readahead_iter(&iter, &ctx);
 
        if (ctx.bio)
                submit_bio(ctx.bio);
@@ -467,7 +459,7 @@ iomap_releasepage(struct page *page, gfp_t gfp_mask)
        /*
         * mm accommodates an old ext3 case where clean pages might not have had
         * the dirty bit cleared. Thus, it can send actual dirty pages to
-        * ->releasepage() via shrink_active_list(), skip those here.
+        * ->releasepage() via shrink_active_list(); skip those here.
         */
        if (PageDirty(page) || PageWriteback(page))
                return 0;
@@ -482,7 +474,7 @@ iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
        trace_iomap_invalidatepage(page->mapping->host, offset, len);
 
        /*
-        * If we are invalidating the entire page, clear the dirty state from it
+        * If we're invalidating the entire page, clear the dirty state from it
         * and release it to avoid unnecessary buildup of the LRU.
         */
        if (offset == 0 && len == PAGE_SIZE) {
@@ -516,10 +508,6 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
 EXPORT_SYMBOL_GPL(iomap_migrate_page);
 #endif /* CONFIG_MIGRATION */
 
-enum {
-       IOMAP_WRITE_F_UNSHARE           = (1 << 0),
-};
-
 static void
 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
 {
@@ -535,7 +523,7 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
 
 static int
 iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
-               unsigned plen, struct iomap *iomap)
+               unsigned plen, const struct iomap *iomap)
 {
        struct bio_vec bvec;
        struct bio bio;
@@ -548,12 +536,12 @@ iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
        return submit_bio_wait(&bio);
 }
 
-static int
-__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
-               struct page *page, struct iomap *srcmap)
+static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
+               unsigned len, struct page *page)
 {
-       struct iomap_page *iop = iomap_page_create(inode, page);
-       loff_t block_size = i_blocksize(inode);
+       const struct iomap *srcmap = iomap_iter_srcmap(iter);
+       struct iomap_page *iop = iomap_page_create(iter->inode, page);
+       loff_t block_size = i_blocksize(iter->inode);
        loff_t block_start = round_down(pos, block_size);
        loff_t block_end = round_up(pos + len, block_size);
        unsigned from = offset_in_page(pos), to = from + len, poff, plen;
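
__iomap_write_begin() above rounds the byte range being written out to filesystem block boundaries; the following hunk then walks those blocks and either zeroes or reads in the parts the write does not cover. A standalone sketch of the rounding arithmetic (the 1024-byte block size and the offsets are assumptions for illustration):

#include <stdio.h>

/* Power-of-two rounding, equivalent to the kernel's round_down()/round_up(). */
static long long rdown(long long x, long long a) { return x & ~(a - 1); }
static long long rup(long long x, long long a)   { return (x + a - 1) & ~(a - 1); }

int main(void)
{
        long long block_size = 1024;            /* assumed i_blocksize() */
        long long pos = 1500, len = 400;        /* write of 400 bytes at offset 1500 */
        long long block_start = rdown(pos, block_size);        /* 1024 */
        long long block_end = rup(pos + len, block_size);      /* 2048 */

        /*
         * [1024, 1500) and [1900, 2048) are not covered by the write and must
         * be zeroed (new/unwritten blocks) or read in; [1500, 1900) will be
         * overwritten anyway.
         */
        printf("write [%lld, %lld) -> blocks [%lld, %lld)\n",
               pos, pos + len, block_start, block_end);
        return 0;
}
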
@@ -563,18 +551,18 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
        ClearPageError(page);
 
        do {
-               iomap_adjust_read_range(inode, iop, &block_start,
+               iomap_adjust_read_range(iter->inode, iop, &block_start,
                                block_end - block_start, &poff, &plen);
                if (plen == 0)
                        break;
 
-               if (!(flags & IOMAP_WRITE_F_UNSHARE) &&
+               if (!(iter->flags & IOMAP_UNSHARE) &&
                    (from <= poff || from >= poff + plen) &&
                    (to <= poff || to >= poff + plen))
                        continue;
 
-               if (iomap_block_needs_zeroing(inode, srcmap, block_start)) {
-                       if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
+               if (iomap_block_needs_zeroing(iter, block_start)) {
+                       if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
                                return -EIO;
                        zero_user_segments(page, poff, from, to, poff + plen);
                } else {
@@ -589,41 +577,54 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
        return 0;
 }
 
-static int
-iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
-               struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
+static int iomap_write_begin_inline(const struct iomap_iter *iter,
+               struct page *page)
 {
-       const struct iomap_page_ops *page_ops = iomap->page_ops;
+       int ret;
+
+       /* needs more work for the tailpacking case; disable for now */
+       if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
+               return -EIO;
+       ret = iomap_read_inline_data(iter, page);
+       if (ret < 0)
+               return ret;
+       return 0;
+}
+
+static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
+               unsigned len, struct page **pagep)
+{
+       const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
+       const struct iomap *srcmap = iomap_iter_srcmap(iter);
        struct page *page;
        int status = 0;
 
-       BUG_ON(pos + len > iomap->offset + iomap->length);
-       if (srcmap != iomap)
+       BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
+       if (srcmap != &iter->iomap)
                BUG_ON(pos + len > srcmap->offset + srcmap->length);
 
        if (fatal_signal_pending(current))
                return -EINTR;
 
        if (page_ops && page_ops->page_prepare) {
-               status = page_ops->page_prepare(inode, pos, len, iomap);
+               status = page_ops->page_prepare(iter->inode, pos, len);
                if (status)
                        return status;
        }
 
-       page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
-                       AOP_FLAG_NOFS);
+       page = grab_cache_page_write_begin(iter->inode->i_mapping,
+                               pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
        if (!page) {
                status = -ENOMEM;
                goto out_no_page;
        }
 
        if (srcmap->type == IOMAP_INLINE)
-               iomap_read_inline_data(inode, page, srcmap);
-       else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
+               status = iomap_write_begin_inline(iter, page);
+       else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
                status = __block_write_begin_int(page, pos, len, NULL, srcmap);
        else
-               status = __iomap_write_begin(inode, pos, len, flags, page,
-                               srcmap);
+               status = __iomap_write_begin(iter, pos, len, page);
 
        if (unlikely(status))
                goto out_unlock;
@@ -634,11 +635,11 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 out_unlock:
        unlock_page(page);
        put_page(page);
-       iomap_write_failed(inode, pos, len);
+       iomap_write_failed(iter->inode, pos, len);
 
 out_no_page:
        if (page_ops && page_ops->page_done)
-               page_ops->page_done(inode, pos, 0, NULL, iomap);
+               page_ops->page_done(iter->inode, pos, 0, NULL);
        return status;
 }
 
@@ -650,13 +651,13 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
        /*
         * The blocks that were entirely written will now be uptodate, so we
         * don't have to worry about a readpage reading them and overwriting a
-        * partial write.  However if we have encountered a short write and only
+        * partial write.  However, if we've encountered a short write and only
         * partially written into a block, it will not be marked uptodate, so a
         * readpage might come in and destroy our partial write.
         *
-        * Do the simplest thing, and just treat any short write to a non
-        * uptodate page as a zero-length write, and force the caller to redo
-        * the whole thing.
+        * Do the simplest thing and just treat any short write to a
+        * non-uptodate page as a zero-length write, and force the caller to
+        * redo the whole thing.
         */
        if (unlikely(copied < len && !PageUptodate(page)))
                return 0;
@@ -665,39 +666,40 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
        return copied;
 }
 
-static size_t iomap_write_end_inline(struct inode *inode, struct page *page,
-               struct iomap *iomap, loff_t pos, size_t copied)
+static size_t iomap_write_end_inline(const struct iomap_iter *iter,
+               struct page *page, loff_t pos, size_t copied)
 {
+       const struct iomap *iomap = &iter->iomap;
        void *addr;
 
        WARN_ON_ONCE(!PageUptodate(page));
-       BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
+       BUG_ON(!iomap_inline_data_valid(iomap));
 
        flush_dcache_page(page);
-       addr = kmap_atomic(page);
-       memcpy(iomap->inline_data + pos, addr + pos, copied);
-       kunmap_atomic(addr);
+       addr = kmap_local_page(page) + pos;
+       memcpy(iomap_inline_data(iomap, pos), addr, copied);
+       kunmap_local(addr);
 
-       mark_inode_dirty(inode);
+       mark_inode_dirty(iter->inode);
        return copied;
 }
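
iomap_write_end_inline() above now validates and addresses the inline region through iomap_inline_data_valid() and iomap_inline_data() rather than open-coding the offset math on iomap->inline_data. A sketch of what those helpers amount to, inferred from the call sites in this diff; the authoritative definitions are in include/linux/iomap.h:

#include <linux/iomap.h>
#include <linux/mm.h>

static inline void *example_inline_data(const struct iomap *iomap, loff_t pos)
{
        /* byte 'pos' of the file, relative to the start of the inline extent */
        return iomap->inline_data + pos - iomap->offset;
}

static inline bool example_inline_data_valid(const struct iomap *iomap)
{
        /* the inline extent must fit in the page that holds it */
        return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
}
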
 
 /* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
-static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
-               size_t copied, struct page *page, struct iomap *iomap,
-               struct iomap *srcmap)
+static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
+               size_t copied, struct page *page)
 {
-       const struct iomap_page_ops *page_ops = iomap->page_ops;
-       loff_t old_size = inode->i_size;
+       const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
+       const struct iomap *srcmap = iomap_iter_srcmap(iter);
+       loff_t old_size = iter->inode->i_size;
        size_t ret;
 
        if (srcmap->type == IOMAP_INLINE) {
-               ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
+               ret = iomap_write_end_inline(iter, page, pos, copied);
        } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
-               ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
-                               page, NULL);
+               ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
+                               copied, page, NULL);
        } else {
-               ret = __iomap_write_end(inode, pos, len, copied, page);
+               ret = __iomap_write_end(iter->inode, pos, len, copied, page);
        }
 
        /*
@@ -706,29 +708,28 @@ static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
         * preferably after I/O completion so that no stale data is exposed.
         */
        if (pos + ret > old_size) {
-               i_size_write(inode, pos + ret);
-               iomap->flags |= IOMAP_F_SIZE_CHANGED;
+               i_size_write(iter->inode, pos + ret);
+               iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
        }
        unlock_page(page);
 
        if (old_size < pos)
-               pagecache_isize_extended(inode, old_size, pos);
+               pagecache_isize_extended(iter->inode, old_size, pos);
        if (page_ops && page_ops->page_done)
-               page_ops->page_done(inode, pos, ret, page, iomap);
+               page_ops->page_done(iter->inode, pos, ret, page);
        put_page(page);
 
        if (ret < len)
-               iomap_write_failed(inode, pos, len);
+               iomap_write_failed(iter->inode, pos, len);
        return ret;
 }
 
-static loff_t
-iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
-               struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 {
-       struct iov_iter *i = data;
-       long status = 0;
+       loff_t length = iomap_length(iter);
+       loff_t pos = iter->pos;
        ssize_t written = 0;
+       long status = 0;
 
        do {
                struct page *page;
@@ -744,7 +745,7 @@ again:
                        bytes = length;
 
                /*
-                * Bring in the user page that we will copy from _first_.
+                * Bring in the user page that we'll copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
@@ -754,18 +755,16 @@ again:
                        break;
                }
 
-               status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
-                               srcmap);
+               status = iomap_write_begin(iter, pos, bytes, &page);
                if (unlikely(status))
                        break;
 
-               if (mapping_writably_mapped(inode->i_mapping))
+               if (mapping_writably_mapped(iter->inode->i_mapping))
                        flush_dcache_page(page);
 
                copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 
-               status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
-                               srcmap);
+               status = iomap_write_end(iter, pos, bytes, copied, page);
 
                if (unlikely(copied != status))
                        iov_iter_revert(i, copied - status);
@@ -786,36 +785,38 @@ again:
                written += status;
                length -= status;
 
-               balance_dirty_pages_ratelimited(inode->i_mapping);
+               balance_dirty_pages_ratelimited(iter->inode->i_mapping);
        } while (iov_iter_count(i) && length);
 
        return written ? written : status;
 }
 
 ssize_t
-iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
+iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
                const struct iomap_ops *ops)
 {
-       struct inode *inode = iocb->ki_filp->f_mapping->host;
-       loff_t pos = iocb->ki_pos, ret = 0, written = 0;
-
-       while (iov_iter_count(iter)) {
-               ret = iomap_apply(inode, pos, iov_iter_count(iter),
-                               IOMAP_WRITE, ops, iter, iomap_write_actor);
-               if (ret <= 0)
-                       break;
-               pos += ret;
-               written += ret;
-       }
+       struct iomap_iter iter = {
+               .inode          = iocb->ki_filp->f_mapping->host,
+               .pos            = iocb->ki_pos,
+               .len            = iov_iter_count(i),
+               .flags          = IOMAP_WRITE,
+       };
+       int ret;
 
-       return written ? written : ret;
+       while ((ret = iomap_iter(&iter, ops)) > 0)
+               iter.processed = iomap_write_iter(&iter, i);
+       if (iter.pos == iocb->ki_pos)
+               return ret;
+       return iter.pos - iocb->ki_pos;
 }
 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
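
With the loop above, iomap_file_buffered_write() no longer keeps its own running byte count: iomap_iter() advances iter.pos by iter.processed after each pass, so the bytes written are just the distance the iterator travelled from iocb->ki_pos. A standalone sketch of that bookkeeping (the per-pass byte counts are made up):

#include <stdio.h>

int main(void)
{
        long long ki_pos = 4096;                        /* starting position */
        long long pos = ki_pos, len = 10000;            /* iter.pos / iter.len */
        long long processed[] = { 4096, 4096, 1808 };   /* per-pass results */

        for (int i = 0; i < 3 && len > 0; i++) {
                pos += processed[i];    /* what iomap_iter_advance() does */
                len -= processed[i];
        }
        /* equivalent of "return iter.pos - iocb->ki_pos;" in the hunk above */
        printf("bytes written: %lld\n", pos - ki_pos);  /* 10000 */
        return 0;
}
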
 
-static loff_t
-iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
-               struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 {
+       struct iomap *iomap = &iter->iomap;
+       const struct iomap *srcmap = iomap_iter_srcmap(iter);
+       loff_t pos = iter->pos;
+       loff_t length = iomap_length(iter);
        long status = 0;
        loff_t written = 0;
 
@@ -831,13 +832,11 @@ iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
                struct page *page;
 
-               status = iomap_write_begin(inode, pos, bytes,
-                               IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
+               status = iomap_write_begin(iter, pos, bytes, &page);
                if (unlikely(status))
                        return status;
 
-               status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
-                               srcmap);
+               status = iomap_write_end(iter, pos, bytes, bytes, page);
                if (WARN_ON_ONCE(status == 0))
                        return -EIO;
 
@@ -847,7 +846,7 @@ iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                written += status;
                length -= status;
 
-               balance_dirty_pages_ratelimited(inode->i_mapping);
+               balance_dirty_pages_ratelimited(iter->inode->i_mapping);
        } while (length);
 
        return written;
@@ -857,44 +856,43 @@ int
 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops)
 {
-       loff_t ret;
-
-       while (len) {
-               ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
-                               iomap_unshare_actor);
-               if (ret <= 0)
-                       return ret;
-               pos += ret;
-               len -= ret;
-       }
+       struct iomap_iter iter = {
+               .inode          = inode,
+               .pos            = pos,
+               .len            = len,
+               .flags          = IOMAP_WRITE | IOMAP_UNSHARE,
+       };
+       int ret;
 
-       return 0;
+       while ((ret = iomap_iter(&iter, ops)) > 0)
+               iter.processed = iomap_unshare_iter(&iter);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(iomap_file_unshare);
 
-static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
-               struct iomap *iomap, struct iomap *srcmap)
+static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
 {
        struct page *page;
        int status;
        unsigned offset = offset_in_page(pos);
        unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
 
-       status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
+       status = iomap_write_begin(iter, pos, bytes, &page);
        if (status)
                return status;
 
        zero_user(page, offset, bytes);
        mark_page_accessed(page);
 
-       return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
+       return iomap_write_end(iter, pos, bytes, bytes, page);
 }
 
-static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
-               loff_t length, void *data, struct iomap *iomap,
-               struct iomap *srcmap)
+static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
 {
-       bool *did_zero = data;
+       struct iomap *iomap = &iter->iomap;
+       const struct iomap *srcmap = iomap_iter_srcmap(iter);
+       loff_t pos = iter->pos;
+       loff_t length = iomap_length(iter);
        loff_t written = 0;
 
        /* already zeroed?  we're done. */
@@ -904,10 +902,10 @@ static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
        do {
                s64 bytes;
 
-               if (IS_DAX(inode))
+               if (IS_DAX(iter->inode))
                        bytes = dax_iomap_zero(pos, length, iomap);
                else
-                       bytes = iomap_zero(inode, pos, length, iomap, srcmap);
+                       bytes = __iomap_zero_iter(iter, pos, length);
                if (bytes < 0)
                        return bytes;
 
@@ -925,19 +923,17 @@ int
 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
                const struct iomap_ops *ops)
 {
-       loff_t ret;
-
-       while (len > 0) {
-               ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
-                               ops, did_zero, iomap_zero_range_actor);
-               if (ret <= 0)
-                       return ret;
-
-               pos += ret;
-               len -= ret;
-       }
+       struct iomap_iter iter = {
+               .inode          = inode,
+               .pos            = pos,
+               .len            = len,
+               .flags          = IOMAP_ZERO,
+       };
+       int ret;
 
-       return 0;
+       while ((ret = iomap_iter(&iter, ops)) > 0)
+               iter.processed = iomap_zero_iter(&iter, did_zero);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(iomap_zero_range);
 
@@ -955,15 +951,15 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
 }
 EXPORT_SYMBOL_GPL(iomap_truncate_page);
 
-static loff_t
-iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
-               void *data, struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
+               struct page *page)
 {
-       struct page *page = data;
+       loff_t length = iomap_length(iter);
        int ret;
 
-       if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
-               ret = __block_write_begin_int(page, pos, length, NULL, iomap);
+       if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
+               ret = __block_write_begin_int(page, iter->pos, length, NULL,
+                                             &iter->iomap);
                if (ret)
                        return ret;
                block_commit_write(page, 0, length);
@@ -977,29 +973,24 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
 
 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
 {
+       struct iomap_iter iter = {
+               .inode          = file_inode(vmf->vma->vm_file),
+               .flags          = IOMAP_WRITE | IOMAP_FAULT,
+       };
        struct page *page = vmf->page;
-       struct inode *inode = file_inode(vmf->vma->vm_file);
-       unsigned long length;
-       loff_t offset;
        ssize_t ret;
 
        lock_page(page);
-       ret = page_mkwrite_check_truncate(page, inode);
+       ret = page_mkwrite_check_truncate(page, iter.inode);
        if (ret < 0)
                goto out_unlock;
-       length = ret;
-
-       offset = page_offset(page);
-       while (length > 0) {
-               ret = iomap_apply(inode, offset, length,
-                               IOMAP_WRITE | IOMAP_FAULT, ops, page,
-                               iomap_page_mkwrite_actor);
-               if (unlikely(ret <= 0))
-                       goto out_unlock;
-               offset += ret;
-               length -= ret;
-       }
+       iter.pos = page_offset(page);
+       iter.len = ret;
+       while ((ret = iomap_iter(&iter, ops)) > 0)
+               iter.processed = iomap_page_mkwrite_iter(&iter, page);
 
+       if (ret < 0)
+               goto out_unlock;
        wait_for_stable_page(page);
        return VM_FAULT_LOCKED;
 out_unlock:
@@ -1016,7 +1007,7 @@ iomap_finish_page_writeback(struct inode *inode, struct page *page,
 
        if (error) {
                SetPageError(page);
-               mapping_set_error(inode->i_mapping, -EIO);
+               mapping_set_error(inode->i_mapping, error);
        }
 
        WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
@@ -1153,7 +1144,7 @@ static void iomap_writepage_end_bio(struct bio *bio)
  * Submit the final bio for an ioend.
  *
  * If @error is non-zero, it means that we have a situation where some part of
- * the submission process has failed after we have marked paged for writeback
+ * the submission process has failed after we've marked pages for writeback
  * and unlocked them.  In this situation, we need to fail the bio instead of
  * submitting it.  This typically only happens on a filesystem shutdown.
  */
@@ -1168,7 +1159,7 @@ iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
                error = wpc->ops->prepare_ioend(ioend, error);
        if (error) {
                /*
-                * If we are failing the IO now, just mark the ioend with an
+                * If we're failing the IO now, just mark the ioend with an
                 * error and finish it.  This will run IO completion immediately
                 * as there is only one reference to the ioend at this point in
                 * time.
@@ -1210,7 +1201,7 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
 /*
  * Allocate a new bio, and chain the old bio to the new one.
  *
- * Note that we have to do perform the chaining in this unintuitive order
+ * Note that we have to perform the chaining in this unintuitive order
  * so that the bi_private linkage is set up in the right direction for the
  * traversal in iomap_finish_ioend().
  */
@@ -1249,7 +1240,7 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
 
 /*
  * Test to see if we have an existing ioend structure that we could append to
- * first, otherwise finish off the current ioend and start another.
+ * first; otherwise finish off the current ioend and start another.
  */
 static void
 iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
@@ -1259,7 +1250,6 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
        sector_t sector = iomap_sector(&wpc->iomap, offset);
        unsigned len = i_blocksize(inode);
        unsigned poff = offset & (PAGE_SIZE - 1);
-       bool merged, same_page = false;
 
        if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
                if (wpc->ioend)
@@ -1267,19 +1257,13 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
                wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
        }
 
-       merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
-                       &same_page);
-       if (iop)
-               atomic_add(len, &iop->write_bytes_pending);
-
-       if (!merged) {
-               if (bio_full(wpc->ioend->io_bio, len)) {
-                       wpc->ioend->io_bio =
-                               iomap_chain_bio(wpc->ioend->io_bio);
-               }
-               bio_add_page(wpc->ioend->io_bio, page, len, poff);
+       if (bio_add_page(wpc->ioend->io_bio, page, len, poff) != len) {
+               wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
+               __bio_add_page(wpc->ioend->io_bio, page, len, poff);
        }
 
+       if (iop)
+               atomic_add(len, &iop->write_bytes_pending);
        wpc->ioend->io_size += len;
        wbc_account_cgroup_owner(wbc, page, len);
 }
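
The hunk above replaces __bio_try_merge_page()-based merging with the simpler rule this series uses on both the read and writeback paths: try bio_add_page(), and only if the page does not fit, chain a fresh bio and add the page with __bio_add_page(), which cannot fail on an empty bio. A condensed sketch of the pattern, assuming it sits next to the file-local iomap_chain_bio() helper and omitting the iop accounting:

#include <linux/bio.h>
#include <linux/iomap.h>

static void example_add_page(struct iomap_ioend *ioend, struct page *page,
                unsigned int len, unsigned int poff)
{
        if (bio_add_page(ioend->io_bio, page, len, poff) != len) {
                /* current bio is full: chain a fresh one and retry */
                ioend->io_bio = iomap_chain_bio(ioend->io_bio);
                __bio_add_page(ioend->io_bio, page, len, poff);
        }
        ioend->io_size += len;
}
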
@@ -1287,9 +1271,9 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
 /*
  * We implement an immediate ioend submission policy here to avoid needing to
  * chain multiple ioends and hence nest mempool allocations which can violate
- * forward progress guarantees we need to provide. The current ioend we are
- * adding blocks to is cached on the writepage context, and if the new block
- * does not append to the cached ioend it will create a new ioend and cache that
+ * the forward progress guarantees we need to provide. The current ioend we're
+ * adding blocks to is cached in the writepage context, and if the new block
+ * doesn't append to the cached ioend, it will create a new ioend and cache that
  * instead.
  *
  * If a new ioend is created and cached, the old ioend is returned and queued
@@ -1351,7 +1335,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
        if (unlikely(error)) {
                /*
                 * Let the filesystem know what portion of the current page
-                * failed to map. If the page wasn't been added to ioend, it
+                * failed to map. If the page hasn't been added to ioend, it
                 * won't be affected by I/O completion and we must unlock it
                 * now.
                 */
@@ -1368,7 +1352,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
        unlock_page(page);
 
        /*
-        * Preserve the original error if there was one, otherwise catch
+        * Preserve the original error if there was one; otherwise catch
         * submission errors here and propagate into subsequent ioend
         * submissions.
         */
@@ -1395,8 +1379,8 @@ done:
 /*
  * Write out a dirty page.
  *
- * For delalloc space on the page we need to allocate space and flush it.
- * For unwritten space on the page we need to start the conversion to
+ * For delalloc space on the page, we need to allocate space and flush it.
+ * For unwritten space on the page, we need to start the conversion to
  * regular allocated space.
  */
 static int
@@ -1411,7 +1395,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
        trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
 
        /*
-        * Refuse to write the page out if we are called from reclaim context.
+        * Refuse to write the page out if we're called from reclaim context.
         *
         * This avoids stack overflows when called from deeply used stacks in
         * random callers for direct reclaim or memcg reclaim.  We explicitly
@@ -1456,20 +1440,20 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
                unsigned offset_into_page = offset & (PAGE_SIZE - 1);
 
                /*
-                * Skip the page if it is fully outside i_size, e.g. due to a
-                * truncate operation that is in progress. We must redirty the
+                * Skip the page if it's fully outside i_size, e.g. due to a
+                * truncate operation that's in progress. We must redirty the
                 * page so that reclaim stops reclaiming it. Otherwise
                 * iomap_vm_releasepage() is called on it and gets confused.
                 *
-                * Note that the end_index is unsigned long, it would overflow
-                * if the given offset is greater than 16TB on 32-bit system
-                * and if we do check the page is fully outside i_size or not
-                * via "if (page->index >= end_index + 1)" as "end_index + 1"
-                * will be evaluated to 0.  Hence this page will be redirtied
-                * and be written out repeatedly which would result in an
-                * infinite loop, the user program that perform this operation
-                * will hang.  Instead, we can verify this situation by checking
-                * if the page to write is totally beyond the i_size or if it's
+                * Note that the end_index is unsigned long.  If the given
+                * offset is greater than 16TB on a 32-bit system then if we
+                * checked if the page is fully outside i_size with
+                * "if (page->index >= end_index + 1)", "end_index + 1" would
+                * overflow and evaluate to 0.  Hence this page would be
+                * redirtied and written out repeatedly, which would result in
+                * an infinite loop; the user program performing this operation
+                * would hang.  Instead, we can detect this situation by
+                * checking if the page is totally beyond i_size or if its
                 * offset is just equal to the EOF.
                 */
                if (page->index > end_index ||
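
The rewritten comment above explains why the EOF test cannot be spelled "page->index >= end_index + 1" on 32-bit machines. A standalone sketch of the wraparound it guards against (the 4 KiB page size is an assumption; a 32-bit unsigned long is modelled with uint32_t):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t i_size = ((uint64_t)16 << 40) - 1;     /* just under 16 TiB */
        uint32_t end_index = i_size >> 12;              /* 0xffffffff with 4 KiB pages */

        /* end_index + 1 wraps to 0, so every page index would look in-bounds */
        printf("end_index = %u, end_index + 1 = %u\n",
               end_index, (uint32_t)(end_index + 1));
        return 0;
}
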
index 9398b8c..4ecd255 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016-2018 Christoph Hellwig.
+ * Copyright (c) 2016-2021 Christoph Hellwig.
  */
 #include <linux/module.h>
 #include <linux/compiler.h>
@@ -59,19 +59,17 @@ int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
 }
 EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
 
-static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
-               struct bio *bio, loff_t pos)
+static void iomap_dio_submit_bio(const struct iomap_iter *iter,
+               struct iomap_dio *dio, struct bio *bio, loff_t pos)
 {
        atomic_inc(&dio->ref);
 
        if (dio->iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(bio, dio->iocb);
 
-       dio->submit.last_queue = bdev_get_queue(iomap->bdev);
+       dio->submit.last_queue = bdev_get_queue(iter->iomap.bdev);
        if (dio->dops && dio->dops->submit_io)
-               dio->submit.cookie = dio->dops->submit_io(
-                               file_inode(dio->iocb->ki_filp),
-                               iomap, bio, pos);
+               dio->submit.cookie = dio->dops->submit_io(iter, bio, pos);
        else
                dio->submit.cookie = submit_bio(bio);
 }
@@ -181,24 +179,23 @@ static void iomap_dio_bio_end_io(struct bio *bio)
        }
 }
 
-static void
-iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
-               unsigned len)
+static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
+               loff_t pos, unsigned len)
 {
        struct page *page = ZERO_PAGE(0);
        int flags = REQ_SYNC | REQ_IDLE;
        struct bio *bio;
 
        bio = bio_alloc(GFP_KERNEL, 1);
-       bio_set_dev(bio, iomap->bdev);
-       bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
+       bio_set_dev(bio, iter->iomap.bdev);
+       bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
        bio->bi_private = dio;
        bio->bi_end_io = iomap_dio_bio_end_io;
 
        get_page(page);
        __bio_add_page(bio, page, len, 0);
        bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
-       iomap_dio_submit_bio(dio, iomap, bio, pos);
+       iomap_dio_submit_bio(iter, dio, bio, pos);
 }
 
 /*
@@ -206,8 +203,8 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
  * mapping, and whether or not we want FUA.  Note that we can end up
  * clearing the WRITE_FUA flag in the dio request.
  */
-static inline unsigned int
-iomap_dio_bio_opflags(struct iomap_dio *dio, struct iomap *iomap, bool use_fua)
+static inline unsigned int iomap_dio_bio_opflags(struct iomap_dio *dio,
+               const struct iomap *iomap, bool use_fua)
 {
        unsigned int opflags = REQ_SYNC | REQ_IDLE;
 
@@ -229,13 +226,16 @@ iomap_dio_bio_opflags(struct iomap_dio *dio, struct iomap *iomap, bool use_fua)
        return opflags;
 }
 
-static loff_t
-iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
-               struct iomap_dio *dio, struct iomap *iomap)
+static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
+               struct iomap_dio *dio)
 {
+       const struct iomap *iomap = &iter->iomap;
+       struct inode *inode = iter->inode;
        unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
        unsigned int fs_block_size = i_blocksize(inode), pad;
        unsigned int align = iov_iter_alignment(dio->submit.iter);
+       loff_t length = iomap_length(iter);
+       loff_t pos = iter->pos;
        unsigned int bio_opf;
        struct bio *bio;
        bool need_zeroout = false;
@@ -286,7 +286,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
                /* zero out from the start of the block to the write offset */
                pad = pos & (fs_block_size - 1);
                if (pad)
-                       iomap_dio_zero(dio, iomap, pos - pad, pad);
+                       iomap_dio_zero(iter, dio, pos - pad, pad);
        }
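
The hunk above zeroes the sub-block span in front of an unaligned direct write (a later hunk does the same for the tail) so the enclosing filesystem block never exposes stale data. A standalone sketch of the pad arithmetic (the 4096-byte block size and the offset are assumptions):

#include <stdio.h>

int main(void)
{
        long long fs_block_size = 4096;                 /* assumed i_blocksize() */
        long long pos = 10000;                          /* unaligned write offset */
        long long pad = pos & (fs_block_size - 1);      /* 10000 % 4096 = 1808 */

        if (pad)        /* zero [pos - pad, pos), i.e. [8192, 10000) */
                printf("zero %lld bytes at offset %lld\n", pad, pos - pad);
        return 0;
}
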
 
        /*
@@ -339,7 +339,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
 
                nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
                                                 BIO_MAX_VECS);
-               iomap_dio_submit_bio(dio, iomap, bio, pos);
+               iomap_dio_submit_bio(iter, dio, bio, pos);
                pos += n;
        } while (nr_pages);
 
@@ -355,7 +355,7 @@ zero_tail:
                /* zero out from the end of the write to the end of the block */
                pad = pos & (fs_block_size - 1);
                if (pad)
-                       iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
+                       iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
        }
 out:
        /* Undo iter limitation to current extent */
@@ -365,65 +365,67 @@ out:
        return ret;
 }
 
-static loff_t
-iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
+static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
+               struct iomap_dio *dio)
 {
-       length = iov_iter_zero(length, dio->submit.iter);
+       loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);
+
        dio->size += length;
        return length;
 }
 
-static loff_t
-iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
-               struct iomap_dio *dio, struct iomap *iomap)
+static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
+               struct iomap_dio *dio)
 {
+       const struct iomap *iomap = &iomi->iomap;
        struct iov_iter *iter = dio->submit.iter;
+       void *inline_data = iomap_inline_data(iomap, iomi->pos);
+       loff_t length = iomap_length(iomi);
+       loff_t pos = iomi->pos;
        size_t copied;
 
-       BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
+       if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
+               return -EIO;
 
        if (dio->flags & IOMAP_DIO_WRITE) {
-               loff_t size = inode->i_size;
+               loff_t size = iomi->inode->i_size;
 
                if (pos > size)
-                       memset(iomap->inline_data + size, 0, pos - size);
-               copied = copy_from_iter(iomap->inline_data + pos, length, iter);
+                       memset(iomap_inline_data(iomap, size), 0, pos - size);
+               copied = copy_from_iter(inline_data, length, iter);
                if (copied) {
                        if (pos + copied > size)
-                               i_size_write(inode, pos + copied);
-                       mark_inode_dirty(inode);
+                               i_size_write(iomi->inode, pos + copied);
+                       mark_inode_dirty(iomi->inode);
                }
        } else {
-               copied = copy_to_iter(iomap->inline_data + pos, length, iter);
+               copied = copy_to_iter(inline_data, length, iter);
        }
        dio->size += copied;
        return copied;
 }
 
-static loff_t
-iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
-               void *data, struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_dio_iter(const struct iomap_iter *iter,
+               struct iomap_dio *dio)
 {
-       struct iomap_dio *dio = data;
-
-       switch (iomap->type) {
+       switch (iter->iomap.type) {
        case IOMAP_HOLE:
                if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
                        return -EIO;
-               return iomap_dio_hole_actor(length, dio);
+               return iomap_dio_hole_iter(iter, dio);
        case IOMAP_UNWRITTEN:
                if (!(dio->flags & IOMAP_DIO_WRITE))
-                       return iomap_dio_hole_actor(length, dio);
-               return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+                       return iomap_dio_hole_iter(iter, dio);
+               return iomap_dio_bio_iter(iter, dio);
        case IOMAP_MAPPED:
-               return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+               return iomap_dio_bio_iter(iter, dio);
        case IOMAP_INLINE:
-               return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
+               return iomap_dio_inline_iter(iter, dio);
        case IOMAP_DELALLOC:
                /*
                 * DIO is not serialised against mmap() access at all, and so
                 * if the page_mkwrite occurs between the writeback and the
-                * iomap_apply() call in the DIO path, then it will see the
+                * iomap_iter() call in the DIO path, then it will see the
                 * DELALLOC block that the page-mkwrite allocated.
                 */
                pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
@@ -454,16 +456,19 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 {
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = file_inode(iocb->ki_filp);
-       size_t count = iov_iter_count(iter);
-       loff_t pos = iocb->ki_pos;
-       loff_t end = iocb->ki_pos + count - 1, ret = 0;
+       struct iomap_iter iomi = {
+               .inode          = inode,
+               .pos            = iocb->ki_pos,
+               .len            = iov_iter_count(iter),
+               .flags          = IOMAP_DIRECT,
+       };
+       loff_t end = iomi.pos + iomi.len - 1, ret = 0;
        bool wait_for_completion =
                is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
-       unsigned int iomap_flags = IOMAP_DIRECT;
        struct blk_plug plug;
        struct iomap_dio *dio;
 
-       if (!count)
+       if (!iomi.len)
                return NULL;
 
        dio = kmalloc(sizeof(*dio), GFP_KERNEL);
@@ -484,29 +489,30 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        dio->submit.last_queue = NULL;
 
        if (iov_iter_rw(iter) == READ) {
-               if (pos >= dio->i_size)
+               if (iomi.pos >= dio->i_size)
                        goto out_free_dio;
 
                if (iocb->ki_flags & IOCB_NOWAIT) {
-                       if (filemap_range_needs_writeback(mapping, pos, end)) {
+                       if (filemap_range_needs_writeback(mapping, iomi.pos,
+                                       end)) {
                                ret = -EAGAIN;
                                goto out_free_dio;
                        }
-                       iomap_flags |= IOMAP_NOWAIT;
+                       iomi.flags |= IOMAP_NOWAIT;
                }
 
                if (iter_is_iovec(iter))
                        dio->flags |= IOMAP_DIO_DIRTY;
        } else {
-               iomap_flags |= IOMAP_WRITE;
+               iomi.flags |= IOMAP_WRITE;
                dio->flags |= IOMAP_DIO_WRITE;
 
                if (iocb->ki_flags & IOCB_NOWAIT) {
-                       if (filemap_range_has_page(mapping, pos, end)) {
+                       if (filemap_range_has_page(mapping, iomi.pos, end)) {
                                ret = -EAGAIN;
                                goto out_free_dio;
                        }
-                       iomap_flags |= IOMAP_NOWAIT;
+                       iomi.flags |= IOMAP_NOWAIT;
                }
 
                /* for data sync or sync, we need sync completion processing */
@@ -525,12 +531,13 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 
        if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
                ret = -EAGAIN;
-               if (pos >= dio->i_size || pos + count > dio->i_size)
+               if (iomi.pos >= dio->i_size ||
+                   iomi.pos + iomi.len > dio->i_size)
                        goto out_free_dio;
-               iomap_flags |= IOMAP_OVERWRITE_ONLY;
+               iomi.flags |= IOMAP_OVERWRITE_ONLY;
        }
 
-       ret = filemap_write_and_wait_range(mapping, pos, end);
+       ret = filemap_write_and_wait_range(mapping, iomi.pos, end);
        if (ret)
                goto out_free_dio;
 
@@ -540,9 +547,10 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                 * If this invalidation fails, let the caller fall back to
                 * buffered I/O.
                 */
-               if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
-                               end >> PAGE_SHIFT)) {
-                       trace_iomap_dio_invalidate_fail(inode, pos, count);
+               if (invalidate_inode_pages2_range(mapping,
+                               iomi.pos >> PAGE_SHIFT, end >> PAGE_SHIFT)) {
+                       trace_iomap_dio_invalidate_fail(inode, iomi.pos,
+                                                       iomi.len);
                        ret = -ENOTBLK;
                        goto out_free_dio;
                }
@@ -557,31 +565,23 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        inode_dio_begin(inode);
 
        blk_start_plug(&plug);
-       do {
-               ret = iomap_apply(inode, pos, count, iomap_flags, ops, dio,
-                               iomap_dio_actor);
-               if (ret <= 0) {
-                       /* magic error code to fall back to buffered I/O */
-                       if (ret == -ENOTBLK) {
-                               wait_for_completion = true;
-                               ret = 0;
-                       }
-                       break;
-               }
-               pos += ret;
-
-               if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
-                       /*
-                        * We only report that we've read data up to i_size.
-                        * Revert iter to a state corresponding to that as
-                        * some callers (such as splice code) rely on it.
-                        */
-                       iov_iter_revert(iter, pos - dio->i_size);
-                       break;
-               }
-       } while ((count = iov_iter_count(iter)) > 0);
+       while ((ret = iomap_iter(&iomi, ops)) > 0)
+               iomi.processed = iomap_dio_iter(&iomi, dio);
        blk_finish_plug(&plug);
 
+       /*
+        * We only report that we've read data up to i_size.
+        * Revert iter to a state corresponding to that as some callers (such
+        * as the splice code) rely on it.
+        */
+       if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
+               iov_iter_revert(iter, iomi.pos - dio->i_size);
+
+       /* magic error code to fall back to buffered I/O */
+       if (ret == -ENOTBLK) {
+               wait_for_completion = true;
+               ret = 0;
+       }
        if (ret < 0)
                iomap_dio_set_error(dio, ret);
 
index aab070d..66cf267 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2016-2018 Christoph Hellwig.
+ * Copyright (c) 2016-2021 Christoph Hellwig.
  */
 #include <linux/module.h>
 #include <linux/compiler.h>
@@ -8,13 +8,8 @@
 #include <linux/iomap.h>
 #include <linux/fiemap.h>
 
-struct fiemap_ctx {
-       struct fiemap_extent_info *fi;
-       struct iomap prev;
-};
-
 static int iomap_to_fiemap(struct fiemap_extent_info *fi,
-               struct iomap *iomap, u32 flags)
+               const struct iomap *iomap, u32 flags)
 {
        switch (iomap->type) {
        case IOMAP_HOLE:
@@ -43,24 +38,22 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
                        iomap->length, flags);
 }
 
-static loff_t
-iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
-               struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_fiemap_iter(const struct iomap_iter *iter,
+               struct fiemap_extent_info *fi, struct iomap *prev)
 {
-       struct fiemap_ctx *ctx = data;
-       loff_t ret = length;
+       int ret;
 
-       if (iomap->type == IOMAP_HOLE)
-               return length;
+       if (iter->iomap.type == IOMAP_HOLE)
+               return iomap_length(iter);
 
-       ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
-       ctx->prev = *iomap;
+       ret = iomap_to_fiemap(fi, prev, 0);
+       *prev = iter->iomap;
        switch (ret) {
        case 0:         /* success */
-               return length;
+               return iomap_length(iter);
        case 1:         /* extent array full */
                return 0;
-       default:
+       default:        /* error */
                return ret;
        }
 }
@@ -68,73 +61,63 @@ iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
                u64 start, u64 len, const struct iomap_ops *ops)
 {
-       struct fiemap_ctx ctx;
-       loff_t ret;
-
-       memset(&ctx, 0, sizeof(ctx));
-       ctx.fi = fi;
-       ctx.prev.type = IOMAP_HOLE;
+       struct iomap_iter iter = {
+               .inode          = inode,
+               .pos            = start,
+               .len            = len,
+               .flags          = IOMAP_REPORT,
+       };
+       struct iomap prev = {
+               .type           = IOMAP_HOLE,
+       };
+       int ret;
 
-       ret = fiemap_prep(inode, fi, start, &len, 0);
+       ret = fiemap_prep(inode, fi, start, &iter.len, 0);
        if (ret)
                return ret;
 
-       while (len > 0) {
-               ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
-                               iomap_fiemap_actor);
-               /* inode with no (attribute) mapping will give ENOENT */
-               if (ret == -ENOENT)
-                       break;
-               if (ret < 0)
-                       return ret;
-               if (ret == 0)
-                       break;
-
-               start += ret;
-               len -= ret;
-       }
+       while ((ret = iomap_iter(&iter, ops)) > 0)
+               iter.processed = iomap_fiemap_iter(&iter, fi, &prev);
 
-       if (ctx.prev.type != IOMAP_HOLE) {
-               ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
+       if (prev.type != IOMAP_HOLE) {
+               ret = iomap_to_fiemap(fi, &prev, FIEMAP_EXTENT_LAST);
                if (ret < 0)
                        return ret;
        }
 
+       /* inode with no (attribute) mapping will give ENOENT */
+       if (ret < 0 && ret != -ENOENT)
+               return ret;
        return 0;
 }
 EXPORT_SYMBOL_GPL(iomap_fiemap);
 
-static loff_t
-iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
-               void *data, struct iomap *iomap, struct iomap *srcmap)
-{
-       sector_t *bno = data, addr;
-
-       if (iomap->type == IOMAP_MAPPED) {
-               addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
-               *bno = addr;
-       }
-       return 0;
-}
-
 /* legacy ->bmap interface.  0 is the error return (!) */
 sector_t
 iomap_bmap(struct address_space *mapping, sector_t bno,
                const struct iomap_ops *ops)
 {
-       struct inode *inode = mapping->host;
-       loff_t pos = bno << inode->i_blkbits;
-       unsigned blocksize = i_blocksize(inode);
+       struct iomap_iter iter = {
+               .inode  = mapping->host,
+               .pos    = (loff_t)bno << mapping->host->i_blkbits,
+               .len    = i_blocksize(mapping->host),
+               .flags  = IOMAP_REPORT,
+       };
+       const unsigned int blkshift = mapping->host->i_blkbits - SECTOR_SHIFT;
        int ret;
 
        if (filemap_write_and_wait(mapping))
                return 0;
 
        bno = 0;
-       ret = iomap_apply(inode, pos, blocksize, 0, ops, &bno,
-                         iomap_bmap_actor);
+       while ((ret = iomap_iter(&iter, ops)) > 0) {
+               if (iter.iomap.type == IOMAP_MAPPED)
+                       bno = iomap_sector(&iter.iomap, iter.pos) >> blkshift;
+               /* leave iter.processed unset to abort loop */
+       }
        if (ret)
                return 0;
+
        return bno;
 }
 EXPORT_SYMBOL_GPL(iomap_bmap);
diff --git a/fs/iomap/iter.c b/fs/iomap/iter.c
new file mode 100644 (file)
index 0000000..a1c7592
--- /dev/null
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (c) 2016-2021 Christoph Hellwig.
+ */
+#include <linux/fs.h>
+#include <linux/iomap.h>
+#include "trace.h"
+
+static inline int iomap_iter_advance(struct iomap_iter *iter)
+{
+       /* handle the previous iteration (if any) */
+       if (iter->iomap.length) {
+               if (iter->processed <= 0)
+                       return iter->processed;
+               if (WARN_ON_ONCE(iter->processed > iomap_length(iter)))
+                       return -EIO;
+               iter->pos += iter->processed;
+               iter->len -= iter->processed;
+               if (!iter->len)
+                       return 0;
+       }
+
+       /* clear the state for the next iteration */
+       iter->processed = 0;
+       memset(&iter->iomap, 0, sizeof(iter->iomap));
+       memset(&iter->srcmap, 0, sizeof(iter->srcmap));
+       return 1;
+}
+
+static inline void iomap_iter_done(struct iomap_iter *iter)
+{
+       WARN_ON_ONCE(iter->iomap.offset > iter->pos);
+       WARN_ON_ONCE(iter->iomap.length == 0);
+       WARN_ON_ONCE(iter->iomap.offset + iter->iomap.length <= iter->pos);
+
+       trace_iomap_iter_dstmap(iter->inode, &iter->iomap);
+       if (iter->srcmap.type != IOMAP_HOLE)
+               trace_iomap_iter_srcmap(iter->inode, &iter->srcmap);
+}
+
+/**
+ * iomap_iter - iterate over ranges in a file
+ * @iter: iteration structure
+ * @ops: iomap ops provided by the file system
+ *
+ * Iterate over filesystem-provided space mappings for the provided file range.
+ *
+ * This function handles cleanup of resources acquired for iteration when the
+ * filesystem indicates there are no more space mappings, which means that this
+ * function must be called in a loop that continues as long as it returns a
+ * positive value.  If 0 or a negative value is returned, the caller must not
+ * return to the loop body.  Within the loop body, there are two ways to break
+ * out of the loop: leave @iter.processed unchanged, or set it to a negative
+ * errno.
+ */
+int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops)
+{
+       int ret;
+
+       if (iter->iomap.length && ops->iomap_end) {
+               ret = ops->iomap_end(iter->inode, iter->pos, iomap_length(iter),
+                               iter->processed > 0 ? iter->processed : 0,
+                               iter->flags, &iter->iomap);
+               if (ret < 0 && !iter->processed)
+                       return ret;
+       }
+
+       trace_iomap_iter(iter, ops, _RET_IP_);
+       ret = iomap_iter_advance(iter);
+       if (ret <= 0)
+               return ret;
+
+       ret = ops->iomap_begin(iter->inode, iter->pos, iter->len, iter->flags,
+                              &iter->iomap, &iter->srcmap);
+       if (ret < 0)
+               return ret;
+       iomap_iter_done(iter);
+       return 1;
+}
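
For reference, the loop protocol described in the kernel-doc above is the pattern followed by every converted caller in this series (iomap_fiemap(), iomap_seek_hole(), iomap_swapfile_activate()). A minimal sketch, assuming a hypothetical per-extent handler my_iomap_iter_fn() and a filesystem-supplied my_ops (neither is part of this patch), looks roughly like this:

/* Hypothetical per-extent handler; all names here are illustrative only. */
static loff_t my_iomap_iter_fn(const struct iomap_iter *iter)
{
        /* Consume the whole mapping; 0 or a negative errno would stop the loop. */
        return iomap_length(iter);
}

static int my_walk_range(struct inode *inode, loff_t pos, loff_t len,
                         const struct iomap_ops *my_ops)
{
        struct iomap_iter iter = {
                .inode  = inode,
                .pos    = pos,
                .len    = len,
                .flags  = IOMAP_REPORT,
        };
        int ret;

        /* Keep calling iomap_iter() while it hands back mappings (> 0). */
        while ((ret = iomap_iter(&iter, my_ops)) > 0)
                iter.processed = my_iomap_iter_fn(&iter);

        /* 0 once the range is exhausted, negative errno on error. */
        return ret;
}

Leaving iter.processed untouched inside the loop body, as iomap_bmap() does above, is the documented way to stop iteration early.
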
index ce6fb81..a845c01 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017 Red Hat, Inc.
- * Copyright (c) 2018 Christoph Hellwig.
+ * Copyright (c) 2018-2021 Christoph Hellwig.
  */
 #include <linux/module.h>
 #include <linux/compiler.h>
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
 
-static loff_t
-iomap_seek_hole_actor(struct inode *inode, loff_t start, loff_t length,
-                     void *data, struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_seek_hole_iter(const struct iomap_iter *iter,
+               loff_t *hole_pos)
 {
-       loff_t offset = start;
+       loff_t length = iomap_length(iter);
 
-       switch (iomap->type) {
+       switch (iter->iomap.type) {
        case IOMAP_UNWRITTEN:
-               offset = mapping_seek_hole_data(inode->i_mapping, start,
-                               start + length, SEEK_HOLE);
-               if (offset == start + length)
+               *hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
+                               iter->pos, iter->pos + length, SEEK_HOLE);
+               if (*hole_pos == iter->pos + length)
                        return length;
-               fallthrough;
+               return 0;
        case IOMAP_HOLE:
-               *(loff_t *)data = offset;
+               *hole_pos = iter->pos;
                return 0;
        default:
                return length;
@@ -32,70 +31,73 @@ iomap_seek_hole_actor(struct inode *inode, loff_t start, loff_t length,
 }
 
 loff_t
-iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+iomap_seek_hole(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
 {
        loff_t size = i_size_read(inode);
-       loff_t ret;
+       struct iomap_iter iter = {
+               .inode  = inode,
+               .pos    = pos,
+               .flags  = IOMAP_REPORT,
+       };
+       int ret;
 
        /* Nothing to be found before or beyond the end of the file. */
-       if (offset < 0 || offset >= size)
+       if (pos < 0 || pos >= size)
                return -ENXIO;
 
-       while (offset < size) {
-               ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
-                                 ops, &offset, iomap_seek_hole_actor);
-               if (ret < 0)
-                       return ret;
-               if (ret == 0)
-                       break;
-               offset += ret;
-       }
-
-       return offset;
+       iter.len = size - pos;
+       while ((ret = iomap_iter(&iter, ops)) > 0)
+               iter.processed = iomap_seek_hole_iter(&iter, &pos);
+       if (ret < 0)
+               return ret;
+       if (iter.len) /* found hole before EOF */
+               return pos;
+       return size;
 }
 EXPORT_SYMBOL_GPL(iomap_seek_hole);
 
-static loff_t
-iomap_seek_data_actor(struct inode *inode, loff_t start, loff_t length,
-                     void *data, struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_seek_data_iter(const struct iomap_iter *iter,
+               loff_t *hole_pos)
 {
-       loff_t offset = start;
+       loff_t length = iomap_length(iter);
 
-       switch (iomap->type) {
+       switch (iter->iomap.type) {
        case IOMAP_HOLE:
                return length;
        case IOMAP_UNWRITTEN:
-               offset = mapping_seek_hole_data(inode->i_mapping, start,
-                               start + length, SEEK_DATA);
-               if (offset < 0)
+               *hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
+                               iter->pos, iter->pos + length, SEEK_DATA);
+               if (*hole_pos < 0)
                        return length;
-               fallthrough;
+               return 0;
        default:
-               *(loff_t *)data = offset;
+               *hole_pos = iter->pos;
                return 0;
        }
 }
 
 loff_t
-iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+iomap_seek_data(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
 {
        loff_t size = i_size_read(inode);
-       loff_t ret;
+       struct iomap_iter iter = {
+               .inode  = inode,
+               .pos    = pos,
+               .flags  = IOMAP_REPORT,
+       };
+       int ret;
 
        /* Nothing to be found before or beyond the end of the file. */
-       if (offset < 0 || offset >= size)
+       if (pos < 0 || pos >= size)
                return -ENXIO;
 
-       while (offset < size) {
-               ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
-                                 ops, &offset, iomap_seek_data_actor);
-               if (ret < 0)
-                       return ret;
-               if (ret == 0)
-                       return offset;
-               offset += ret;
-       }
-
+       iter.len = size - pos;
+       while ((ret = iomap_iter(&iter, ops)) > 0)
+               iter.processed = iomap_seek_data_iter(&iter, &pos);
+       if (ret < 0)
+               return ret;
+       if (iter.len) /* found data before EOF */
+               return pos;
        /* We've reached the end of the file without finding data */
        return -ENXIO;
 }
index 6250ca6..5fc0ac3 100644 (file)
@@ -31,11 +31,16 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
 {
        struct iomap *iomap = &isi->iomap;
        unsigned long nr_pages;
+       unsigned long max_pages;
        uint64_t first_ppage;
        uint64_t first_ppage_reported;
        uint64_t next_ppage;
        int error;
 
+       if (unlikely(isi->nr_pages >= isi->sis->max))
+               return 0;
+       max_pages = isi->sis->max - isi->nr_pages;
+
        /*
         * Round the start up and the end down so that the physical
         * extent aligns to a page boundary.
@@ -48,6 +53,7 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
        if (first_ppage >= next_ppage)
                return 0;
        nr_pages = next_ppage - first_ppage;
+       nr_pages = min(nr_pages, max_pages);
 
        /*
         * Calculate how much swap space we're adding; the first page contains
@@ -88,13 +94,9 @@ static int iomap_swapfile_fail(struct iomap_swapfile_info *isi, const char *str)
  * swap only cares about contiguous page-aligned physical extents and makes no
  * distinction between written and unwritten extents.
  */
-static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
-               loff_t count, void *data, struct iomap *iomap,
-               struct iomap *srcmap)
+static loff_t iomap_swapfile_iter(const struct iomap_iter *iter,
+               struct iomap *iomap, struct iomap_swapfile_info *isi)
 {
-       struct iomap_swapfile_info *isi = data;
-       int error;
-
        switch (iomap->type) {
        case IOMAP_MAPPED:
        case IOMAP_UNWRITTEN:
@@ -125,12 +127,12 @@ static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
                isi->iomap.length += iomap->length;
        } else {
                /* Otherwise, add the retained iomap and store this one. */
-               error = iomap_swapfile_add_extent(isi);
+               int error = iomap_swapfile_add_extent(isi);
                if (error)
                        return error;
                memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
        }
-       return count;
+       return iomap_length(iter);
 }
 
 /*
@@ -141,16 +143,19 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
                struct file *swap_file, sector_t *pagespan,
                const struct iomap_ops *ops)
 {
+       struct inode *inode = swap_file->f_mapping->host;
+       struct iomap_iter iter = {
+               .inode  = inode,
+               .pos    = 0,
+               .len    = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE),
+               .flags  = IOMAP_REPORT,
+       };
        struct iomap_swapfile_info isi = {
                .sis = sis,
                .lowest_ppage = (sector_t)-1ULL,
                .file = swap_file,
        };
-       struct address_space *mapping = swap_file->f_mapping;
-       struct inode *inode = mapping->host;
-       loff_t pos = 0;
-       loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
-       loff_t ret;
+       int ret;
 
        /*
         * Persist all file mapping metadata so that we won't have any
@@ -160,15 +165,10 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
        if (ret)
                return ret;
 
-       while (len > 0) {
-               ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
-                               ops, &isi, iomap_swapfile_activate_actor);
-               if (ret <= 0)
-                       return ret;
-
-               pos += ret;
-               len -= ret;
-       }
+       while ((ret = iomap_iter(&iter, ops)) > 0)
+               iter.processed = iomap_swapfile_iter(&iter, &iter.iomap, &isi);
+       if (ret < 0)
+               return ret;
 
        if (isi.iomap.length) {
                ret = iomap_swapfile_add_extent(&isi);
index fdc7ae3..65e3978 100644 (file)
@@ -1,9 +1,18 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2009-2019 Christoph Hellwig
+ * Copyright (c) 2009-2021 Christoph Hellwig
  *
- * NOTE: none of these tracepoints shall be consider a stable kernel ABI
+ * NOTE: none of these tracepoints shall be considered a stable kernel ABI
  * as they can change at any time.
+ *
+ * Current conventions for printing numbers measuring specific units:
+ *
+ * offset: byte offset into a subcomponent of a file operation
+ * pos: file offset, in bytes
+ * length: length of a file operation, in bytes
+ * ino: inode number
+ *
+ * Numbers describing space allocations should be formatted in hexadecimal.
  */
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM iomap
@@ -42,14 +51,14 @@ DEFINE_READPAGE_EVENT(iomap_readpage);
 DEFINE_READPAGE_EVENT(iomap_readahead);
 
 DECLARE_EVENT_CLASS(iomap_range_class,
-       TP_PROTO(struct inode *inode, unsigned long off, unsigned int len),
+       TP_PROTO(struct inode *inode, loff_t off, u64 len),
        TP_ARGS(inode, off, len),
        TP_STRUCT__entry(
                __field(dev_t, dev)
                __field(u64, ino)
                __field(loff_t, size)
-               __field(unsigned long, offset)
-               __field(unsigned int, length)
+               __field(loff_t, offset)
+               __field(u64, length)
        ),
        TP_fast_assign(
                __entry->dev = inode->i_sb->s_dev;
@@ -58,8 +67,7 @@ DECLARE_EVENT_CLASS(iomap_range_class,
                __entry->offset = off;
                __entry->length = len;
        ),
-       TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset %lx "
-                 "length %x",
+       TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset 0x%llx length 0x%llx",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __entry->size,
@@ -69,7 +77,7 @@ DECLARE_EVENT_CLASS(iomap_range_class,
 
 #define DEFINE_RANGE_EVENT(name)               \
 DEFINE_EVENT(iomap_range_class, name,  \
-       TP_PROTO(struct inode *inode, unsigned long off, unsigned int len),\
+       TP_PROTO(struct inode *inode, loff_t off, u64 len),\
        TP_ARGS(inode, off, len))
 DEFINE_RANGE_EVENT(iomap_writepage);
 DEFINE_RANGE_EVENT(iomap_releasepage);
@@ -122,8 +130,8 @@ DECLARE_EVENT_CLASS(iomap_class,
                __entry->flags = iomap->flags;
                __entry->bdev = iomap->bdev ? iomap->bdev->bd_dev : 0;
        ),
-       TP_printk("dev %d:%d ino 0x%llx bdev %d:%d addr %lld offset %lld "
-                 "length %llu type %s flags %s",
+       TP_printk("dev %d:%d ino 0x%llx bdev %d:%d addr 0x%llx offset 0x%llx "
+                 "length 0x%llx type %s flags %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  MAJOR(__entry->bdev), MINOR(__entry->bdev),
@@ -138,36 +146,32 @@ DECLARE_EVENT_CLASS(iomap_class,
 DEFINE_EVENT(iomap_class, name,        \
        TP_PROTO(struct inode *inode, struct iomap *iomap), \
        TP_ARGS(inode, iomap))
-DEFINE_IOMAP_EVENT(iomap_apply_dstmap);
-DEFINE_IOMAP_EVENT(iomap_apply_srcmap);
+DEFINE_IOMAP_EVENT(iomap_iter_dstmap);
+DEFINE_IOMAP_EVENT(iomap_iter_srcmap);
 
-TRACE_EVENT(iomap_apply,
-       TP_PROTO(struct inode *inode, loff_t pos, loff_t length,
-               unsigned int flags, const void *ops, void *actor,
-               unsigned long caller),
-       TP_ARGS(inode, pos, length, flags, ops, actor, caller),
+TRACE_EVENT(iomap_iter,
+       TP_PROTO(struct iomap_iter *iter, const void *ops,
+                unsigned long caller),
+       TP_ARGS(iter, ops, caller),
        TP_STRUCT__entry(
                __field(dev_t, dev)
                __field(u64, ino)
                __field(loff_t, pos)
-               __field(loff_t, length)
+               __field(u64, length)
                __field(unsigned int, flags)
                __field(const void *, ops)
-               __field(void *, actor)
                __field(unsigned long, caller)
        ),
        TP_fast_assign(
-               __entry->dev = inode->i_sb->s_dev;
-               __entry->ino = inode->i_ino;
-               __entry->pos = pos;
-               __entry->length = length;
-               __entry->flags = flags;
+               __entry->dev = iter->inode->i_sb->s_dev;
+               __entry->ino = iter->inode->i_ino;
+               __entry->pos = iter->pos;
+               __entry->length = iomap_length(iter);
+               __entry->flags = iter->flags;
                __entry->ops = ops;
-               __entry->actor = actor;
                __entry->caller = caller;
        ),
-       TP_printk("dev %d:%d ino 0x%llx pos %lld length %lld flags %s (0x%x) "
-                 "ops %ps caller %pS actor %ps",
+       TP_printk("dev %d:%d ino 0x%llx pos 0x%llx length 0x%llx flags %s (0x%x) ops %ps caller %pS",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                   __entry->ino,
                   __entry->pos,
@@ -175,8 +179,7 @@ TRACE_EVENT(iomap_apply,
                   __print_flags(__entry->flags, "|", IOMAP_FLAGS_STRINGS),
                   __entry->flags,
                   __entry->ops,
-                  (void *)__entry->caller,
-                  __entry->actor)
+                  (void *)__entry->caller)
 );
 
 #endif /* _IOMAP_TRACE_H */
index 21edc42..678e2c5 100644 (file)
@@ -155,7 +155,6 @@ struct iso9660_options{
        unsigned int overriderockperm:1;
        unsigned int uid_set:1;
        unsigned int gid_set:1;
-       unsigned int utf8:1;
        unsigned char map;
        unsigned char check;
        unsigned int blocksize;
@@ -356,7 +355,6 @@ static int parse_options(char *options, struct iso9660_options *popt)
        popt->gid = GLOBAL_ROOT_GID;
        popt->uid = GLOBAL_ROOT_UID;
        popt->iocharset = NULL;
-       popt->utf8 = 0;
        popt->overriderockperm = 0;
        popt->session=-1;
        popt->sbsector=-1;
@@ -389,10 +387,13 @@ static int parse_options(char *options, struct iso9660_options *popt)
                case Opt_cruft:
                        popt->cruft = 1;
                        break;
+#ifdef CONFIG_JOLIET
                case Opt_utf8:
-                       popt->utf8 = 1;
+                       kfree(popt->iocharset);
+                       popt->iocharset = kstrdup("utf8", GFP_KERNEL);
+                       if (!popt->iocharset)
+                               return 0;
                        break;
-#ifdef CONFIG_JOLIET
                case Opt_iocharset:
                        kfree(popt->iocharset);
                        popt->iocharset = match_strdup(&args[0]);
@@ -495,7 +496,6 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
        if (sbi->s_nocompress)          seq_puts(m, ",nocompress");
        if (sbi->s_overriderockperm)    seq_puts(m, ",overriderockperm");
        if (sbi->s_showassoc)           seq_puts(m, ",showassoc");
-       if (sbi->s_utf8)                seq_puts(m, ",utf8");
 
        if (sbi->s_check)               seq_printf(m, ",check=%c", sbi->s_check);
        if (sbi->s_mapping)             seq_printf(m, ",map=%c", sbi->s_mapping);
@@ -518,9 +518,10 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
                seq_printf(m, ",fmode=%o", sbi->s_fmode);
 
 #ifdef CONFIG_JOLIET
-       if (sbi->s_nls_iocharset &&
-           strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0)
+       if (sbi->s_nls_iocharset)
                seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset);
+       else
+               seq_puts(m, ",iocharset=utf8");
 #endif
        return 0;
 }
@@ -863,14 +864,13 @@ root_found:
        sbi->s_nls_iocharset = NULL;
 
 #ifdef CONFIG_JOLIET
-       if (joliet_level && opt.utf8 == 0) {
+       if (joliet_level) {
                char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
-               sbi->s_nls_iocharset = load_nls(p);
-               if (! sbi->s_nls_iocharset) {
-                       /* Fail only if explicit charset specified */
-                       if (opt.iocharset)
+               if (strcmp(p, "utf8") != 0) {
+                       sbi->s_nls_iocharset = opt.iocharset ?
+                               load_nls(opt.iocharset) : load_nls_default();
+                       if (!sbi->s_nls_iocharset)
                                goto out_freesbi;
-                       sbi->s_nls_iocharset = load_nls_default();
                }
        }
 #endif
@@ -886,7 +886,6 @@ root_found:
        sbi->s_gid = opt.gid;
        sbi->s_uid_set = opt.uid_set;
        sbi->s_gid_set = opt.gid_set;
-       sbi->s_utf8 = opt.utf8;
        sbi->s_nocompress = opt.nocompress;
        sbi->s_overriderockperm = opt.overriderockperm;
        /*
index 055ec6c..dcdc191 100644 (file)
@@ -44,7 +44,6 @@ struct isofs_sb_info {
        unsigned char s_session;
        unsigned int  s_high_sierra:1;
        unsigned int  s_rock:2;
-       unsigned int  s_utf8:1;
        unsigned int  s_cruft:1; /* Broken disks with high byte of length
                                  * containing junk */
        unsigned int  s_nocompress:1;
index be8b6a9..c0f04a1 100644 (file)
@@ -41,14 +41,12 @@ uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls)
 int
 get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
 {
-       unsigned char utf8;
        struct nls_table *nls;
        unsigned char len = 0;
 
-       utf8 = ISOFS_SB(inode->i_sb)->s_utf8;
        nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
 
-       if (utf8) {
+       if (!nls) {
                len = utf16s_to_utf8s((const wchar_t *) de->name,
                                de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
                                outname, PAGE_SIZE);
diff --git a/fs/ksmbd/Kconfig b/fs/ksmbd/Kconfig
new file mode 100644 (file)
index 0000000..b83cbd7
--- /dev/null
@@ -0,0 +1,68 @@
+config SMB_SERVER
+       tristate "SMB3 server support (EXPERIMENTAL)"
+       depends on INET
+       depends on MULTIUSER
+       depends on FILE_LOCKING
+       select NLS
+       select NLS_UTF8
+       select CRYPTO
+       select CRYPTO_MD4
+       select CRYPTO_MD5
+       select CRYPTO_HMAC
+       select CRYPTO_ECB
+       select CRYPTO_LIB_DES
+       select CRYPTO_SHA256
+       select CRYPTO_CMAC
+       select CRYPTO_SHA512
+       select CRYPTO_AEAD2
+       select CRYPTO_CCM
+       select CRYPTO_GCM
+       select ASN1
+       select OID_REGISTRY
+       default n
+       help
+         Choose Y here if you want to allow SMB3-compliant clients
+         to access files residing on this system using the SMB3 protocol.
+         To compile the SMB3 server support as a module,
+         choose M here: the module will be called ksmbd.
+
+         You may choose to use a Samba server instead, in which
+         case you can choose N here.
+
+         You also need to install the user-space programs found in
+         ksmbd-tools, available from
+         https://github.com/cifsd-team/ksmbd-tools.
+         More detail about how to run the ksmbd kernel server is
+         available in the README file
+         (https://github.com/cifsd-team/ksmbd-tools/blob/master/README).
+
+         The ksmbd kernel server includes support for auto-negotiation,
+         Secure negotiate, Pre-authentication integrity, oplock/lease,
+         compound requests, multi-credit, packet signing, RDMA (smbdirect),
+         SMB3 encryption, copy-offload, and secure per-user session
+         establishment via NTLM or NTLMv2.
+
+config SMB_SERVER_SMBDIRECT
+       bool "Support for SMB Direct protocol"
+       depends on SMB_SERVER=m && INFINIBAND && INFINIBAND_ADDR_TRANS || SMB_SERVER=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
+       select SG_POOL
+       default n
+
+       help
+         Enables SMB Direct support for SMB 3.0, 3.02 and 3.1.1.
+
+         SMB Direct allows transferring SMB packets over RDMA. If unsure,
+         say N.
+
+config SMB_SERVER_CHECK_CAP_NET_ADMIN
+       bool "Enable network administration capability check"
+       depends on SMB_SERVER
+       default y
+
+       help
+         Prevent unprivileged processes from starting the ksmbd kernel server.
+
+config SMB_SERVER_KERBEROS5
+       bool "Support for Kerberos 5"
+       depends on SMB_SERVER
+       default n
diff --git a/fs/ksmbd/Makefile b/fs/ksmbd/Makefile
new file mode 100644 (file)
index 0000000..7d6337a
--- /dev/null
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for Linux SMB3 kernel server
+#
+obj-$(CONFIG_SMB_SERVER) += ksmbd.o
+
+ksmbd-y :=     unicode.o auth.o vfs.o vfs_cache.o server.o ndr.o \
+               misc.o oplock.o connection.o ksmbd_work.o crypto_ctx.o \
+               mgmt/ksmbd_ida.o mgmt/user_config.o mgmt/share_config.o \
+               mgmt/tree_connect.o mgmt/user_session.o smb_common.o \
+               transport_tcp.o transport_ipc.o smbacl.o smb2pdu.o \
+               smb2ops.o smb2misc.o ksmbd_spnego_negtokeninit.asn1.o \
+               ksmbd_spnego_negtokentarg.asn1.o asn1.o
+
+$(obj)/asn1.o: $(obj)/ksmbd_spnego_negtokeninit.asn1.h $(obj)/ksmbd_spnego_negtokentarg.asn1.h
+
+$(obj)/ksmbd_spnego_negtokeninit.asn1.o: $(obj)/ksmbd_spnego_negtokeninit.asn1.c $(obj)/ksmbd_spnego_negtokeninit.asn1.h
+$(obj)/ksmbd_spnego_negtokentarg.asn1.o: $(obj)/ksmbd_spnego_negtokentarg.asn1.c $(obj)/ksmbd_spnego_negtokentarg.asn1.h
+
+ksmbd-$(CONFIG_SMB_SERVER_SMBDIRECT) += transport_rdma.o
diff --git a/fs/ksmbd/asn1.c b/fs/ksmbd/asn1.c
new file mode 100644 (file)
index 0000000..b014f46
--- /dev/null
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * The ASN.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in
+ * turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich
+ *
+ * Copyright (c) 2000 RP Internet (www.rpi.net.au).
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/oid_registry.h>
+
+#include "glob.h"
+
+#include "asn1.h"
+#include "connection.h"
+#include "auth.h"
+#include "ksmbd_spnego_negtokeninit.asn1.h"
+#include "ksmbd_spnego_negtokentarg.asn1.h"
+
+#define SPNEGO_OID_LEN 7
+#define NTLMSSP_OID_LEN  10
+#define KRB5_OID_LEN  7
+#define KRB5U2U_OID_LEN  8
+#define MSKRB5_OID_LEN  7
+static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 };
+static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 };
+static unsigned long KRB5_OID[7] = { 1, 2, 840, 113554, 1, 2, 2 };
+static unsigned long KRB5U2U_OID[8] = { 1, 2, 840, 113554, 1, 2, 2, 3 };
+static unsigned long MSKRB5_OID[7] = { 1, 2, 840, 48018, 1, 2, 2 };
+
+static char NTLMSSP_OID_STR[NTLMSSP_OID_LEN] = { 0x2b, 0x06, 0x01, 0x04, 0x01,
+       0x82, 0x37, 0x02, 0x02, 0x0a };
+
+static bool
+asn1_subid_decode(const unsigned char **begin, const unsigned char *end,
+                 unsigned long *subid)
+{
+       const unsigned char *ptr = *begin;
+       unsigned char ch;
+
+       *subid = 0;
+
+       do {
+               if (ptr >= end)
+                       return false;
+
+               ch = *ptr++;
+               *subid <<= 7;
+               *subid |= ch & 0x7F;
+       } while ((ch & 0x80) == 0x80);
+
+       *begin = ptr;
+       return true;
+}
+
+static bool asn1_oid_decode(const unsigned char *value, size_t vlen,
+                           unsigned long **oid, size_t *oidlen)
+{
+       const unsigned char *iptr = value, *end = value + vlen;
+       unsigned long *optr;
+       unsigned long subid;
+
+       vlen += 1;
+       if (vlen < 2 || vlen > UINT_MAX / sizeof(unsigned long))
+               goto fail_nullify;
+
+       *oid = kmalloc(vlen * sizeof(unsigned long), GFP_KERNEL);
+       if (!*oid)
+               return false;
+
+       optr = *oid;
+
+       if (!asn1_subid_decode(&iptr, end, &subid))
+               goto fail;
+
+       if (subid < 40) {
+               optr[0] = 0;
+               optr[1] = subid;
+       } else if (subid < 80) {
+               optr[0] = 1;
+               optr[1] = subid - 40;
+       } else {
+               optr[0] = 2;
+               optr[1] = subid - 80;
+       }
+
+       *oidlen = 2;
+       optr += 2;
+
+       while (iptr < end) {
+               if (++(*oidlen) > vlen)
+                       goto fail;
+
+               if (!asn1_subid_decode(&iptr, end, optr++))
+                       goto fail;
+       }
+       return true;
+
+fail:
+       kfree(*oid);
+fail_nullify:
+       *oid = NULL;
+       return false;
+}
+
+static bool oid_eq(unsigned long *oid1, unsigned int oid1len,
+                  unsigned long *oid2, unsigned int oid2len)
+{
+       if (oid1len != oid2len)
+               return false;
+
+       return memcmp(oid1, oid2, oid1len) == 0;
+}
+
+int
+ksmbd_decode_negTokenInit(unsigned char *security_blob, int length,
+                         struct ksmbd_conn *conn)
+{
+       return asn1_ber_decoder(&ksmbd_spnego_negtokeninit_decoder, conn,
+                               security_blob, length);
+}
+
+int
+ksmbd_decode_negTokenTarg(unsigned char *security_blob, int length,
+                         struct ksmbd_conn *conn)
+{
+       return asn1_ber_decoder(&ksmbd_spnego_negtokentarg_decoder, conn,
+                               security_blob, length);
+}
+
+static int compute_asn_hdr_len_bytes(int len)
+{
+       if (len > 0xFFFFFF)
+               return 4;
+       else if (len > 0xFFFF)
+               return 3;
+       else if (len > 0xFF)
+               return 2;
+       else if (len > 0x7F)
+               return 1;
+       else
+               return 0;
+}
+
+static void encode_asn_tag(char *buf, unsigned int *ofs, char tag, char seq,
+                          int length)
+{
+       int i;
+       int index = *ofs;
+       char hdr_len = compute_asn_hdr_len_bytes(length);
+       int len = length + 2 + hdr_len;
+
+       /* insert tag */
+       buf[index++] = tag;
+
+       if (!hdr_len) {
+               buf[index++] = len;
+       } else {
+               buf[index++] = 0x80 | hdr_len;
+               for (i = hdr_len - 1; i >= 0; i--)
+                       buf[index++] = (len >> (i * 8)) & 0xFF;
+       }
+
+       /* insert seq */
+       len = len - (index - *ofs);
+       buf[index++] = seq;
+
+       if (!hdr_len) {
+               buf[index++] = len;
+       } else {
+               buf[index++] = 0x80 | hdr_len;
+               for (i = hdr_len - 1; i >= 0; i--)
+                       buf[index++] = (len >> (i * 8)) & 0xFF;
+       }
+
+       *ofs += (index - *ofs);
+}
+
+int build_spnego_ntlmssp_neg_blob(unsigned char **pbuffer, u16 *buflen,
+                                 char *ntlm_blob, int ntlm_blob_len)
+{
+       char *buf;
+       unsigned int ofs = 0;
+       int neg_result_len = 4 + compute_asn_hdr_len_bytes(1) * 2 + 1;
+       int oid_len = 4 + compute_asn_hdr_len_bytes(NTLMSSP_OID_LEN) * 2 +
+               NTLMSSP_OID_LEN;
+       int ntlmssp_len = 4 + compute_asn_hdr_len_bytes(ntlm_blob_len) * 2 +
+               ntlm_blob_len;
+       int total_len = 4 + compute_asn_hdr_len_bytes(neg_result_len +
+                       oid_len + ntlmssp_len) * 2 +
+                       neg_result_len + oid_len + ntlmssp_len;
+
+       buf = kmalloc(total_len, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       /* insert main gss header */
+       encode_asn_tag(buf, &ofs, 0xa1, 0x30, neg_result_len + oid_len +
+                       ntlmssp_len);
+
+       /* insert neg result */
+       encode_asn_tag(buf, &ofs, 0xa0, 0x0a, 1);
+       buf[ofs++] = 1;
+
+       /* insert oid */
+       encode_asn_tag(buf, &ofs, 0xa1, 0x06, NTLMSSP_OID_LEN);
+       memcpy(buf + ofs, NTLMSSP_OID_STR, NTLMSSP_OID_LEN);
+       ofs += NTLMSSP_OID_LEN;
+
+       /* insert response token - ntlmssp blob */
+       encode_asn_tag(buf, &ofs, 0xa2, 0x04, ntlm_blob_len);
+       memcpy(buf + ofs, ntlm_blob, ntlm_blob_len);
+       ofs += ntlm_blob_len;
+
+       *pbuffer = buf;
+       *buflen = total_len;
+       return 0;
+}
+
+int build_spnego_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
+                                  int neg_result)
+{
+       char *buf;
+       unsigned int ofs = 0;
+       int neg_result_len = 4 + compute_asn_hdr_len_bytes(1) * 2 + 1;
+       int total_len = 4 + compute_asn_hdr_len_bytes(neg_result_len) * 2 +
+               neg_result_len;
+
+       buf = kmalloc(total_len, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       /* insert main gss header */
+       encode_asn_tag(buf, &ofs, 0xa1, 0x30, neg_result_len);
+
+       /* insert neg result */
+       encode_asn_tag(buf, &ofs, 0xa0, 0x0a, 1);
+       if (neg_result)
+               buf[ofs++] = 2;
+       else
+               buf[ofs++] = 0;
+
+       *pbuffer = buf;
+       *buflen = total_len;
+       return 0;
+}
+
+int ksmbd_gssapi_this_mech(void *context, size_t hdrlen, unsigned char tag,
+                          const void *value, size_t vlen)
+{
+       unsigned long *oid;
+       size_t oidlen;
+       int err = 0;
+
+       if (!asn1_oid_decode(value, vlen, &oid, &oidlen)) {
+               err = -EBADMSG;
+               goto out;
+       }
+
+       if (!oid_eq(oid, oidlen, SPNEGO_OID, SPNEGO_OID_LEN))
+               err = -EBADMSG;
+       kfree(oid);
+out:
+       if (err) {
+               char buf[50];
+
+               sprint_oid(value, vlen, buf, sizeof(buf));
+               ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf);
+       }
+       return err;
+}
+
+int ksmbd_neg_token_init_mech_type(void *context, size_t hdrlen,
+                                  unsigned char tag, const void *value,
+                                  size_t vlen)
+{
+       struct ksmbd_conn *conn = context;
+       unsigned long *oid;
+       size_t oidlen;
+       int mech_type;
+       char buf[50];
+
+       if (!asn1_oid_decode(value, vlen, &oid, &oidlen))
+               goto fail;
+
+       if (oid_eq(oid, oidlen, NTLMSSP_OID, NTLMSSP_OID_LEN))
+               mech_type = KSMBD_AUTH_NTLMSSP;
+       else if (oid_eq(oid, oidlen, MSKRB5_OID, MSKRB5_OID_LEN))
+               mech_type = KSMBD_AUTH_MSKRB5;
+       else if (oid_eq(oid, oidlen, KRB5_OID, KRB5_OID_LEN))
+               mech_type = KSMBD_AUTH_KRB5;
+       else if (oid_eq(oid, oidlen, KRB5U2U_OID, KRB5U2U_OID_LEN))
+               mech_type = KSMBD_AUTH_KRB5U2U;
+       else
+               goto fail;
+
+       conn->auth_mechs |= mech_type;
+       if (conn->preferred_auth_mech == 0)
+               conn->preferred_auth_mech = mech_type;
+
+       kfree(oid);
+       return 0;
+
+fail:
+       kfree(oid);
+       sprint_oid(value, vlen, buf, sizeof(buf));
+       ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf);
+       return -EBADMSG;
+}
+
+int ksmbd_neg_token_init_mech_token(void *context, size_t hdrlen,
+                                   unsigned char tag, const void *value,
+                                   size_t vlen)
+{
+       struct ksmbd_conn *conn = context;
+
+       conn->mechToken = kmalloc(vlen + 1, GFP_KERNEL);
+       if (!conn->mechToken)
+               return -ENOMEM;
+
+       memcpy(conn->mechToken, value, vlen);
+       conn->mechToken[vlen] = '\0';
+       return 0;
+}
+
+int ksmbd_neg_token_targ_resp_token(void *context, size_t hdrlen,
+                                   unsigned char tag, const void *value,
+                                   size_t vlen)
+{
+       struct ksmbd_conn *conn = context;
+
+       conn->mechToken = kmalloc(vlen + 1, GFP_KERNEL);
+       if (!conn->mechToken)
+               return -ENOMEM;
+
+       memcpy(conn->mechToken, value, vlen);
+       conn->mechToken[vlen] = '\0';
+       return 0;
+}
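
As a side note on the decoder above: asn1_oid_decode()/asn1_subid_decode() implement standard BER base-128 sub-identifier decoding, where each byte contributes its low seven bits and the high bit marks a continuation. In the NTLMSSP OID 1.3.6.1.4.1.311.2.2.10 (NTLMSSP_OID_STR above), the first byte 0x2b packs the first two arcs as 40 * 1 + 3, and the arc 311 is carried by the two bytes 0x82 0x37, since (0x82 & 0x7f) << 7 | 0x37 == 2 * 128 + 55 == 311. The stand-alone userspace sketch below (not part of the patch; all names are illustrative) decodes just such a multi-byte sub-identifier:

#include <stdbool.h>
#include <stdio.h>

/* Decode one BER base-128 sub-identifier, mirroring asn1_subid_decode() above. */
static bool subid_decode(const unsigned char **p, const unsigned char *end,
                         unsigned long *subid)
{
        unsigned char ch;

        *subid = 0;
        do {
                if (*p >= end)
                        return false;           /* truncated encoding */
                ch = *(*p)++;
                *subid = (*subid << 7) | (ch & 0x7f);
        } while (ch & 0x80);                    /* high bit set: more bytes follow */
        return true;
}

int main(void)
{
        /* 0x82 0x37 is how the arc 311 appears inside NTLMSSP_OID_STR. */
        static const unsigned char enc[] = { 0x82, 0x37 };
        const unsigned char *p = enc;
        unsigned long subid;

        if (subid_decode(&p, enc + sizeof(enc), &subid))
                printf("%lu\n", subid);         /* prints 311 */
        return 0;
}
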
diff --git a/fs/ksmbd/asn1.h b/fs/ksmbd/asn1.h
new file mode 100644 (file)
index 0000000..ce105f4
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * The ASN.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in
+ * turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich
+ *
+ * Copyright (c) 2000 RP Internet (www.rpi.net.au).
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __ASN1_H__
+#define __ASN1_H__
+
+int ksmbd_decode_negTokenInit(unsigned char *security_blob, int length,
+                             struct ksmbd_conn *conn);
+int ksmbd_decode_negTokenTarg(unsigned char *security_blob, int length,
+                             struct ksmbd_conn *conn);
+int build_spnego_ntlmssp_neg_blob(unsigned char **pbuffer, u16 *buflen,
+                                 char *ntlm_blob, int ntlm_blob_len);
+int build_spnego_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
+                                  int neg_result);
+#endif /* __ASN1_H__ */
diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
new file mode 100644 (file)
index 0000000..de36f12
--- /dev/null
@@ -0,0 +1,1364 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/backing-dev.h>
+#include <linux/writeback.h>
+#include <linux/uio.h>
+#include <linux/xattr.h>
+#include <crypto/hash.h>
+#include <crypto/aead.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+
+#include "auth.h"
+#include "glob.h"
+
+#include <linux/fips.h>
+#include <crypto/des.h>
+
+#include "server.h"
+#include "smb_common.h"
+#include "connection.h"
+#include "mgmt/user_session.h"
+#include "mgmt/user_config.h"
+#include "crypto_ctx.h"
+#include "transport_ipc.h"
+
+/*
+ * Fixed-format data defining the GSS header and the fixed string
+ * "not_defined_in_RFC4178@please_ignore", so that the security blob
+ * sent during the negotiate phase can be generated statically.
+ */
+static char NEGOTIATE_GSS_HEADER[AUTH_GSS_LENGTH] = {
+#ifdef CONFIG_SMB_SERVER_KERBEROS5
+       0x60, 0x5e, 0x06, 0x06, 0x2b, 0x06, 0x01, 0x05,
+       0x05, 0x02, 0xa0, 0x54, 0x30, 0x52, 0xa0, 0x24,
+       0x30, 0x22, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86,
+       0xf7, 0x12, 0x01, 0x02, 0x02, 0x06, 0x09, 0x2a,
+       0x86, 0x48, 0x82, 0xf7, 0x12, 0x01, 0x02, 0x02,
+       0x06, 0x0a, 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82,
+       0x37, 0x02, 0x02, 0x0a, 0xa3, 0x2a, 0x30, 0x28,
+       0xa0, 0x26, 0x1b, 0x24, 0x6e, 0x6f, 0x74, 0x5f,
+       0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f,
+       0x69, 0x6e, 0x5f, 0x52, 0x46, 0x43, 0x34, 0x31,
+       0x37, 0x38, 0x40, 0x70, 0x6c, 0x65, 0x61, 0x73,
+       0x65, 0x5f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65
+#else
+       0x60, 0x48, 0x06, 0x06, 0x2b, 0x06, 0x01, 0x05,
+       0x05, 0x02, 0xa0, 0x3e, 0x30, 0x3c, 0xa0, 0x0e,
+       0x30, 0x0c, 0x06, 0x0a, 0x2b, 0x06, 0x01, 0x04,
+       0x01, 0x82, 0x37, 0x02, 0x02, 0x0a, 0xa3, 0x2a,
+       0x30, 0x28, 0xa0, 0x26, 0x1b, 0x24, 0x6e, 0x6f,
+       0x74, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65,
+       0x64, 0x5f, 0x69, 0x6e, 0x5f, 0x52, 0x46, 0x43,
+       0x34, 0x31, 0x37, 0x38, 0x40, 0x70, 0x6c, 0x65,
+       0x61, 0x73, 0x65, 0x5f, 0x69, 0x67, 0x6e, 0x6f,
+       0x72, 0x65
+#endif
+};
+
+void ksmbd_copy_gss_neg_header(void *buf)
+{
+       memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH);
+}
+
+static void
+str_to_key(unsigned char *str, unsigned char *key)
+{
+       int i;
+
+       key[0] = str[0] >> 1;
+       key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2);
+       key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3);
+       key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4);
+       key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5);
+       key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);
+       key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);
+       key[7] = str[6] & 0x7F;
+       for (i = 0; i < 8; i++)
+               key[i] = (key[i] << 1);
+}
+
+static int
+smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
+{
+       unsigned char key2[8];
+       struct des_ctx ctx;
+
+       if (fips_enabled) {
+               ksmbd_debug(AUTH, "FIPS compliance enabled: DES not permitted\n");
+               return -ENOENT;
+       }
+
+       str_to_key(key, key2);
+       des_expand_key(&ctx, key2, DES_KEY_SIZE);
+       des_encrypt(&ctx, out, in);
+       memzero_explicit(&ctx, sizeof(ctx));
+       return 0;
+}
+
+static int ksmbd_enc_p24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)
+{
+       int rc;
+
+       rc = smbhash(p24, c8, p21);
+       if (rc)
+               return rc;
+       rc = smbhash(p24 + 8, c8, p21 + 7);
+       if (rc)
+               return rc;
+       return smbhash(p24 + 16, c8, p21 + 14);
+}
+
+/* produce an MD4 message digest from data of length n bytes */
+static int ksmbd_enc_md4(unsigned char *md4_hash, unsigned char *link_str,
+                        int link_len)
+{
+       int rc;
+       struct ksmbd_crypto_ctx *ctx;
+
+       ctx = ksmbd_crypto_ctx_find_md4();
+       if (!ctx) {
+               ksmbd_debug(AUTH, "Crypto md4 allocation error\n");
+               return -ENOMEM;
+       }
+
+       rc = crypto_shash_init(CRYPTO_MD4(ctx));
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not init md4 shash\n");
+               goto out;
+       }
+
+       rc = crypto_shash_update(CRYPTO_MD4(ctx), link_str, link_len);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not update with link_str\n");
+               goto out;
+       }
+
+       rc = crypto_shash_final(CRYPTO_MD4(ctx), md4_hash);
+       if (rc)
+               ksmbd_debug(AUTH, "Could not generate md4 hash\n");
+out:
+       ksmbd_release_crypto_ctx(ctx);
+       return rc;
+}
+
+static int ksmbd_enc_update_sess_key(unsigned char *md5_hash, char *nonce,
+                                    char *server_challenge, int len)
+{
+       int rc;
+       struct ksmbd_crypto_ctx *ctx;
+
+       ctx = ksmbd_crypto_ctx_find_md5();
+       if (!ctx) {
+               ksmbd_debug(AUTH, "Crypto md5 allocation error\n");
+               return -ENOMEM;
+       }
+
+       rc = crypto_shash_init(CRYPTO_MD5(ctx));
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not init md5 shash\n");
+               goto out;
+       }
+
+       rc = crypto_shash_update(CRYPTO_MD5(ctx), server_challenge, len);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not update with challenge\n");
+               goto out;
+       }
+
+       rc = crypto_shash_update(CRYPTO_MD5(ctx), nonce, len);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not update with nonce\n");
+               goto out;
+       }
+
+       rc = crypto_shash_final(CRYPTO_MD5(ctx), md5_hash);
+       if (rc)
+               ksmbd_debug(AUTH, "Could not generate md5 hash\n");
+out:
+       ksmbd_release_crypto_ctx(ctx);
+       return rc;
+}
+
+/**
+ * ksmbd_gen_sess_key() - function to generate session key
+ * @sess:      session of connection
+ * @hash:      source hash value to be used for finding the session key
+ * @hmac:      source hmac value to be used for finding the session key
+ *
+ */
+static int ksmbd_gen_sess_key(struct ksmbd_session *sess, char *hash,
+                             char *hmac)
+{
+       struct ksmbd_crypto_ctx *ctx;
+       int rc;
+
+       ctx = ksmbd_crypto_ctx_find_hmacmd5();
+       if (!ctx) {
+               ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
+               return -ENOMEM;
+       }
+
+       rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
+                                hash,
+                                CIFS_HMAC_MD5_HASH_SIZE);
+       if (rc) {
+               ksmbd_debug(AUTH, "hmacmd5 set key fail error %d\n", rc);
+               goto out;
+       }
+
+       rc = crypto_shash_init(CRYPTO_HMACMD5(ctx));
+       if (rc) {
+               ksmbd_debug(AUTH, "could not init hmacmd5 error %d\n", rc);
+               goto out;
+       }
+
+       rc = crypto_shash_update(CRYPTO_HMACMD5(ctx),
+                                hmac,
+                                SMB2_NTLMV2_SESSKEY_SIZE);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not update with response error %d\n", rc);
+               goto out;
+       }
+
+       rc = crypto_shash_final(CRYPTO_HMACMD5(ctx), sess->sess_key);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not generate hmacmd5 hash error %d\n", rc);
+               goto out;
+       }
+
+out:
+       ksmbd_release_crypto_ctx(ctx);
+       return rc;
+}
+
+static int calc_ntlmv2_hash(struct ksmbd_session *sess, char *ntlmv2_hash,
+                           char *dname)
+{
+       int ret, len, conv_len;
+       wchar_t *domain = NULL;
+       __le16 *uniname = NULL;
+       struct ksmbd_crypto_ctx *ctx;
+
+       ctx = ksmbd_crypto_ctx_find_hmacmd5();
+       if (!ctx) {
+               ksmbd_debug(AUTH, "can't generate ntlmv2 hash\n");
+               return -ENOMEM;
+       }
+
+       ret = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
+                                 user_passkey(sess->user),
+                                 CIFS_ENCPWD_SIZE);
+       if (ret) {
+               ksmbd_debug(AUTH, "Could not set NT Hash as a key\n");
+               goto out;
+       }
+
+       ret = crypto_shash_init(CRYPTO_HMACMD5(ctx));
+       if (ret) {
+               ksmbd_debug(AUTH, "could not init hmacmd5\n");
+               goto out;
+       }
+
+       /* convert user_name to unicode */
+       len = strlen(user_name(sess->user));
+       uniname = kzalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
+       if (!uniname) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       conv_len = smb_strtoUTF16(uniname, user_name(sess->user), len,
+                                 sess->conn->local_nls);
+       if (conv_len < 0 || conv_len > len) {
+               ret = -EINVAL;
+               goto out;
+       }
+       UniStrupr(uniname);
+
+       ret = crypto_shash_update(CRYPTO_HMACMD5(ctx),
+                                 (char *)uniname,
+                                 UNICODE_LEN(conv_len));
+       if (ret) {
+               ksmbd_debug(AUTH, "Could not update with user\n");
+               goto out;
+       }
+
+       /* Convert domain name or conn name to unicode and uppercase */
+       len = strlen(dname);
+       domain = kzalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
+       if (!domain) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       conv_len = smb_strtoUTF16((__le16 *)domain, dname, len,
+                                 sess->conn->local_nls);
+       if (conv_len < 0 || conv_len > len) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = crypto_shash_update(CRYPTO_HMACMD5(ctx),
+                                 (char *)domain,
+                                 UNICODE_LEN(conv_len));
+       if (ret) {
+               ksmbd_debug(AUTH, "Could not update with domain\n");
+               goto out;
+       }
+
+       ret = crypto_shash_final(CRYPTO_HMACMD5(ctx), ntlmv2_hash);
+       if (ret)
+               ksmbd_debug(AUTH, "Could not generate md5 hash\n");
+out:
+       kfree(uniname);
+       kfree(domain);
+       ksmbd_release_crypto_ctx(ctx);
+       return ret;
+}
+
+/**
+ * ksmbd_auth_ntlm() - NTLM authentication handler
+ * @sess:      session of connection
+ * @pw_buf:    NTLM challenge response
+ *
+ * Return:     0 on success, error number on error
+ */
+int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf)
+{
+       int rc;
+       unsigned char p21[21];
+       char key[CIFS_AUTH_RESP_SIZE];
+
+       memset(p21, '\0', 21);
+       memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
+       rc = ksmbd_enc_p24(p21, sess->ntlmssp.cryptkey, key);
+       if (rc) {
+               pr_err("password processing failed\n");
+               return rc;
+       }
+
+       ksmbd_enc_md4(sess->sess_key, user_passkey(sess->user),
+                     CIFS_SMB1_SESSKEY_SIZE);
+       memcpy(sess->sess_key + CIFS_SMB1_SESSKEY_SIZE, key,
+              CIFS_AUTH_RESP_SIZE);
+       sess->sequence_number = 1;
+
+       if (strncmp(pw_buf, key, CIFS_AUTH_RESP_SIZE) != 0) {
+               ksmbd_debug(AUTH, "ntlmv1 authentication failed\n");
+               return -EINVAL;
+       }
+
+       ksmbd_debug(AUTH, "ntlmv1 authentication pass\n");
+       return 0;
+}
+
+/**
+ * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler
+ * @sess:      session of connection
+ * @ntlmv2:            NTLMv2 challenge response
+ * @blen:              NTLMv2 blob length
+ * @domain_name:       domain name
+ *
+ * Return:     0 on success, error number on error
+ */
+int ksmbd_auth_ntlmv2(struct ksmbd_session *sess, struct ntlmv2_resp *ntlmv2,
+                     int blen, char *domain_name)
+{
+       char ntlmv2_hash[CIFS_ENCPWD_SIZE];
+       char ntlmv2_rsp[CIFS_HMAC_MD5_HASH_SIZE];
+       struct ksmbd_crypto_ctx *ctx;
+       char *construct = NULL;
+       int rc, len;
+
+       ctx = ksmbd_crypto_ctx_find_hmacmd5();
+       if (!ctx) {
+               ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
+               return -ENOMEM;
+       }
+
+       rc = calc_ntlmv2_hash(sess, ntlmv2_hash, domain_name);
+       if (rc) {
+               ksmbd_debug(AUTH, "could not get v2 hash rc %d\n", rc);
+               goto out;
+       }
+
+       rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
+                                ntlmv2_hash,
+                                CIFS_HMAC_MD5_HASH_SIZE);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not set NTLMV2 Hash as a key\n");
+               goto out;
+       }
+
+       rc = crypto_shash_init(CRYPTO_HMACMD5(ctx));
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not init hmacmd5\n");
+               goto out;
+       }
+
+       len = CIFS_CRYPTO_KEY_SIZE + blen;
+       construct = kzalloc(len, GFP_KERNEL);
+       if (!construct) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       memcpy(construct, sess->ntlmssp.cryptkey, CIFS_CRYPTO_KEY_SIZE);
+       memcpy(construct + CIFS_CRYPTO_KEY_SIZE, &ntlmv2->blob_signature, blen);
+
+       rc = crypto_shash_update(CRYPTO_HMACMD5(ctx), construct, len);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not update with response\n");
+               goto out;
+       }
+
+       rc = crypto_shash_final(CRYPTO_HMACMD5(ctx), ntlmv2_rsp);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not generate md5 hash\n");
+               goto out;
+       }
+
+       rc = ksmbd_gen_sess_key(sess, ntlmv2_hash, ntlmv2_rsp);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not generate sess key\n");
+               goto out;
+       }
+
+       if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0)
+               rc = -EINVAL;
+out:
+       ksmbd_release_crypto_ctx(ctx);
+       kfree(construct);
+       return rc;
+}
+
+/**
+ * __ksmbd_auth_ntlmv2() - NTLM2 (extended security) authentication handler
+ * @sess:      session of connection
+ * @client_nonce:      client nonce from LM response.
+ * @ntlm_resp:         ntlm response data from client.
+ *
+ * Return:     0 on success, error number on error
+ */
+static int __ksmbd_auth_ntlmv2(struct ksmbd_session *sess, char *client_nonce,
+                              char *ntlm_resp)
+{
+       char sess_key[CIFS_SMB1_SESSKEY_SIZE] = {0};
+       int rc;
+       unsigned char p21[21];
+       char key[CIFS_AUTH_RESP_SIZE];
+
+       rc = ksmbd_enc_update_sess_key(sess_key,
+                                      client_nonce,
+                                      (char *)sess->ntlmssp.cryptkey, 8);
+       if (rc) {
+               pr_err("password processing failed\n");
+               goto out;
+       }
+
+       memset(p21, '\0', 21);
+       memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
+       rc = ksmbd_enc_p24(p21, sess_key, key);
+       if (rc) {
+               pr_err("password processing failed\n");
+               goto out;
+       }
+
+       if (memcmp(ntlm_resp, key, CIFS_AUTH_RESP_SIZE) != 0)
+               rc = -EINVAL;
+out:
+       return rc;
+}
+
+/**
+ * ksmbd_decode_ntlmssp_auth_blob() - helper function to construct
+ * authenticate blob
+ * @authblob:  authenticate blob source pointer
+ * @usr:       user details
+ * @sess:      session of connection
+ *
+ * Return:     0 on success, error number on error
+ */
+int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
+                                  int blob_len, struct ksmbd_session *sess)
+{
+       char *domain_name;
+       unsigned int lm_off, nt_off;
+       unsigned short nt_len;
+       int ret;
+
+       if (blob_len < sizeof(struct authenticate_message)) {
+               ksmbd_debug(AUTH, "negotiate blob len %d too small\n",
+                           blob_len);
+               return -EINVAL;
+       }
+
+       if (memcmp(authblob->Signature, "NTLMSSP", 8)) {
+               ksmbd_debug(AUTH, "blob signature incorrect %s\n",
+                           authblob->Signature);
+               return -EINVAL;
+       }
+
+       lm_off = le32_to_cpu(authblob->LmChallengeResponse.BufferOffset);
+       nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset);
+       nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length);
+
+       /* process NTLM authentication */
+       if (nt_len == CIFS_AUTH_RESP_SIZE) {
+               if (le32_to_cpu(authblob->NegotiateFlags) &
+                   NTLMSSP_NEGOTIATE_EXTENDED_SEC)
+                       return __ksmbd_auth_ntlmv2(sess, (char *)authblob +
+                               lm_off, (char *)authblob + nt_off);
+               else
+                       return ksmbd_auth_ntlm(sess, (char *)authblob +
+                               nt_off);
+       }
+
+       /* TODO : use domain name that imported from configuration file */
+       domain_name = smb_strndup_from_utf16((const char *)authblob +
+                       le32_to_cpu(authblob->DomainName.BufferOffset),
+                       le16_to_cpu(authblob->DomainName.Length), true,
+                       sess->conn->local_nls);
+       if (IS_ERR(domain_name))
+               return PTR_ERR(domain_name);
+
+       /* process NTLMv2 authentication */
+       ksmbd_debug(AUTH, "decode_ntlmssp_authenticate_blob dname %s\n",
+                   domain_name);
+       ret = ksmbd_auth_ntlmv2(sess, (struct ntlmv2_resp *)((char *)authblob + nt_off),
+                               nt_len - CIFS_ENCPWD_SIZE,
+                               domain_name);
+       kfree(domain_name);
+       return ret;
+}
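/*
 * A minimal user-space sketch of the dispatch rule above: a 24-byte NT
 * response is either classic NTLM or the NTLM2 "extended security" variant
 * (selected by the negotiate flag), and anything longer is treated as
 * NTLMv2.  pick_auth_scheme() and the EX_* names are illustrative only; the
 * flag value follows MS-NLMP and matches NTLMSSP_NEGOTIATE_EXTENDED_SEC.
 */
#include <stdio.h>

#define EX_AUTH_RESP_SIZE      24          /* CIFS_AUTH_RESP_SIZE */
#define EX_NEG_EXTENDED_SEC    0x00080000  /* NTLMSSP_NEGOTIATE_EXTENDED_SEC */

enum ex_auth_scheme { EX_AUTH_NTLM, EX_AUTH_NTLM2_EXT_SEC, EX_AUTH_NTLMV2 };

static enum ex_auth_scheme pick_auth_scheme(unsigned short nt_len,
                                            unsigned int negotiate_flags)
{
        if (nt_len == EX_AUTH_RESP_SIZE)
                return (negotiate_flags & EX_NEG_EXTENDED_SEC) ?
                        EX_AUTH_NTLM2_EXT_SEC : EX_AUTH_NTLM;
        return EX_AUTH_NTLMV2;
}

int main(void)
{
        printf("%d %d %d\n",
               pick_auth_scheme(24, 0),            /* classic NTLM        */
               pick_auth_scheme(24, 0x00080000),   /* NTLM2, extended sec */
               pick_auth_scheme(312, 0));          /* NTLMv2              */
        return 0;
}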
+
+/**
+ * ksmbd_decode_ntlmssp_neg_blob() - helper function to decode
+ * negotiate blob
+ * @negblob:   negotiate blob source pointer
+ * @blob_len:  length of the negotiate blob
+ * @sess:      session of connection
+ *
+ */
+int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
+                                 int blob_len, struct ksmbd_session *sess)
+{
+       if (blob_len < sizeof(struct negotiate_message)) {
+               ksmbd_debug(AUTH, "negotiate blob len %d too small\n",
+                           blob_len);
+               return -EINVAL;
+       }
+
+       if (memcmp(negblob->Signature, "NTLMSSP", 8)) {
+               ksmbd_debug(AUTH, "blob signature incorrect %s\n",
+                           negblob->Signature);
+               return -EINVAL;
+       }
+
+       sess->ntlmssp.client_flags = le32_to_cpu(negblob->NegotiateFlags);
+       return 0;
+}
+
+/**
+ * ksmbd_build_ntlmssp_challenge_blob() - helper function to construct
+ * challenge blob
+ * @chgblob: challenge blob source pointer to initialize
+ * @sess:    session of connection
+ *
+ */
+unsigned int
+ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
+                                  struct ksmbd_session *sess)
+{
+       struct target_info *tinfo;
+       wchar_t *name;
+       __u8 *target_name;
+       unsigned int flags, blob_off, blob_len, type, target_info_len = 0;
+       int len, uni_len, conv_len;
+       int cflags = sess->ntlmssp.client_flags;
+
+       memcpy(chgblob->Signature, NTLMSSP_SIGNATURE, 8);
+       chgblob->MessageType = NtLmChallenge;
+
+       flags = NTLMSSP_NEGOTIATE_UNICODE |
+               NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_TARGET_TYPE_SERVER |
+               NTLMSSP_NEGOTIATE_TARGET_INFO;
+
+       if (cflags & NTLMSSP_NEGOTIATE_SIGN) {
+               flags |= NTLMSSP_NEGOTIATE_SIGN;
+               flags |= cflags & (NTLMSSP_NEGOTIATE_128 |
+                                  NTLMSSP_NEGOTIATE_56);
+       }
+
+       if (cflags & NTLMSSP_NEGOTIATE_ALWAYS_SIGN)
+               flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
+
+       if (cflags & NTLMSSP_REQUEST_TARGET)
+               flags |= NTLMSSP_REQUEST_TARGET;
+
+       if (sess->conn->use_spnego &&
+           (cflags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
+               flags |= NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+
+       chgblob->NegotiateFlags = cpu_to_le32(flags);
+       len = strlen(ksmbd_netbios_name());
+       name = kmalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
+       if (!name)
+               return -ENOMEM;
+
+       conv_len = smb_strtoUTF16((__le16 *)name, ksmbd_netbios_name(), len,
+                                 sess->conn->local_nls);
+       if (conv_len < 0 || conv_len > len) {
+               kfree(name);
+               return -EINVAL;
+       }
+
+       uni_len = UNICODE_LEN(conv_len);
+
+       blob_off = sizeof(struct challenge_message);
+       blob_len = blob_off + uni_len;
+
+       chgblob->TargetName.Length = cpu_to_le16(uni_len);
+       chgblob->TargetName.MaximumLength = cpu_to_le16(uni_len);
+       chgblob->TargetName.BufferOffset = cpu_to_le32(blob_off);
+
+       /* Initialize random conn challenge */
+       get_random_bytes(sess->ntlmssp.cryptkey, sizeof(__u64));
+       memcpy(chgblob->Challenge, sess->ntlmssp.cryptkey,
+              CIFS_CRYPTO_KEY_SIZE);
+
+       /* Add Target Information to security buffer */
+       chgblob->TargetInfoArray.BufferOffset = cpu_to_le32(blob_len);
+
+       target_name = (__u8 *)chgblob + blob_off;
+       memcpy(target_name, name, uni_len);
+       tinfo = (struct target_info *)(target_name + uni_len);
+
+       chgblob->TargetInfoArray.Length = 0;
+       /* Add target info list for NetBIOS/DNS settings */
+       for (type = NTLMSSP_AV_NB_COMPUTER_NAME;
+            type <= NTLMSSP_AV_DNS_DOMAIN_NAME; type++) {
+               tinfo->Type = cpu_to_le16(type);
+               tinfo->Length = cpu_to_le16(uni_len);
+               memcpy(tinfo->Content, name, uni_len);
+               tinfo = (struct target_info *)((char *)tinfo + 4 + uni_len);
+               target_info_len += 4 + uni_len;
+       }
+
+       /* Add terminator subblock */
+       tinfo->Type = 0;
+       tinfo->Length = 0;
+       target_info_len += 4;
+
+       chgblob->TargetInfoArray.Length = cpu_to_le16(target_info_len);
+       chgblob->TargetInfoArray.MaximumLength = cpu_to_le16(target_info_len);
+       blob_len += target_info_len;
+       kfree(name);
+       ksmbd_debug(AUTH, "NTLMSSP SecurityBufferLength %d\n", blob_len);
+       return blob_len;
+}
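/*
 * A back-of-the-envelope sketch of the SecurityBufferLength computed above:
 * the fixed challenge header, one UTF-16 copy of the NetBIOS name as
 * TargetName, one AV pair per name type emitted by the loop (each a 4-byte
 * header plus the same UTF-16 name), and the 4-byte terminator pair.  The
 * 48-byte header size and the pair count of four are assumptions for this
 * standalone example; the kernel uses sizeof(struct challenge_message) and
 * the NTLMSSP_AV_* range.
 */
#include <stdio.h>

static unsigned int challenge_blob_len(unsigned int hdr_size,
                                       unsigned int name_chars,
                                       unsigned int av_pairs)
{
        unsigned int uni_len = 2 * name_chars;          /* UNICODE_LEN()     */
        unsigned int blob = hdr_size + uni_len;         /* header + target   */

        blob += av_pairs * (4 + uni_len);               /* AV pair entries   */
        blob += 4;                                      /* terminator pair   */
        return blob;
}

int main(void)
{
        /* e.g. a 48-byte header, an 8-character name, four AV pairs */
        printf("blob length: %u bytes\n", challenge_blob_len(48, 8, 4));
        return 0;
}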
+
+#ifdef CONFIG_SMB_SERVER_KERBEROS5
+int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
+                           int in_len, char *out_blob, int *out_len)
+{
+       struct ksmbd_spnego_authen_response *resp;
+       struct ksmbd_user *user = NULL;
+       int retval;
+
+       resp = ksmbd_ipc_spnego_authen_request(in_blob, in_len);
+       if (!resp) {
+               ksmbd_debug(AUTH, "SPNEGO_AUTHEN_REQUEST failure\n");
+               return -EINVAL;
+       }
+
+       if (!(resp->login_response.status & KSMBD_USER_FLAG_OK)) {
+               ksmbd_debug(AUTH, "krb5 authentication failure\n");
+               retval = -EPERM;
+               goto out;
+       }
+
+       if (*out_len <= resp->spnego_blob_len) {
+               ksmbd_debug(AUTH, "buf len %d, but blob len %d\n",
+                           *out_len, resp->spnego_blob_len);
+               retval = -EINVAL;
+               goto out;
+       }
+
+       if (resp->session_key_len > sizeof(sess->sess_key)) {
+               ksmbd_debug(AUTH, "session key is too long\n");
+               retval = -EINVAL;
+               goto out;
+       }
+
+       user = ksmbd_alloc_user(&resp->login_response);
+       if (!user) {
+               ksmbd_debug(AUTH, "login failure\n");
+               retval = -ENOMEM;
+               goto out;
+       }
+       sess->user = user;
+
+       memcpy(sess->sess_key, resp->payload, resp->session_key_len);
+       memcpy(out_blob, resp->payload + resp->session_key_len,
+              resp->spnego_blob_len);
+       *out_len = resp->spnego_blob_len;
+       retval = 0;
+out:
+       kvfree(resp);
+       return retval;
+}
+#else
+int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
+                           int in_len, char *out_blob, int *out_len)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
+/**
+ * ksmbd_sign_smb2_pdu() - function to generate packet signing
+ * @conn:      connection
+ * @key:       signing key
+ * @iov:        buffer iov array
+ * @n_vec:     number of iovecs
+ * @sig:       signature value generated for client request packet
+ *
+ */
+int ksmbd_sign_smb2_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
+                       int n_vec, char *sig)
+{
+       struct ksmbd_crypto_ctx *ctx;
+       int rc, i;
+
+       ctx = ksmbd_crypto_ctx_find_hmacsha256();
+       if (!ctx) {
+               ksmbd_debug(AUTH, "could not crypto alloc hmacsha256\n");
+               return -ENOMEM;
+       }
+
+       rc = crypto_shash_setkey(CRYPTO_HMACSHA256_TFM(ctx),
+                                key,
+                                SMB2_NTLMV2_SESSKEY_SIZE);
+       if (rc)
+               goto out;
+
+       rc = crypto_shash_init(CRYPTO_HMACSHA256(ctx));
+       if (rc) {
+               ksmbd_debug(AUTH, "hmacsha256 init error %d\n", rc);
+               goto out;
+       }
+
+       for (i = 0; i < n_vec; i++) {
+               rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx),
+                                        iov[i].iov_base,
+                                        iov[i].iov_len);
+               if (rc) {
+                       ksmbd_debug(AUTH, "hmacsha256 update error %d\n", rc);
+                       goto out;
+               }
+       }
+
+       rc = crypto_shash_final(CRYPTO_HMACSHA256(ctx), sig);
+       if (rc)
+               ksmbd_debug(AUTH, "hmacsha256 generation error %d\n", rc);
+out:
+       ksmbd_release_crypto_ctx(ctx);
+       return rc;
+}
+
+/**
+ * ksmbd_sign_smb3_pdu() - function to generate packet signing
+ * @conn:      connection
+ * @key:       signing key
+ * @iov:        buffer iov array
+ * @n_vec:     number of iovecs
+ * @sig:       signature value generated for client request packet
+ *
+ */
+int ksmbd_sign_smb3_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
+                       int n_vec, char *sig)
+{
+       struct ksmbd_crypto_ctx *ctx;
+       int rc, i;
+
+       ctx = ksmbd_crypto_ctx_find_cmacaes();
+       if (!ctx) {
+               ksmbd_debug(AUTH, "could not crypto alloc cmac\n");
+               return -ENOMEM;
+       }
+
+       rc = crypto_shash_setkey(CRYPTO_CMACAES_TFM(ctx),
+                                key,
+                                SMB2_CMACAES_SIZE);
+       if (rc)
+               goto out;
+
+       rc = crypto_shash_init(CRYPTO_CMACAES(ctx));
+       if (rc) {
+               ksmbd_debug(AUTH, "cmacaes init error %d\n", rc);
+               goto out;
+       }
+
+       for (i = 0; i < n_vec; i++) {
+               rc = crypto_shash_update(CRYPTO_CMACAES(ctx),
+                                        iov[i].iov_base,
+                                        iov[i].iov_len);
+               if (rc) {
+                       ksmbd_debug(AUTH, "cmacaes update error %d\n", rc);
+                       goto out;
+               }
+       }
+
+       rc = crypto_shash_final(CRYPTO_CMACAES(ctx), sig);
+       if (rc)
+               ksmbd_debug(AUTH, "cmacaes generation error %d\n", rc);
+out:
+       ksmbd_release_crypto_ctx(ctx);
+       return rc;
+}
+
+struct derivation {
+       struct kvec label;
+       struct kvec context;
+       bool binding;
+};
+
+static int generate_key(struct ksmbd_session *sess, struct kvec label,
+                       struct kvec context, __u8 *key, unsigned int key_size)
+{
+       unsigned char zero = 0x0;
+       __u8 i[4] = {0, 0, 0, 1};
+       __u8 L128[4] = {0, 0, 0, 128};
+       __u8 L256[4] = {0, 0, 1, 0};
+       int rc;
+       unsigned char prfhash[SMB2_HMACSHA256_SIZE];
+       unsigned char *hashptr = prfhash;
+       struct ksmbd_crypto_ctx *ctx;
+
+       memset(prfhash, 0x0, SMB2_HMACSHA256_SIZE);
+       memset(key, 0x0, key_size);
+
+       ctx = ksmbd_crypto_ctx_find_hmacsha256();
+       if (!ctx) {
+               ksmbd_debug(AUTH, "could not crypto alloc hmacsha256\n");
+               return -ENOMEM;
+       }
+
+       rc = crypto_shash_setkey(CRYPTO_HMACSHA256_TFM(ctx),
+                                sess->sess_key,
+                                SMB2_NTLMV2_SESSKEY_SIZE);
+       if (rc)
+               goto smb3signkey_ret;
+
+       rc = crypto_shash_init(CRYPTO_HMACSHA256(ctx));
+       if (rc) {
+               ksmbd_debug(AUTH, "hmacsha256 init error %d\n", rc);
+               goto smb3signkey_ret;
+       }
+
+       rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), i, 4);
+       if (rc) {
+               ksmbd_debug(AUTH, "could not update with counter\n");
+               goto smb3signkey_ret;
+       }
+
+       rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx),
+                                label.iov_base,
+                                label.iov_len);
+       if (rc) {
+               ksmbd_debug(AUTH, "could not update with label\n");
+               goto smb3signkey_ret;
+       }
+
+       rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), &zero, 1);
+       if (rc) {
+               ksmbd_debug(AUTH, "could not update with zero\n");
+               goto smb3signkey_ret;
+       }
+
+       rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx),
+                                context.iov_base,
+                                context.iov_len);
+       if (rc) {
+               ksmbd_debug(AUTH, "could not update with context\n");
+               goto smb3signkey_ret;
+       }
+
+       if (sess->conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+           sess->conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+               rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L256, 4);
+       else
+               rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L128, 4);
+       if (rc) {
+               ksmbd_debug(AUTH, "could not update with L\n");
+               goto smb3signkey_ret;
+       }
+
+       rc = crypto_shash_final(CRYPTO_HMACSHA256(ctx), hashptr);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not generate hmacsha256 hash, error %d\n",
+                           rc);
+               goto smb3signkey_ret;
+       }
+
+       memcpy(key, hashptr, key_size);
+
+smb3signkey_ret:
+       ksmbd_release_crypto_ctx(ctx);
+       return rc;
+}
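/*
 * generate_key() is the SMB3 key derivation (SP800-108 in counter mode with
 * HMAC-SHA256): the PRF input is i || label || 0x00 || context || L, with i
 * fixed at 1 and L at 128 or 256 bits depending on the cipher.  The
 * user-space sketch below only assembles that input buffer;
 * build_kdf_input() is illustrative and the HMAC itself stays with the
 * crypto_shash calls above.
 */
#include <string.h>
#include <stddef.h>

static size_t build_kdf_input(unsigned char *out, size_t out_size,
                              const void *label, size_t label_len,
                              const void *context, size_t context_len,
                              unsigned int key_bits)
{
        static const unsigned char counter[4] = { 0, 0, 0, 1 };
        unsigned char L[4] = { 0, 0,
                               (unsigned char)(key_bits >> 8),
                               (unsigned char)key_bits };
        size_t need = sizeof(counter) + label_len + 1 + context_len + sizeof(L);
        unsigned char *p = out;

        if (need > out_size)
                return 0;

        memcpy(p, counter, sizeof(counter));    p += sizeof(counter);
        memcpy(p, label, label_len);            p += label_len;
        *p++ = 0;                               /* single zero separator */
        memcpy(p, context, context_len);        p += context_len;
        memcpy(p, L, sizeof(L));                p += sizeof(L);
        return (size_t)(p - out);
}

int main(void)
{
        unsigned char buf[64];
        size_t n = build_kdf_input(buf, sizeof(buf),
                                   "SMB2AESCMAC", 12, "SmbSign", 8, 128);

        return n == 4 + 12 + 1 + 8 + 4 ? 0 : 1;
}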
+
+static int generate_smb3signingkey(struct ksmbd_session *sess,
+                                  struct ksmbd_conn *conn,
+                                  const struct derivation *signing)
+{
+       int rc;
+       struct channel *chann;
+       char *key;
+
+       chann = lookup_chann_list(sess, conn);
+       if (!chann)
+               return 0;
+
+       if (sess->conn->dialect >= SMB30_PROT_ID && signing->binding)
+               key = chann->smb3signingkey;
+       else
+               key = sess->smb3signingkey;
+
+       rc = generate_key(sess, signing->label, signing->context, key,
+                         SMB3_SIGN_KEY_SIZE);
+       if (rc)
+               return rc;
+
+       if (!(sess->conn->dialect >= SMB30_PROT_ID && signing->binding))
+               memcpy(chann->smb3signingkey, key, SMB3_SIGN_KEY_SIZE);
+
+       ksmbd_debug(AUTH, "dumping generated AES signing keys\n");
+       ksmbd_debug(AUTH, "Session Id    %llu\n", sess->id);
+       ksmbd_debug(AUTH, "Session Key   %*ph\n",
+                   SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
+       ksmbd_debug(AUTH, "Signing Key   %*ph\n",
+                   SMB3_SIGN_KEY_SIZE, key);
+       return 0;
+}
+
+int ksmbd_gen_smb30_signingkey(struct ksmbd_session *sess,
+                              struct ksmbd_conn *conn)
+{
+       struct derivation d;
+
+       d.label.iov_base = "SMB2AESCMAC";
+       d.label.iov_len = 12;
+       d.context.iov_base = "SmbSign";
+       d.context.iov_len = 8;
+       d.binding = conn->binding;
+
+       return generate_smb3signingkey(sess, conn, &d);
+}
+
+int ksmbd_gen_smb311_signingkey(struct ksmbd_session *sess,
+                               struct ksmbd_conn *conn)
+{
+       struct derivation d;
+
+       d.label.iov_base = "SMBSigningKey";
+       d.label.iov_len = 14;
+       if (conn->binding) {
+               struct preauth_session *preauth_sess;
+
+               preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id);
+               if (!preauth_sess)
+                       return -ENOENT;
+               d.context.iov_base = preauth_sess->Preauth_HashValue;
+       } else {
+               d.context.iov_base = sess->Preauth_HashValue;
+       }
+       d.context.iov_len = 64;
+       d.binding = conn->binding;
+
+       return generate_smb3signingkey(sess, conn, &d);
+}
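/*
 * A compact restatement of how the signing-key derivation inputs differ by
 * dialect: SMB 3.0/3.0.2 uses the fixed "SMB2AESCMAC"/"SmbSign" strings
 * (the lengths above include the trailing NUL), while SMB 3.1.1 uses
 * "SMBSigningKey" with the 64-byte preauth integrity hash as context.
 * struct sign_kdf_params and signing_params() are illustrative only.
 */
#include <stddef.h>
#include <stdio.h>

struct sign_kdf_params {
        const char *label;
        size_t label_len;
        size_t context_len;     /* preauth hash length for 3.1.1 */
};

static struct sign_kdf_params signing_params(int is_smb311)
{
        struct sign_kdf_params p;

        if (is_smb311) {
                p.label = "SMBSigningKey";
                p.label_len = sizeof("SMBSigningKey");  /* 14, incl. NUL   */
                p.context_len = 64;                     /* preauth hash    */
        } else {
                p.label = "SMB2AESCMAC";
                p.label_len = sizeof("SMB2AESCMAC");    /* 12, incl. NUL   */
                p.context_len = sizeof("SmbSign");      /* 8, incl. NUL    */
        }
        return p;
}

int main(void)
{
        printf("%zu\n", signing_params(1).label_len);   /* 14 */
        return 0;
}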
+
+struct derivation_twin {
+       struct derivation encryption;
+       struct derivation decryption;
+};
+
+static int generate_smb3encryptionkey(struct ksmbd_session *sess,
+                                     const struct derivation_twin *ptwin)
+{
+       int rc;
+
+       rc = generate_key(sess, ptwin->encryption.label,
+                         ptwin->encryption.context, sess->smb3encryptionkey,
+                         SMB3_ENC_DEC_KEY_SIZE);
+       if (rc)
+               return rc;
+
+       rc = generate_key(sess, ptwin->decryption.label,
+                         ptwin->decryption.context,
+                         sess->smb3decryptionkey, SMB3_ENC_DEC_KEY_SIZE);
+       if (rc)
+               return rc;
+
+       ksmbd_debug(AUTH, "dumping generated AES encryption keys\n");
+       ksmbd_debug(AUTH, "Cipher type   %d\n", sess->conn->cipher_type);
+       ksmbd_debug(AUTH, "Session Id    %llu\n", sess->id);
+       ksmbd_debug(AUTH, "Session Key   %*ph\n",
+                   SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
+       if (sess->conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+           sess->conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
+               ksmbd_debug(AUTH, "ServerIn Key  %*ph\n",
+                           SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3encryptionkey);
+               ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
+                           SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3decryptionkey);
+       } else {
+               ksmbd_debug(AUTH, "ServerIn Key  %*ph\n",
+                           SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3encryptionkey);
+               ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
+                           SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3decryptionkey);
+       }
+       return 0;
+}
+
+int ksmbd_gen_smb30_encryptionkey(struct ksmbd_session *sess)
+{
+       struct derivation_twin twin;
+       struct derivation *d;
+
+       d = &twin.encryption;
+       d->label.iov_base = "SMB2AESCCM";
+       d->label.iov_len = 11;
+       d->context.iov_base = "ServerOut";
+       d->context.iov_len = 10;
+
+       d = &twin.decryption;
+       d->label.iov_base = "SMB2AESCCM";
+       d->label.iov_len = 11;
+       d->context.iov_base = "ServerIn ";
+       d->context.iov_len = 10;
+
+       return generate_smb3encryptionkey(sess, &twin);
+}
+
+int ksmbd_gen_smb311_encryptionkey(struct ksmbd_session *sess)
+{
+       struct derivation_twin twin;
+       struct derivation *d;
+
+       d = &twin.encryption;
+       d->label.iov_base = "SMBS2CCipherKey";
+       d->label.iov_len = 16;
+       d->context.iov_base = sess->Preauth_HashValue;
+       d->context.iov_len = 64;
+
+       d = &twin.decryption;
+       d->label.iov_base = "SMBC2SCipherKey";
+       d->label.iov_len = 16;
+       d->context.iov_base = sess->Preauth_HashValue;
+       d->context.iov_len = 64;
+
+       return generate_smb3encryptionkey(sess, &twin);
+}
+
+int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf,
+                                    __u8 *pi_hash)
+{
+       int rc;
+       struct smb2_hdr *rcv_hdr = (struct smb2_hdr *)buf;
+       char *all_bytes_msg = (char *)&rcv_hdr->ProtocolId;
+       int msg_size = be32_to_cpu(rcv_hdr->smb2_buf_length);
+       struct ksmbd_crypto_ctx *ctx = NULL;
+
+       if (conn->preauth_info->Preauth_HashId !=
+           SMB2_PREAUTH_INTEGRITY_SHA512)
+               return -EINVAL;
+
+       ctx = ksmbd_crypto_ctx_find_sha512();
+       if (!ctx) {
+               ksmbd_debug(AUTH, "could not alloc sha512\n");
+               return -ENOMEM;
+       }
+
+       rc = crypto_shash_init(CRYPTO_SHA512(ctx));
+       if (rc) {
+               ksmbd_debug(AUTH, "could not init shash\n");
+               goto out;
+       }
+
+       rc = crypto_shash_update(CRYPTO_SHA512(ctx), pi_hash, 64);
+       if (rc) {
+               ksmbd_debug(AUTH, "could not update with preauth hash\n");
+               goto out;
+       }
+
+       rc = crypto_shash_update(CRYPTO_SHA512(ctx), all_bytes_msg, msg_size);
+       if (rc) {
+               ksmbd_debug(AUTH, "could not update with request data\n");
+               goto out;
+       }
+
+       rc = crypto_shash_final(CRYPTO_SHA512(ctx), pi_hash);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not generate hash err : %d\n", rc);
+               goto out;
+       }
+out:
+       ksmbd_release_crypto_ctx(ctx);
+       return rc;
+}
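/*
 * The preauth integrity hash is chained: starting from a 64-byte zero
 * value, every relevant negotiate/session-setup message updates it as
 * H = SHA-512(H_prev || full_message), which is why the function above
 * feeds pi_hash and then the whole PDU into the same shash.  The sketch
 * below only assembles that input; concat_preauth_input() is illustrative
 * and the SHA-512 stays with the crypto API.
 */
#include <string.h>
#include <stddef.h>

#define EX_PREAUTH_HASH_SIZE 64

static size_t concat_preauth_input(unsigned char *out, size_t out_size,
                                   const unsigned char *prev_hash,
                                   const void *msg, size_t msg_len)
{
        if (out_size < EX_PREAUTH_HASH_SIZE + msg_len)
                return 0;
        memcpy(out, prev_hash, EX_PREAUTH_HASH_SIZE);
        memcpy(out + EX_PREAUTH_HASH_SIZE, msg, msg_len);
        return EX_PREAUTH_HASH_SIZE + msg_len;
}

int main(void)
{
        unsigned char zero[EX_PREAUTH_HASH_SIZE] = { 0 };
        unsigned char buf[256];

        return concat_preauth_input(buf, sizeof(buf), zero, "negotiate", 9)
                == EX_PREAUTH_HASH_SIZE + 9 ? 0 : 1;
}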
+
+int ksmbd_gen_sd_hash(struct ksmbd_conn *conn, char *sd_buf, int len,
+                     __u8 *pi_hash)
+{
+       int rc;
+       struct ksmbd_crypto_ctx *ctx = NULL;
+
+       ctx = ksmbd_crypto_ctx_find_sha256();
+       if (!ctx) {
+               ksmbd_debug(AUTH, "could not alloc sha256\n");
+               return -ENOMEM;
+       }
+
+       rc = crypto_shash_init(CRYPTO_SHA256(ctx));
+       if (rc) {
+               ksmbd_debug(AUTH, "could not init shash\n");
+               goto out;
+       }
+
+       rc = crypto_shash_update(CRYPTO_SHA256(ctx), sd_buf, len);
+       if (rc) {
+               ksmbd_debug(AUTH, "could not update with sd_buf\n");
+               goto out;
+       }
+
+       rc = crypto_shash_final(CRYPTO_SHA256(ctx), pi_hash);
+       if (rc) {
+               ksmbd_debug(AUTH, "Could not generate hash err : %d\n", rc);
+               goto out;
+       }
+out:
+       ksmbd_release_crypto_ctx(ctx);
+       return rc;
+}
+
+static int ksmbd_get_encryption_key(struct ksmbd_conn *conn, __u64 ses_id,
+                                   int enc, u8 *key)
+{
+       struct ksmbd_session *sess;
+       u8 *ses_enc_key;
+
+       sess = ksmbd_session_lookup_all(conn, ses_id);
+       if (!sess)
+               return -EINVAL;
+
+       ses_enc_key = enc ? sess->smb3encryptionkey :
+               sess->smb3decryptionkey;
+       memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
+
+       return 0;
+}
+
+static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
+                                  unsigned int buflen)
+{
+       void *addr;
+
+       if (is_vmalloc_addr(buf))
+               addr = vmalloc_to_page(buf);
+       else
+               addr = virt_to_page(buf);
+       sg_set_page(sg, addr, buflen, offset_in_page(buf));
+}
+
+static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
+                                        u8 *sign)
+{
+       struct scatterlist *sg;
+       unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 24;
+       int i, nr_entries[3] = {0}, total_entries = 0, sg_idx = 0;
+
+       if (!nvec)
+               return NULL;
+
+       for (i = 0; i < nvec - 1; i++) {
+               unsigned long kaddr = (unsigned long)iov[i + 1].iov_base;
+
+               if (is_vmalloc_addr(iov[i + 1].iov_base)) {
+                       nr_entries[i] = ((kaddr + iov[i + 1].iov_len +
+                                       PAGE_SIZE - 1) >> PAGE_SHIFT) -
+                               (kaddr >> PAGE_SHIFT);
+               } else {
+                       nr_entries[i]++;
+               }
+               total_entries += nr_entries[i];
+       }
+
+       /* Add two entries for transform header and signature */
+       total_entries += 2;
+
+       sg = kmalloc_array(total_entries, sizeof(struct scatterlist), GFP_KERNEL);
+       if (!sg)
+               return NULL;
+
+       sg_init_table(sg, total_entries);
+       smb2_sg_set_buf(&sg[sg_idx++], iov[0].iov_base + 24, assoc_data_len);
+       for (i = 0; i < nvec - 1; i++) {
+               void *data = iov[i + 1].iov_base;
+               int len = iov[i + 1].iov_len;
+
+               if (is_vmalloc_addr(data)) {
+                       int j, offset = offset_in_page(data);
+
+                       for (j = 0; j < nr_entries[i]; j++) {
+                               unsigned int bytes = PAGE_SIZE - offset;
+
+                               if (!len)
+                                       break;
+
+                               if (bytes > len)
+                                       bytes = len;
+
+                               sg_set_page(&sg[sg_idx++],
+                                           vmalloc_to_page(data), bytes,
+                                           offset_in_page(data));
+
+                               data += bytes;
+                               len -= bytes;
+                               offset = 0;
+                       }
+               } else {
+                       sg_set_page(&sg[sg_idx++], virt_to_page(data), len,
+                                   offset_in_page(data));
+               }
+       }
+       smb2_sg_set_buf(&sg[sg_idx], sign, SMB2_SIGNATURE_SIZE);
+       return sg;
+}
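/*
 * ksmbd_init_sg() needs one scatterlist entry per page touched by a
 * vmalloc'ed payload.  The page-count arithmetic it uses is reproduced
 * below for a standalone example; the 4 KiB EX_PAGE_SIZE/EX_PAGE_SHIFT
 * values are assumptions here, the kernel of course uses
 * PAGE_SIZE/PAGE_SHIFT.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

static unsigned long sg_entries_for(unsigned long kaddr, unsigned long len)
{
        return ((kaddr + len + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT) -
               (kaddr >> EX_PAGE_SHIFT);
}

int main(void)
{
        /* ten bytes straddling a page boundary need two entries */
        printf("%lu\n", sg_entries_for(0x1000 - 4, 10));
        return 0;
}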
+
+int ksmbd_crypt_message(struct ksmbd_conn *conn, struct kvec *iov,
+                       unsigned int nvec, int enc)
+{
+       struct smb2_transform_hdr *tr_hdr =
+               (struct smb2_transform_hdr *)iov[0].iov_base;
+       unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 24;
+       int rc;
+       struct scatterlist *sg;
+       u8 sign[SMB2_SIGNATURE_SIZE] = {};
+       u8 key[SMB3_ENC_DEC_KEY_SIZE];
+       struct aead_request *req;
+       char *iv;
+       unsigned int iv_len;
+       struct crypto_aead *tfm;
+       unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
+       struct ksmbd_crypto_ctx *ctx;
+
+       rc = ksmbd_get_encryption_key(conn,
+                                     le64_to_cpu(tr_hdr->SessionId),
+                                     enc,
+                                     key);
+       if (rc) {
+               pr_err("Could not get %scryption key\n", enc ? "en" : "de");
+               return rc;
+       }
+
+       if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
+           conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+               ctx = ksmbd_crypto_ctx_find_gcm();
+       else
+               ctx = ksmbd_crypto_ctx_find_ccm();
+       if (!ctx) {
+               pr_err("crypto alloc failed\n");
+               return -ENOMEM;
+       }
+
+       if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
+           conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+               tfm = CRYPTO_GCM(ctx);
+       else
+               tfm = CRYPTO_CCM(ctx);
+
+       if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+           conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+               rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
+       else
+               rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
+       if (rc) {
+               pr_err("Failed to set aead key %d\n", rc);
+               goto free_ctx;
+       }
+
+       rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
+       if (rc) {
+               pr_err("Failed to set authsize %d\n", rc);
+               goto free_ctx;
+       }
+
+       req = aead_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               rc = -ENOMEM;
+               goto free_ctx;
+       }
+
+       if (!enc) {
+               memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
+               crypt_len += SMB2_SIGNATURE_SIZE;
+       }
+
+       sg = ksmbd_init_sg(iov, nvec, sign);
+       if (!sg) {
+               pr_err("Failed to init sg\n");
+               rc = -ENOMEM;
+               goto free_req;
+       }
+
+       iv_len = crypto_aead_ivsize(tfm);
+       iv = kzalloc(iv_len, GFP_KERNEL);
+       if (!iv) {
+               rc = -ENOMEM;
+               goto free_sg;
+       }
+
+       if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
+           conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
+               memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
+       } else {
+               iv[0] = 3;
+               memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
+       }
+
+       aead_request_set_crypt(req, sg, sg, crypt_len, iv);
+       aead_request_set_ad(req, assoc_data_len);
+       aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+
+       if (enc)
+               rc = crypto_aead_encrypt(req);
+       else
+               rc = crypto_aead_decrypt(req);
+       if (rc)
+               goto free_iv;
+
+       if (enc)
+               memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
+
+free_iv:
+       kfree(iv);
+free_sg:
+       kfree(sg);
+free_req:
+       kfree(req);
+free_ctx:
+       ksmbd_release_crypto_ctx(ctx);
+       return rc;
+}
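/*
 * A minimal sketch of the IV selection above: the GCM ciphers copy the
 * 12-byte transform-header nonce straight into the IV, while the CCM
 * ciphers prefix an 11-byte nonce with the flags byte 0x03 (a 4-byte
 * length field in the CCM B0 block).  build_smb3_iv() and the EX_* nonce
 * sizes are illustrative; the kernel uses SMB3_AES_GCM_NONCE and
 * SMB3_AES_CCM_NONCE.
 */
#include <string.h>

#define EX_GCM_NONCE_LEN 12
#define EX_CCM_NONCE_LEN 11

static void build_smb3_iv(unsigned char *iv, unsigned int iv_len,
                          const unsigned char *nonce, int is_gcm)
{
        memset(iv, 0, iv_len);
        if (is_gcm) {
                memcpy(iv, nonce, EX_GCM_NONCE_LEN);
        } else {
                iv[0] = 3;      /* CCM flags: L' = 3 -> 4-byte length field */
                memcpy(iv + 1, nonce, EX_CCM_NONCE_LEN);
        }
}

int main(void)
{
        unsigned char nonce[12] = { 0 }, iv[16];

        build_smb3_iv(iv, sizeof(iv), nonce, 0);
        return iv[0] == 3 ? 0 : 1;
}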
diff --git a/fs/ksmbd/auth.h b/fs/ksmbd/auth.h
new file mode 100644 (file)
index 0000000..9c2d4ba
--- /dev/null
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __AUTH_H__
+#define __AUTH_H__
+
+#include "ntlmssp.h"
+
+#ifdef CONFIG_SMB_SERVER_KERBEROS5
+#define AUTH_GSS_LENGTH                96
+#define AUTH_GSS_PADDING       0
+#else
+#define AUTH_GSS_LENGTH                74
+#define AUTH_GSS_PADDING       6
+#endif
+
+#define CIFS_HMAC_MD5_HASH_SIZE        (16)
+#define CIFS_NTHASH_SIZE       (16)
+
+/*
+ * Size of the ntlm client response
+ */
+#define CIFS_AUTH_RESP_SIZE            24
+#define CIFS_SMB1_SIGNATURE_SIZE       8
+#define CIFS_SMB1_SESSKEY_SIZE         16
+
+#define KSMBD_AUTH_NTLMSSP     0x0001
+#define KSMBD_AUTH_KRB5                0x0002
+#define KSMBD_AUTH_MSKRB5      0x0004
+#define KSMBD_AUTH_KRB5U2U     0x0008
+
+struct ksmbd_session;
+struct ksmbd_conn;
+struct kvec;
+
+int ksmbd_crypt_message(struct ksmbd_conn *conn, struct kvec *iov,
+                       unsigned int nvec, int enc);
+void ksmbd_copy_gss_neg_header(void *buf);
+int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf);
+int ksmbd_auth_ntlmv2(struct ksmbd_session *sess, struct ntlmv2_resp *ntlmv2,
+                     int blen, char *domain_name);
+int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
+                                  int blob_len, struct ksmbd_session *sess);
+int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
+                                 int blob_len, struct ksmbd_session *sess);
+unsigned int
+ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
+                                  struct ksmbd_session *sess);
+int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
+                           int in_len, char *out_blob, int *out_len);
+int ksmbd_sign_smb2_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
+                       int n_vec, char *sig);
+int ksmbd_sign_smb3_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
+                       int n_vec, char *sig);
+int ksmbd_gen_smb30_signingkey(struct ksmbd_session *sess,
+                              struct ksmbd_conn *conn);
+int ksmbd_gen_smb311_signingkey(struct ksmbd_session *sess,
+                               struct ksmbd_conn *conn);
+int ksmbd_gen_smb30_encryptionkey(struct ksmbd_session *sess);
+int ksmbd_gen_smb311_encryptionkey(struct ksmbd_session *sess);
+int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf,
+                                    __u8 *pi_hash);
+int ksmbd_gen_sd_hash(struct ksmbd_conn *conn, char *sd_buf, int len,
+                     __u8 *pi_hash);
+#endif
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
new file mode 100644 (file)
index 0000000..af086d3
--- /dev/null
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/mutex.h>
+#include <linux/freezer.h>
+#include <linux/module.h>
+
+#include "server.h"
+#include "smb_common.h"
+#include "mgmt/ksmbd_ida.h"
+#include "connection.h"
+#include "transport_tcp.h"
+#include "transport_rdma.h"
+
+static DEFINE_MUTEX(init_lock);
+
+static struct ksmbd_conn_ops default_conn_ops;
+
+LIST_HEAD(conn_list);
+DEFINE_RWLOCK(conn_list_lock);
+
+/**
+ * ksmbd_conn_free() - free resources of the connection instance
+ *
+ * @conn:      connection instance to be cleaned up
+ *
+ * During thread termination, the corresponding conn instance's
+ * resources (socket/memory) are released and finally the conn object is freed.
+ */
+void ksmbd_conn_free(struct ksmbd_conn *conn)
+{
+       write_lock(&conn_list_lock);
+       list_del(&conn->conns_list);
+       write_unlock(&conn_list_lock);
+
+       kvfree(conn->request_buf);
+       kfree(conn->preauth_info);
+       kfree(conn);
+}
+
+/**
+ * ksmbd_conn_alloc() - initialize a new connection instance
+ *
+ * Return:     ksmbd_conn struct on success, otherwise NULL
+ */
+struct ksmbd_conn *ksmbd_conn_alloc(void)
+{
+       struct ksmbd_conn *conn;
+
+       conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
+       if (!conn)
+               return NULL;
+
+       conn->need_neg = true;
+       conn->status = KSMBD_SESS_NEW;
+       conn->local_nls = load_nls("utf8");
+       if (!conn->local_nls)
+               conn->local_nls = load_nls_default();
+       atomic_set(&conn->req_running, 0);
+       atomic_set(&conn->r_count, 0);
+       init_waitqueue_head(&conn->req_running_q);
+       INIT_LIST_HEAD(&conn->conns_list);
+       INIT_LIST_HEAD(&conn->sessions);
+       INIT_LIST_HEAD(&conn->requests);
+       INIT_LIST_HEAD(&conn->async_requests);
+       spin_lock_init(&conn->request_lock);
+       spin_lock_init(&conn->credits_lock);
+       ida_init(&conn->async_ida);
+
+       spin_lock_init(&conn->llist_lock);
+       INIT_LIST_HEAD(&conn->lock_list);
+
+       write_lock(&conn_list_lock);
+       list_add(&conn->conns_list, &conn_list);
+       write_unlock(&conn_list_lock);
+       return conn;
+}
+
+bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+{
+       struct ksmbd_conn *t;
+       bool ret = false;
+
+       read_lock(&conn_list_lock);
+       list_for_each_entry(t, &conn_list, conns_list) {
+               if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
+                       continue;
+
+               ret = true;
+               break;
+       }
+       read_unlock(&conn_list_lock);
+       return ret;
+}
+
+void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct list_head *requests_queue = NULL;
+
+       if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {
+               requests_queue = &conn->requests;
+               work->syncronous = true;
+       }
+
+       if (requests_queue) {
+               atomic_inc(&conn->req_running);
+               spin_lock(&conn->request_lock);
+               list_add_tail(&work->request_entry, requests_queue);
+               spin_unlock(&conn->request_lock);
+       }
+}
+
+int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       int ret = 1;
+
+       if (list_empty(&work->request_entry) &&
+           list_empty(&work->async_request_entry))
+               return 0;
+
+       if (!work->multiRsp)
+               atomic_dec(&conn->req_running);
+       spin_lock(&conn->request_lock);
+       if (!work->multiRsp) {
+               list_del_init(&work->request_entry);
+               if (!work->syncronous)
+                       list_del_init(&work->async_request_entry);
+               ret = 0;
+       }
+       spin_unlock(&conn->request_lock);
+
+       wake_up_all(&conn->req_running_q);
+       return ret;
+}
+
+static void ksmbd_conn_lock(struct ksmbd_conn *conn)
+{
+       mutex_lock(&conn->srv_mutex);
+}
+
+static void ksmbd_conn_unlock(struct ksmbd_conn *conn)
+{
+       mutex_unlock(&conn->srv_mutex);
+}
+
+void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
+{
+       wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
+}
+
+int ksmbd_conn_write(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb_hdr *rsp_hdr = work->response_buf;
+       size_t len = 0;
+       int sent;
+       struct kvec iov[3];
+       int iov_idx = 0;
+
+       ksmbd_conn_try_dequeue_request(work);
+       if (!rsp_hdr) {
+               pr_err("NULL response header\n");
+               return -EINVAL;
+       }
+
+       if (work->tr_buf) {
+               iov[iov_idx] = (struct kvec) { work->tr_buf,
+                               sizeof(struct smb2_transform_hdr) };
+               len += iov[iov_idx++].iov_len;
+       }
+
+       if (work->aux_payload_sz) {
+               iov[iov_idx] = (struct kvec) { rsp_hdr, work->resp_hdr_sz };
+               len += iov[iov_idx++].iov_len;
+               iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
+               len += iov[iov_idx++].iov_len;
+       } else {
+               if (work->tr_buf)
+                       iov[iov_idx].iov_len = work->resp_hdr_sz;
+               else
+                       iov[iov_idx].iov_len = get_rfc1002_len(rsp_hdr) + 4;
+               iov[iov_idx].iov_base = rsp_hdr;
+               len += iov[iov_idx++].iov_len;
+       }
+
+       ksmbd_conn_lock(conn);
+       sent = conn->transport->ops->writev(conn->transport, &iov[0],
+                                       iov_idx, len,
+                                       work->need_invalidate_rkey,
+                                       work->remote_key);
+       ksmbd_conn_unlock(conn);
+
+       if (sent < 0) {
+               pr_err("Failed to send message: %d\n", sent);
+               return sent;
+       }
+
+       return 0;
+}
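/*
 * The response above goes out as at most three iovecs.  A compact
 * restatement of the possible layouts and the length accounting:
 * [transform header][SMB2 header][aux payload] for encrypted responses with
 * a payload, [SMB2 header][aux payload] without encryption, and a single
 * header vector otherwise (sized from the RFC1002 field when no transform
 * header is present).  conn_write_layout() and its inputs are illustrative.
 */
#include <stddef.h>
#include <stdio.h>

struct resp_layout {
        int nvec;
        size_t len;
};

static struct resp_layout conn_write_layout(size_t tr_hdr_sz, size_t rsp_hdr_sz,
                                            size_t aux_sz, size_t rfc1002_len)
{
        struct resp_layout l = { 0, 0 };

        if (tr_hdr_sz) {                        /* encrypted response        */
                l.len += tr_hdr_sz;
                l.nvec++;
        }
        if (aux_sz) {                           /* header + separate payload */
                l.len += rsp_hdr_sz + aux_sz;
                l.nvec += 2;
        } else {                                /* single header vector      */
                l.len += tr_hdr_sz ? rsp_hdr_sz : rfc1002_len + 4;
                l.nvec++;
        }
        return l;
}

int main(void)
{
        struct resp_layout l = conn_write_layout(52, 64, 4096, 0);

        printf("%d iovecs, %zu bytes\n", l.nvec, l.len);   /* 3 iovecs */
        return 0;
}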
+
+int ksmbd_conn_rdma_read(struct ksmbd_conn *conn, void *buf,
+                        unsigned int buflen, u32 remote_key, u64 remote_offset,
+                        u32 remote_len)
+{
+       int ret = -EINVAL;
+
+       if (conn->transport->ops->rdma_read)
+               ret = conn->transport->ops->rdma_read(conn->transport,
+                                                     buf, buflen,
+                                                     remote_key, remote_offset,
+                                                     remote_len);
+       return ret;
+}
+
+int ksmbd_conn_rdma_write(struct ksmbd_conn *conn, void *buf,
+                         unsigned int buflen, u32 remote_key,
+                         u64 remote_offset, u32 remote_len)
+{
+       int ret = -EINVAL;
+
+       if (conn->transport->ops->rdma_write)
+               ret = conn->transport->ops->rdma_write(conn->transport,
+                                                      buf, buflen,
+                                                      remote_key, remote_offset,
+                                                      remote_len);
+       return ret;
+}
+
+bool ksmbd_conn_alive(struct ksmbd_conn *conn)
+{
+       if (!ksmbd_server_running())
+               return false;
+
+       if (conn->status == KSMBD_SESS_EXITING)
+               return false;
+
+       if (kthread_should_stop())
+               return false;
+
+       if (atomic_read(&conn->stats.open_files_count) > 0)
+               return true;
+
+       /*
+        * Stop the current session if the time since the last client request
+        * exceeds the user-configured deadtime and no files are open.
+        */
+       if (server_conf.deadtime > 0 &&
+           time_after(jiffies, conn->last_active + server_conf.deadtime)) {
+               ksmbd_debug(CONN, "No response from client in %lu minutes\n",
+                           server_conf.deadtime / SMB_ECHO_INTERVAL);
+               return false;
+       }
+       return true;
+}
+
+/**
+ * ksmbd_conn_handler_loop() - session thread to listen for new smb requests
+ * @p:         connection instance
+ *
+ * One thread each per connection
+ *
+ * Return:     0 on success
+ */
+int ksmbd_conn_handler_loop(void *p)
+{
+       struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
+       struct ksmbd_transport *t = conn->transport;
+       unsigned int pdu_size;
+       char hdr_buf[4] = {0,};
+       int size;
+
+       mutex_init(&conn->srv_mutex);
+       __module_get(THIS_MODULE);
+
+       if (t->ops->prepare && t->ops->prepare(t))
+               goto out;
+
+       conn->last_active = jiffies;
+       while (ksmbd_conn_alive(conn)) {
+               if (try_to_freeze())
+                       continue;
+
+               kvfree(conn->request_buf);
+               conn->request_buf = NULL;
+
+               size = t->ops->read(t, hdr_buf, sizeof(hdr_buf));
+               if (size != sizeof(hdr_buf))
+                       break;
+
+               pdu_size = get_rfc1002_len(hdr_buf);
+               ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
+
+               /* make sure we have enough to get to SMB header end */
+               if (!ksmbd_pdu_size_has_room(pdu_size)) {
+                       ksmbd_debug(CONN, "SMB request too short (%u bytes)\n",
+                                   pdu_size);
+                       continue;
+               }
+
+               /* 4 for rfc1002 length field */
+               size = pdu_size + 4;
+               conn->request_buf = kvmalloc(size, GFP_KERNEL);
+               if (!conn->request_buf)
+                       continue;
+
+               memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
+               if (!ksmbd_smb_request(conn))
+                       break;
+
+               /*
+                * We already read 4 bytes to find out PDU size, now
+                * read in PDU
+                */
+               size = t->ops->read(t, conn->request_buf + 4, pdu_size);
+               if (size < 0) {
+                       pr_err("sock_read failed: %d\n", size);
+                       break;
+               }
+
+               if (size != pdu_size) {
+                       pr_err("PDU error. Read: %d, Expected: %u\n",
+                              size, pdu_size);
+                       continue;
+               }
+
+               if (!default_conn_ops.process_fn) {
+                       pr_err("No connection request callback\n");
+                       break;
+               }
+
+               if (default_conn_ops.process_fn(conn)) {
+                       pr_err("Cannot handle request\n");
+                       break;
+               }
+       }
+
+out:
+       /* Wait till all references to the server object are dropped */
+       while (atomic_read(&conn->r_count) > 0)
+               schedule_timeout(HZ);
+
+       unload_nls(conn->local_nls);
+       if (default_conn_ops.terminate_fn)
+               default_conn_ops.terminate_fn(conn);
+       t->ops->disconnect(t);
+       module_put(THIS_MODULE);
+       return 0;
+}
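/*
 * The receive loop above frames every PDU with the 4-byte NetBIOS session
 * service header: the low 24 bits of the big-endian word carry the PDU
 * length, which is read first so the full request can be allocated and
 * pulled in with a second read.  rfc1002_len() below is an illustrative
 * user-space equivalent of what get_rfc1002_len() yields for that header.
 */
#include <stdio.h>

static unsigned int rfc1002_len(const unsigned char hdr[4])
{
        return ((unsigned int)hdr[1] << 16) |
               ((unsigned int)hdr[2] << 8)  |
                (unsigned int)hdr[3];
}

int main(void)
{
        unsigned char hdr[4] = { 0x00, 0x00, 0x01, 0x44 };

        printf("pdu size %u\n", rfc1002_len(hdr));   /* 324 */
        return 0;
}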
+
+void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
+{
+       default_conn_ops.process_fn = ops->process_fn;
+       default_conn_ops.terminate_fn = ops->terminate_fn;
+}
+
+int ksmbd_conn_transport_init(void)
+{
+       int ret;
+
+       mutex_lock(&init_lock);
+       ret = ksmbd_tcp_init();
+       if (ret) {
+               pr_err("Failed to init TCP subsystem: %d\n", ret);
+               goto out;
+       }
+
+       ret = ksmbd_rdma_init();
+       if (ret) {
+               pr_err("Failed to init RDMA subsystem: %d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&init_lock);
+       return ret;
+}
+
+static void stop_sessions(void)
+{
+       struct ksmbd_conn *conn;
+
+again:
+       read_lock(&conn_list_lock);
+       list_for_each_entry(conn, &conn_list, conns_list) {
+               struct task_struct *task;
+
+               task = conn->transport->handler;
+               if (task)
+                       ksmbd_debug(CONN, "Stop session handler %s/%d\n",
+                                   task->comm, task_pid_nr(task));
+               conn->status = KSMBD_SESS_EXITING;
+       }
+       read_unlock(&conn_list_lock);
+
+       if (!list_empty(&conn_list)) {
+               schedule_timeout_interruptible(HZ / 10); /* 100ms */
+               goto again;
+       }
+}
+
+void ksmbd_conn_transport_destroy(void)
+{
+       mutex_lock(&init_lock);
+       ksmbd_tcp_destroy();
+       ksmbd_rdma_destroy();
+       stop_sessions();
+       mutex_unlock(&init_lock);
+}
diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
new file mode 100644 (file)
index 0000000..e5403c5
--- /dev/null
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __KSMBD_CONNECTION_H__
+#define __KSMBD_CONNECTION_H__
+
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/inet_connection_sock.h>
+#include <net/request_sock.h>
+#include <linux/kthread.h>
+#include <linux/nls.h>
+
+#include "smb_common.h"
+#include "ksmbd_work.h"
+
+#define KSMBD_SOCKET_BACKLOG           16
+
+/*
+ * WARNING
+ *
+ * This is nothing but a HACK. Session status should move to channel
+ * or to session. As of now we have 1 tcp_conn : 1 ksmbd_session, but
+ * we need to change it to 1 tcp_conn : N ksmbd_sessions.
+ */
+enum {
+       KSMBD_SESS_NEW = 0,
+       KSMBD_SESS_GOOD,
+       KSMBD_SESS_EXITING,
+       KSMBD_SESS_NEED_RECONNECT,
+       KSMBD_SESS_NEED_NEGOTIATE
+};
+
+struct ksmbd_stats {
+       atomic_t                        open_files_count;
+       atomic64_t                      request_served;
+};
+
+struct ksmbd_transport;
+
+struct ksmbd_conn {
+       struct smb_version_values       *vals;
+       struct smb_version_ops          *ops;
+       struct smb_version_cmds         *cmds;
+       unsigned int                    max_cmds;
+       struct mutex                    srv_mutex;
+       int                             status;
+       unsigned int                    cli_cap;
+       char                            *request_buf;
+       struct ksmbd_transport          *transport;
+       struct nls_table                *local_nls;
+       struct list_head                conns_list;
+       /* smb sessions, one per user */
+       struct list_head                sessions;
+       unsigned long                   last_active;
+       /* How many requests are running currently */
+       atomic_t                        req_running;
+       /* References made to this server object */
+       atomic_t                        r_count;
+       unsigned short                  total_credits;
+       unsigned short                  max_credits;
+       spinlock_t                      credits_lock;
+       wait_queue_head_t               req_running_q;
+       /* Lock to protect the requests list */
+       spinlock_t                      request_lock;
+       struct list_head                requests;
+       struct list_head                async_requests;
+       int                             connection_type;
+       struct ksmbd_stats              stats;
+       char                            ClientGUID[SMB2_CLIENT_GUID_SIZE];
+       union {
+               /* pending trans request table */
+               struct trans_state      *recent_trans;
+               /* Used by ntlmssp */
+               char                    *ntlmssp_cryptkey;
+       };
+
+       spinlock_t                      llist_lock;
+       struct list_head                lock_list;
+
+       struct preauth_integrity_info   *preauth_info;
+
+       bool                            need_neg;
+       unsigned int                    auth_mechs;
+       unsigned int                    preferred_auth_mech;
+       bool                            sign;
+       bool                            use_spnego:1;
+       __u16                           cli_sec_mode;
+       __u16                           srv_sec_mode;
+       /* dialect index that server chose */
+       __u16                           dialect;
+
+       char                            *mechToken;
+
+       struct ksmbd_conn_ops   *conn_ops;
+
+       /* Preauth Session Table */
+       struct list_head                preauth_sess_table;
+
+       struct sockaddr_storage         peer_addr;
+
+       /* Identifier for async message */
+       struct ida                      async_ida;
+
+       __le16                          cipher_type;
+       __le16                          compress_algorithm;
+       bool                            posix_ext_supported;
+       bool                            signing_negotiated;
+       __le16                          signing_algorithm;
+       bool                            binding;
+};
+
+struct ksmbd_conn_ops {
+       int     (*process_fn)(struct ksmbd_conn *conn);
+       int     (*terminate_fn)(struct ksmbd_conn *conn);
+};
+
+struct ksmbd_transport_ops {
+       int (*prepare)(struct ksmbd_transport *t);
+       void (*disconnect)(struct ksmbd_transport *t);
+       int (*read)(struct ksmbd_transport *t, char *buf, unsigned int size);
+       int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
+                     int size, bool need_invalidate_rkey,
+                     unsigned int remote_key);
+       int (*rdma_read)(struct ksmbd_transport *t, void *buf, unsigned int len,
+                        u32 remote_key, u64 remote_offset, u32 remote_len);
+       int (*rdma_write)(struct ksmbd_transport *t, void *buf,
+                         unsigned int len, u32 remote_key, u64 remote_offset,
+                         u32 remote_len);
+};
+
+struct ksmbd_transport {
+       struct ksmbd_conn               *conn;
+       struct ksmbd_transport_ops      *ops;
+       struct task_struct              *handler;
+};
+
+#define KSMBD_TCP_RECV_TIMEOUT (7 * HZ)
+#define KSMBD_TCP_SEND_TIMEOUT (5 * HZ)
+#define KSMBD_TCP_PEER_SOCKADDR(c)     ((struct sockaddr *)&((c)->peer_addr))
+
+extern struct list_head conn_list;
+extern rwlock_t conn_list_lock;
+
+bool ksmbd_conn_alive(struct ksmbd_conn *conn);
+void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
+struct ksmbd_conn *ksmbd_conn_alloc(void);
+void ksmbd_conn_free(struct ksmbd_conn *conn);
+bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
+int ksmbd_conn_write(struct ksmbd_work *work);
+int ksmbd_conn_rdma_read(struct ksmbd_conn *conn, void *buf,
+                        unsigned int buflen, u32 remote_key, u64 remote_offset,
+                        u32 remote_len);
+int ksmbd_conn_rdma_write(struct ksmbd_conn *conn, void *buf,
+                         unsigned int buflen, u32 remote_key, u64 remote_offset,
+                         u32 remote_len);
+void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
+int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
+void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
+int ksmbd_conn_handler_loop(void *p);
+int ksmbd_conn_transport_init(void);
+void ksmbd_conn_transport_destroy(void);
+
+/*
+ * WARNING
+ *
+ * This is a hack. We will move status to a proper place once we land
+ * a multi-sessions support.
+ */
+static inline bool ksmbd_conn_good(struct ksmbd_work *work)
+{
+       return work->conn->status == KSMBD_SESS_GOOD;
+}
+
+static inline bool ksmbd_conn_need_negotiate(struct ksmbd_work *work)
+{
+       return work->conn->status == KSMBD_SESS_NEED_NEGOTIATE;
+}
+
+static inline bool ksmbd_conn_need_reconnect(struct ksmbd_work *work)
+{
+       return work->conn->status == KSMBD_SESS_NEED_RECONNECT;
+}
+
+static inline bool ksmbd_conn_exiting(struct ksmbd_work *work)
+{
+       return work->conn->status == KSMBD_SESS_EXITING;
+}
+
+static inline void ksmbd_conn_set_good(struct ksmbd_work *work)
+{
+       work->conn->status = KSMBD_SESS_GOOD;
+}
+
+static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_work *work)
+{
+       work->conn->status = KSMBD_SESS_NEED_NEGOTIATE;
+}
+
+static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_work *work)
+{
+       work->conn->status = KSMBD_SESS_NEED_RECONNECT;
+}
+
+static inline void ksmbd_conn_set_exiting(struct ksmbd_work *work)
+{
+       work->conn->status = KSMBD_SESS_EXITING;
+}
+#endif /* __KSMBD_CONNECTION_H__ */
diff --git a/fs/ksmbd/crypto_ctx.c b/fs/ksmbd/crypto_ctx.c
new file mode 100644 (file)
index 0000000..5f4b100
--- /dev/null
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "glob.h"
+#include "crypto_ctx.h"
+
+struct crypto_ctx_list {
+       spinlock_t              ctx_lock;
+       int                     avail_ctx;
+       struct list_head        idle_ctx;
+       wait_queue_head_t       ctx_wait;
+};
+
+static struct crypto_ctx_list ctx_list;
+
+static inline void free_aead(struct crypto_aead *aead)
+{
+       if (aead)
+               crypto_free_aead(aead);
+}
+
+static void free_shash(struct shash_desc *shash)
+{
+       if (shash) {
+               crypto_free_shash(shash->tfm);
+               kfree(shash);
+       }
+}
+
+static struct crypto_aead *alloc_aead(int id)
+{
+       struct crypto_aead *tfm = NULL;
+
+       switch (id) {
+       case CRYPTO_AEAD_AES_GCM:
+               tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+               break;
+       case CRYPTO_AEAD_AES_CCM:
+               tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
+               break;
+       default:
+               pr_err("Unsupported AEAD algorithm (id : %d)\n", id);
+               return NULL;
+       }
+
+       if (IS_ERR(tfm)) {
+               pr_err("Failed to alloc encrypt aead : %ld\n", PTR_ERR(tfm));
+               return NULL;
+       }
+
+       return tfm;
+}
+
+static struct shash_desc *alloc_shash_desc(int id)
+{
+       struct crypto_shash *tfm = NULL;
+       struct shash_desc *shash;
+
+       switch (id) {
+       case CRYPTO_SHASH_HMACMD5:
+               tfm = crypto_alloc_shash("hmac(md5)", 0, 0);
+               break;
+       case CRYPTO_SHASH_HMACSHA256:
+               tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+               break;
+       case CRYPTO_SHASH_CMACAES:
+               tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
+               break;
+       case CRYPTO_SHASH_SHA256:
+               tfm = crypto_alloc_shash("sha256", 0, 0);
+               break;
+       case CRYPTO_SHASH_SHA512:
+               tfm = crypto_alloc_shash("sha512", 0, 0);
+               break;
+       case CRYPTO_SHASH_MD4:
+               tfm = crypto_alloc_shash("md4", 0, 0);
+               break;
+       case CRYPTO_SHASH_MD5:
+               tfm = crypto_alloc_shash("md5", 0, 0);
+               break;
+       default:
+               return NULL;
+       }
+
+       if (IS_ERR(tfm))
+               return NULL;
+
+       shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
+                       GFP_KERNEL);
+       if (!shash)
+               crypto_free_shash(tfm);
+       else
+               shash->tfm = tfm;
+       return shash;
+}
+
+static void ctx_free(struct ksmbd_crypto_ctx *ctx)
+{
+       int i;
+
+       for (i = 0; i < CRYPTO_SHASH_MAX; i++)
+               free_shash(ctx->desc[i]);
+       for (i = 0; i < CRYPTO_AEAD_MAX; i++)
+               free_aead(ctx->ccmaes[i]);
+       kfree(ctx);
+}
+
+static struct ksmbd_crypto_ctx *ksmbd_find_crypto_ctx(void)
+{
+       struct ksmbd_crypto_ctx *ctx;
+
+       while (1) {
+               spin_lock(&ctx_list.ctx_lock);
+               if (!list_empty(&ctx_list.idle_ctx)) {
+                       ctx = list_entry(ctx_list.idle_ctx.next,
+                                        struct ksmbd_crypto_ctx,
+                                        list);
+                       list_del(&ctx->list);
+                       spin_unlock(&ctx_list.ctx_lock);
+                       return ctx;
+               }
+
+               if (ctx_list.avail_ctx > num_online_cpus()) {
+                       spin_unlock(&ctx_list.ctx_lock);
+                       wait_event(ctx_list.ctx_wait,
+                                  !list_empty(&ctx_list.idle_ctx));
+                       continue;
+               }
+
+               ctx_list.avail_ctx++;
+               spin_unlock(&ctx_list.ctx_lock);
+
+               ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), GFP_KERNEL);
+               if (!ctx) {
+                       spin_lock(&ctx_list.ctx_lock);
+                       ctx_list.avail_ctx--;
+                       spin_unlock(&ctx_list.ctx_lock);
+                       wait_event(ctx_list.ctx_wait,
+                                  !list_empty(&ctx_list.idle_ctx));
+                       continue;
+               }
+               break;
+       }
+       return ctx;
+}
+
+void ksmbd_release_crypto_ctx(struct ksmbd_crypto_ctx *ctx)
+{
+       if (!ctx)
+               return;
+
+       spin_lock(&ctx_list.ctx_lock);
+       if (ctx_list.avail_ctx <= num_online_cpus()) {
+               list_add(&ctx->list, &ctx_list.idle_ctx);
+               spin_unlock(&ctx_list.ctx_lock);
+               wake_up(&ctx_list.ctx_wait);
+               return;
+       }
+
+       ctx_list.avail_ctx--;
+       spin_unlock(&ctx_list.ctx_lock);
+       ctx_free(ctx);
+}
+
+static struct ksmbd_crypto_ctx *____crypto_shash_ctx_find(int id)
+{
+       struct ksmbd_crypto_ctx *ctx;
+
+       if (id >= CRYPTO_SHASH_MAX)
+               return NULL;
+
+       ctx = ksmbd_find_crypto_ctx();
+       if (ctx->desc[id])
+               return ctx;
+
+       ctx->desc[id] = alloc_shash_desc(id);
+       if (ctx->desc[id])
+               return ctx;
+       ksmbd_release_crypto_ctx(ctx);
+       return NULL;
+}
+
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacmd5(void)
+{
+       return ____crypto_shash_ctx_find(CRYPTO_SHASH_HMACMD5);
+}
+
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void)
+{
+       return ____crypto_shash_ctx_find(CRYPTO_SHASH_HMACSHA256);
+}
+
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void)
+{
+       return ____crypto_shash_ctx_find(CRYPTO_SHASH_CMACAES);
+}
+
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void)
+{
+       return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA256);
+}
+
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void)
+{
+       return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512);
+}
+
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void)
+{
+       return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD4);
+}
+
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void)
+{
+       return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD5);
+}
+
+static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id)
+{
+       struct ksmbd_crypto_ctx *ctx;
+
+       if (id >= CRYPTO_AEAD_MAX)
+               return NULL;
+
+       ctx = ksmbd_find_crypto_ctx();
+       if (ctx->ccmaes[id])
+               return ctx;
+
+       ctx->ccmaes[id] = alloc_aead(id);
+       if (ctx->ccmaes[id])
+               return ctx;
+       ksmbd_release_crypto_ctx(ctx);
+       return NULL;
+}
+
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void)
+{
+       return ____crypto_aead_ctx_find(CRYPTO_AEAD_AES_GCM);
+}
+
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void)
+{
+       return ____crypto_aead_ctx_find(CRYPTO_AEAD_AES_CCM);
+}
+
+void ksmbd_crypto_destroy(void)
+{
+       struct ksmbd_crypto_ctx *ctx;
+
+       while (!list_empty(&ctx_list.idle_ctx)) {
+               ctx = list_entry(ctx_list.idle_ctx.next,
+                                struct ksmbd_crypto_ctx,
+                                list);
+               list_del(&ctx->list);
+               ctx_free(ctx);
+       }
+}
+
+int ksmbd_crypto_create(void)
+{
+       struct ksmbd_crypto_ctx *ctx;
+
+       spin_lock_init(&ctx_list.ctx_lock);
+       INIT_LIST_HEAD(&ctx_list.idle_ctx);
+       init_waitqueue_head(&ctx_list.ctx_wait);
+       ctx_list.avail_ctx = 1;
+
+       ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+       list_add(&ctx->list, &ctx_list.idle_ctx);
+       return 0;
+}
diff --git a/fs/ksmbd/crypto_ctx.h b/fs/ksmbd/crypto_ctx.h
new file mode 100644 (file)
index 0000000..ef11154
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __CRYPTO_CTX_H__
+#define __CRYPTO_CTX_H__
+
+#include <crypto/hash.h>
+#include <crypto/aead.h>
+
+enum {
+       CRYPTO_SHASH_HMACMD5    = 0,
+       CRYPTO_SHASH_HMACSHA256,
+       CRYPTO_SHASH_CMACAES,
+       CRYPTO_SHASH_SHA256,
+       CRYPTO_SHASH_SHA512,
+       CRYPTO_SHASH_MD4,
+       CRYPTO_SHASH_MD5,
+       CRYPTO_SHASH_MAX,
+};
+
+enum {
+       CRYPTO_AEAD_AES_GCM = 16,
+       CRYPTO_AEAD_AES_CCM,
+       CRYPTO_AEAD_MAX,
+};
+
+enum {
+       CRYPTO_BLK_ECBDES       = 32,
+       CRYPTO_BLK_MAX,
+};
+
+struct ksmbd_crypto_ctx {
+       struct list_head                list;
+
+       struct shash_desc               *desc[CRYPTO_SHASH_MAX];
+       struct crypto_aead              *ccmaes[CRYPTO_AEAD_MAX];
+};
+
+#define CRYPTO_HMACMD5(c)      ((c)->desc[CRYPTO_SHASH_HMACMD5])
+#define CRYPTO_HMACSHA256(c)   ((c)->desc[CRYPTO_SHASH_HMACSHA256])
+#define CRYPTO_CMACAES(c)      ((c)->desc[CRYPTO_SHASH_CMACAES])
+#define CRYPTO_SHA256(c)       ((c)->desc[CRYPTO_SHASH_SHA256])
+#define CRYPTO_SHA512(c)       ((c)->desc[CRYPTO_SHASH_SHA512])
+#define CRYPTO_MD4(c)          ((c)->desc[CRYPTO_SHASH_MD4])
+#define CRYPTO_MD5(c)          ((c)->desc[CRYPTO_SHASH_MD5])
+
+#define CRYPTO_HMACMD5_TFM(c)  ((c)->desc[CRYPTO_SHASH_HMACMD5]->tfm)
+#define CRYPTO_HMACSHA256_TFM(c)\
+                               ((c)->desc[CRYPTO_SHASH_HMACSHA256]->tfm)
+#define CRYPTO_CMACAES_TFM(c)  ((c)->desc[CRYPTO_SHASH_CMACAES]->tfm)
+#define CRYPTO_SHA256_TFM(c)   ((c)->desc[CRYPTO_SHASH_SHA256]->tfm)
+#define CRYPTO_SHA512_TFM(c)   ((c)->desc[CRYPTO_SHASH_SHA512]->tfm)
+#define CRYPTO_MD4_TFM(c)      ((c)->desc[CRYPTO_SHASH_MD4]->tfm)
+#define CRYPTO_MD5_TFM(c)      ((c)->desc[CRYPTO_SHASH_MD5]->tfm)
+
+#define CRYPTO_GCM(c)          ((c)->ccmaes[CRYPTO_AEAD_AES_GCM])
+#define CRYPTO_CCM(c)          ((c)->ccmaes[CRYPTO_AEAD_AES_CCM])
+
+void ksmbd_release_crypto_ctx(struct ksmbd_crypto_ctx *ctx);
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacmd5(void);
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void);
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void);
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void);
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void);
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void);
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void);
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void);
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void);
+void ksmbd_crypto_destroy(void);
+int ksmbd_crypto_create(void);
+
+#endif /* __CRYPTO_CTX_H__ */
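[Editorial note] For orientation, below is a minimal sketch (not part of the patch) of how a caller might use the context pool declared above to compute an HMAC-SHA256 digest; the function name and the key/data buffers are hypothetical.

static int example_hmac_sha256(const u8 *key, unsigned int key_len,
			       const u8 *data, unsigned int data_len, u8 *out)
{
	struct ksmbd_crypto_ctx *ctx;
	int rc;

	/* Borrow an idle context (or allocate one) from the pool. */
	ctx = ksmbd_crypto_ctx_find_hmacsha256();
	if (!ctx)
		return -ENOMEM;

	rc = crypto_shash_setkey(CRYPTO_HMACSHA256_TFM(ctx), key, key_len);
	if (!rc)
		rc = crypto_shash_digest(CRYPTO_HMACSHA256(ctx), data,
					 data_len, out);

	/* Return the context to the idle list (or free it if over the limit). */
	ksmbd_release_crypto_ctx(ctx);
	return rc;
}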
diff --git a/fs/ksmbd/glob.h b/fs/ksmbd/glob.h
new file mode 100644 (file)
index 0000000..49a5a3a
--- /dev/null
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __KSMBD_GLOB_H
+#define __KSMBD_GLOB_H
+
+#include <linux/ctype.h>
+
+#include "unicode.h"
+#include "vfs_cache.h"
+
+#define KSMBD_VERSION  "3.1.9"
+
+extern int ksmbd_debug_types;
+
+#define KSMBD_DEBUG_SMB                BIT(0)
+#define KSMBD_DEBUG_AUTH       BIT(1)
+#define KSMBD_DEBUG_VFS                BIT(2)
+#define KSMBD_DEBUG_OPLOCK      BIT(3)
+#define KSMBD_DEBUG_IPC         BIT(4)
+#define KSMBD_DEBUG_CONN        BIT(5)
+#define KSMBD_DEBUG_RDMA        BIT(6)
+#define KSMBD_DEBUG_ALL         (KSMBD_DEBUG_SMB | KSMBD_DEBUG_AUTH |  \
+                               KSMBD_DEBUG_VFS | KSMBD_DEBUG_OPLOCK |  \
+                               KSMBD_DEBUG_IPC | KSMBD_DEBUG_CONN |    \
+                               KSMBD_DEBUG_RDMA)
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#ifdef SUBMOD_NAME
+#define pr_fmt(fmt)    "ksmbd: " SUBMOD_NAME ": " fmt
+#else
+#define pr_fmt(fmt)    "ksmbd: " fmt
+#endif
+
+#define ksmbd_debug(type, fmt, ...)                            \
+       do {                                                    \
+               if (ksmbd_debug_types & KSMBD_DEBUG_##type)     \
+                       pr_info(fmt, ##__VA_ARGS__);            \
+       } while (0)
+
+#define UNICODE_LEN(x)         ((x) * 2)
+
+#endif /* __KSMBD_GLOB_H */
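[Editorial note] A brief illustration (not part of the patch) of the debug macro above; the function and the 'credits' variable are hypothetical.

static void example_debug(unsigned int credits)
{
	/* Printed only when KSMBD_DEBUG_SMB is set in ksmbd_debug_types. */
	ksmbd_debug(SMB, "granting %u credits\n", credits);
}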
diff --git a/fs/ksmbd/ksmbd_netlink.h b/fs/ksmbd/ksmbd_netlink.h
new file mode 100644 (file)
index 0000000..2fbe2bc
--- /dev/null
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ *
+ *   linux-ksmbd-devel@lists.sourceforge.net
+ */
+
+#ifndef _LINUX_KSMBD_SERVER_H
+#define _LINUX_KSMBD_SERVER_H
+
+#include <linux/types.h>
+
+/*
+ * This is a userspace ABI used to pass data between ksmbd and the user
+ * IPC daemon over netlink. It is used to track and cache the user account
+ * DB and share configuration info from userspace.
+ *
+ *  - KSMBD_EVENT_HEARTBEAT_REQUEST(ksmbd_heartbeat)
+ *    This event is to check whether the user IPC daemon is alive. If the
+ *    user IPC daemon is dead, ksmbd keeps existing connections until they
+ *    disconnect, and new connections will be denied.
+ *
+ *  - KSMBD_EVENT_STARTING_UP(ksmbd_startup_request)
+ *    This event is to receive the information that initializes the ksmbd
+ *    server from the user IPC daemon and to start the server. The global
+ *    section parameters are taken from smb.conf as initialization
+ *    information.
+ *
+ *  - KSMBD_EVENT_SHUTTING_DOWN(ksmbd_shutdown_request)
+ *    This event is to shut down the ksmbd server.
+ *
+ *  - KSMBD_EVENT_LOGIN_REQUEST/RESPONSE(ksmbd_login_request/response)
+ *    This event is to get user account info from the user IPC daemon.
+ *
+ *  - KSMBD_EVENT_SHARE_CONFIG_REQUEST/RESPONSE(ksmbd_share_config_request/response)
+ *    This event is to get net share configuration info.
+ *
+ *  - KSMBD_EVENT_TREE_CONNECT_REQUEST/RESPONSE(ksmbd_tree_connect_request/response)
+ *    This event is to get session and tree connect info.
+ *
+ *  - KSMBD_EVENT_TREE_DISCONNECT_REQUEST(ksmbd_tree_disconnect_request)
+ *    This event is to send tree disconnect info to user IPC daemon.
+ *
+ *  - KSMBD_EVENT_LOGOUT_REQUEST(ksmbd_logout_request)
+ *    This event is to send logout request to user IPC daemon.
+ *
+ *  - KSMBD_EVENT_RPC_REQUEST/RESPONSE(ksmbd_rpc_command)
+ *    This event is to have DCE/RPC requests such as srvsvc, wkssvc, lsarpc
+ *    and samr processed in userspace.
+ *
+ *  - KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST/RESPONSE(ksmbd_spnego_authen_request/response)
+ *    This event is to have Kerberos authentication processed in
+ *    userspace.
+ */
+
+#define KSMBD_GENL_NAME                "SMBD_GENL"
+#define KSMBD_GENL_VERSION             0x01
+
+#define KSMBD_REQ_MAX_ACCOUNT_NAME_SZ  48
+#define KSMBD_REQ_MAX_HASH_SZ          18
+#define KSMBD_REQ_MAX_SHARE_NAME       64
+
+/*
+ * IPC heartbeat frame to check whether user IPC daemon is alive.
+ */
+struct ksmbd_heartbeat {
+       __u32   handle;
+};
+
+/*
+ * Global config flags.
+ */
+#define KSMBD_GLOBAL_FLAG_INVALID              (0)
+#define KSMBD_GLOBAL_FLAG_SMB2_LEASES          BIT(0)
+#define KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION      BIT(1)
+#define KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL    BIT(2)
+
+/*
+ * IPC request for ksmbd server startup
+ */
+struct ksmbd_startup_request {
+       __u32   flags;                  /* Flags for global config */
+       __s32   signing;                /* Signing enabled */
+       __s8    min_prot[16];           /* The minimum SMB protocol version */
+       __s8    max_prot[16];           /* The maximum SMB protocol version */
+       __s8    netbios_name[16];
+       __s8    work_group[64];         /* Workgroup */
+       __s8    server_string[64];      /* Server string */
+       __u16   tcp_port;               /* tcp port */
+       __u16   ipc_timeout;            /*
+                                        * specifies the number of seconds the
+                                        * server will wait for userspace to
+                                        * reply to heartbeat frames.
+                                        */
+       __u32   deadtime;               /* Number of minutes of inactivity */
+       __u32   file_max;               /* Limits the maximum number of open files */
+       __u32   smb2_max_write;         /* MAX write size */
+       __u32   smb2_max_read;          /* MAX read size */
+       __u32   smb2_max_trans;         /* MAX trans size */
+       __u32   share_fake_fscaps;      /*
+                                        * Support some special application that
+                                        * Support special applications that
+                                        * make QFSINFO calls to check whether
+                                        * we set the SPARSE_FILES bit (0x40).
+       __u32   sub_auth[3];            /* Subauth value for Security ID */
+       __u32   ifc_list_sz;            /* interfaces list size */
+       __s8    ____payload[];
+};
+
+#define KSMBD_STARTUP_CONFIG_INTERFACES(s)     ((s)->____payload)
+
+/*
+ * IPC request to shutdown ksmbd server.
+ */
+struct ksmbd_shutdown_request {
+       __s32   reserved;
+};
+
+/*
+ * IPC user login request.
+ */
+struct ksmbd_login_request {
+       __u32   handle;
+       __s8    account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
+};
+
+/*
+ * IPC user login response.
+ */
+struct ksmbd_login_response {
+       __u32   handle;
+       __u32   gid;                                    /* group id */
+       __u32   uid;                                    /* user id */
+       __s8    account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
+       __u16   status;
+       __u16   hash_sz;                        /* hash size */
+       __s8    hash[KSMBD_REQ_MAX_HASH_SZ];    /* password hash */
+};
+
+/*
+ * IPC request to fetch net share config.
+ */
+struct ksmbd_share_config_request {
+       __u32   handle;
+       __s8    share_name[KSMBD_REQ_MAX_SHARE_NAME]; /* share name */
+};
+
+/*
+ * IPC response to the net share config request.
+ */
+struct ksmbd_share_config_response {
+       __u32   handle;
+       __u32   flags;
+       __u16   create_mask;
+       __u16   directory_mask;
+       __u16   force_create_mode;
+       __u16   force_directory_mode;
+       __u16   force_uid;
+       __u16   force_gid;
+       __u32   veto_list_sz;
+       __s8    ____payload[];
+};
+
+#define KSMBD_SHARE_CONFIG_VETO_LIST(s)        ((s)->____payload)
+
+static inline char *
+ksmbd_share_config_path(struct ksmbd_share_config_response *sc)
+{
+       char *p = sc->____payload;
+
+       if (sc->veto_list_sz)
+               p += sc->veto_list_sz + 1;
+
+       return p;
+}
+
+/*
+ * IPC request for tree connection. This request includes session and tree
+ * connect info from the client.
+ */
+struct ksmbd_tree_connect_request {
+       __u32   handle;
+       __u16   account_flags;
+       __u16   flags;
+       __u64   session_id;
+       __u64   connect_id;
+       __s8    account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ];
+       __s8    share[KSMBD_REQ_MAX_SHARE_NAME];
+       __s8    peer_addr[64];
+};
+
+/*
+ * IPC Response structure for tree connection.
+ */
+struct ksmbd_tree_connect_response {
+       __u32   handle;
+       __u16   status;
+       __u16   connection_flags;
+};
+
+/*
+ * IPC Request structure to disconnect a tree connection.
+ */
+struct ksmbd_tree_disconnect_request {
+       __u64   session_id;     /* session id */
+       __u64   connect_id;     /* tree connection id */
+};
+
+/*
+ * IPC Request structure to log out a user account.
+ */
+struct ksmbd_logout_request {
+       __s8    account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
+};
+
+/*
+ * RPC command structure to send rpc request like srvsvc or wkssvc to
+ * IPC user daemon.
+ */
+struct ksmbd_rpc_command {
+       __u32   handle;
+       __u32   flags;
+       __u32   payload_sz;
+       __u8    payload[];
+};
+
+/*
+ * IPC Request Kerberos authentication
+ */
+struct ksmbd_spnego_authen_request {
+       __u32   handle;
+       __u16   spnego_blob_len;        /* the length of spnego_blob */
+       __u8    spnego_blob[0];         /*
+                                        * the GSS token from SecurityBuffer of
+                                        * SMB2 SESSION SETUP request
+                                        */
+};
+
+/*
+ * Response data which includes the GSS token and the session key generated by
+ * user daemon.
+ */
+struct ksmbd_spnego_authen_response {
+       __u32   handle;
+       struct ksmbd_login_response login_response; /*
+                                                    * the login response with
+                                                    * a user identified by the
+                                                    * GSS token from a client
+                                                    */
+       __u16   session_key_len; /* the length of the session key */
+       __u16   spnego_blob_len; /*
+                                 * the length of the GSS token which will be
+                                 * stored in SecurityBuffer of SMB2 SESSION
+                                 * SETUP response
+                                 */
+       __u8    payload[]; /* session key + AP_REP */
+};
+
+/*
+ * This is also used as the NETLINK attribute type value.
+ *
+ * NOTE:
+ * Response message type value should be equal to
+ * request message type value + 1.
+ */
+enum ksmbd_event {
+       KSMBD_EVENT_UNSPEC                      = 0,
+       KSMBD_EVENT_HEARTBEAT_REQUEST,
+
+       KSMBD_EVENT_STARTING_UP,
+       KSMBD_EVENT_SHUTTING_DOWN,
+
+       KSMBD_EVENT_LOGIN_REQUEST,
+       KSMBD_EVENT_LOGIN_RESPONSE              = 5,
+
+       KSMBD_EVENT_SHARE_CONFIG_REQUEST,
+       KSMBD_EVENT_SHARE_CONFIG_RESPONSE,
+
+       KSMBD_EVENT_TREE_CONNECT_REQUEST,
+       KSMBD_EVENT_TREE_CONNECT_RESPONSE,
+
+       KSMBD_EVENT_TREE_DISCONNECT_REQUEST     = 10,
+
+       KSMBD_EVENT_LOGOUT_REQUEST,
+
+       KSMBD_EVENT_RPC_REQUEST,
+       KSMBD_EVENT_RPC_RESPONSE,
+
+       KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
+       KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE      = 15,
+
+       KSMBD_EVENT_MAX
+};
+
+/*
+ * Enumeration for IPC tree connect status.
+ */
+enum KSMBD_TREE_CONN_STATUS {
+       KSMBD_TREE_CONN_STATUS_OK               = 0,
+       KSMBD_TREE_CONN_STATUS_NOMEM,
+       KSMBD_TREE_CONN_STATUS_NO_SHARE,
+       KSMBD_TREE_CONN_STATUS_NO_USER,
+       KSMBD_TREE_CONN_STATUS_INVALID_USER,
+       KSMBD_TREE_CONN_STATUS_HOST_DENIED      = 5,
+       KSMBD_TREE_CONN_STATUS_CONN_EXIST,
+       KSMBD_TREE_CONN_STATUS_TOO_MANY_CONNS,
+       KSMBD_TREE_CONN_STATUS_TOO_MANY_SESSIONS,
+       KSMBD_TREE_CONN_STATUS_ERROR,
+};
+
+/*
+ * User config flags.
+ */
+#define KSMBD_USER_FLAG_INVALID                (0)
+#define KSMBD_USER_FLAG_OK             BIT(0)
+#define KSMBD_USER_FLAG_BAD_PASSWORD   BIT(1)
+#define KSMBD_USER_FLAG_BAD_UID                BIT(2)
+#define KSMBD_USER_FLAG_BAD_USER       BIT(3)
+#define KSMBD_USER_FLAG_GUEST_ACCOUNT  BIT(4)
+
+/*
+ * Share config flags.
+ */
+#define KSMBD_SHARE_FLAG_INVALID               (0)
+#define KSMBD_SHARE_FLAG_AVAILABLE             BIT(0)
+#define KSMBD_SHARE_FLAG_BROWSEABLE            BIT(1)
+#define KSMBD_SHARE_FLAG_WRITEABLE             BIT(2)
+#define KSMBD_SHARE_FLAG_READONLY              BIT(3)
+#define KSMBD_SHARE_FLAG_GUEST_OK              BIT(4)
+#define KSMBD_SHARE_FLAG_GUEST_ONLY            BIT(5)
+#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS       BIT(6)
+#define KSMBD_SHARE_FLAG_OPLOCKS               BIT(7)
+#define KSMBD_SHARE_FLAG_PIPE                  BIT(8)
+#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES                BIT(9)
+#define KSMBD_SHARE_FLAG_INHERIT_OWNER         BIT(10)
+#define KSMBD_SHARE_FLAG_STREAMS               BIT(11)
+#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS       BIT(12)
+#define KSMBD_SHARE_FLAG_ACL_XATTR             BIT(13)
+
+/*
+ * Tree connect request flags.
+ */
+#define KSMBD_TREE_CONN_FLAG_REQUEST_SMB1      (0)
+#define KSMBD_TREE_CONN_FLAG_REQUEST_IPV6      BIT(0)
+#define KSMBD_TREE_CONN_FLAG_REQUEST_SMB2      BIT(1)
+
+/*
+ * Tree connect flags.
+ */
+#define KSMBD_TREE_CONN_FLAG_GUEST_ACCOUNT     BIT(0)
+#define KSMBD_TREE_CONN_FLAG_READ_ONLY         BIT(1)
+#define KSMBD_TREE_CONN_FLAG_WRITABLE          BIT(2)
+#define KSMBD_TREE_CONN_FLAG_ADMIN_ACCOUNT     BIT(3)
+
+/*
+ * RPC over IPC.
+ */
+#define KSMBD_RPC_METHOD_RETURN                BIT(0)
+#define KSMBD_RPC_SRVSVC_METHOD_INVOKE BIT(1)
+#define KSMBD_RPC_SRVSVC_METHOD_RETURN (KSMBD_RPC_SRVSVC_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
+#define KSMBD_RPC_WKSSVC_METHOD_INVOKE BIT(2)
+#define KSMBD_RPC_WKSSVC_METHOD_RETURN (KSMBD_RPC_WKSSVC_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
+#define KSMBD_RPC_IOCTL_METHOD         (BIT(3) | KSMBD_RPC_METHOD_RETURN)
+#define KSMBD_RPC_OPEN_METHOD          BIT(4)
+#define KSMBD_RPC_WRITE_METHOD         BIT(5)
+#define KSMBD_RPC_READ_METHOD          (BIT(6) | KSMBD_RPC_METHOD_RETURN)
+#define KSMBD_RPC_CLOSE_METHOD         BIT(7)
+#define KSMBD_RPC_RAP_METHOD           (BIT(8) | KSMBD_RPC_METHOD_RETURN)
+#define KSMBD_RPC_RESTRICTED_CONTEXT   BIT(9)
+#define KSMBD_RPC_SAMR_METHOD_INVOKE   BIT(10)
+#define KSMBD_RPC_SAMR_METHOD_RETURN   (KSMBD_RPC_SAMR_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
+#define KSMBD_RPC_LSARPC_METHOD_INVOKE BIT(11)
+#define KSMBD_RPC_LSARPC_METHOD_RETURN (KSMBD_RPC_LSARPC_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
+
+/*
+ * RPC status definitions.
+ */
+#define KSMBD_RPC_OK                   0
+#define KSMBD_RPC_EBAD_FUNC            0x00000001
+#define KSMBD_RPC_EACCESS_DENIED       0x00000005
+#define KSMBD_RPC_EBAD_FID             0x00000006
+#define KSMBD_RPC_ENOMEM               0x00000008
+#define KSMBD_RPC_EBAD_DATA            0x0000000D
+#define KSMBD_RPC_ENOTIMPLEMENTED      0x00000040
+#define KSMBD_RPC_EINVALID_PARAMETER   0x00000057
+#define KSMBD_RPC_EMORE_DATA           0x000000EA
+#define KSMBD_RPC_EINVALID_LEVEL       0x0000007C
+#define KSMBD_RPC_SOME_NOT_MAPPED      0x00000107
+
+#define KSMBD_CONFIG_OPT_DISABLED      0
+#define KSMBD_CONFIG_OPT_ENABLED       1
+#define KSMBD_CONFIG_OPT_AUTO          2
+#define KSMBD_CONFIG_OPT_MANDATORY     3
+
+#endif /* _LINUX_KSMBD_SERVER_H */
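[Editorial note] The request/response numbering rule documented above (a response type equals the request type plus one) could be captured in a small helper; this is an illustrative sketch, not part of the patch.

static inline int ksmbd_event_response_type(int request_type)
{
	/* e.g. KSMBD_EVENT_LOGIN_REQUEST (4) -> KSMBD_EVENT_LOGIN_RESPONSE (5) */
	return request_type + 1;
}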
diff --git a/fs/ksmbd/ksmbd_spnego_negtokeninit.asn1 b/fs/ksmbd/ksmbd_spnego_negtokeninit.asn1
new file mode 100644 (file)
index 0000000..0065f19
--- /dev/null
@@ -0,0 +1,31 @@
+GSSAPI ::=
+       [APPLICATION 0] IMPLICIT SEQUENCE {
+               thisMech
+                       OBJECT IDENTIFIER ({ksmbd_gssapi_this_mech}),
+               negotiationToken
+                       NegotiationToken
+       }
+
+MechType ::= OBJECT IDENTIFIER ({ksmbd_neg_token_init_mech_type})
+
+MechTypeList ::= SEQUENCE OF MechType
+
+NegTokenInit ::=
+       SEQUENCE {
+               mechTypes
+                       [0] MechTypeList,
+               reqFlags
+                       [1] BIT STRING OPTIONAL,
+               mechToken
+                       [2] OCTET STRING OPTIONAL ({ksmbd_neg_token_init_mech_token}),
+               mechListMIC
+                       [3] OCTET STRING OPTIONAL
+       }
+
+NegotiationToken ::=
+       CHOICE {
+               negTokenInit
+                       [0] NegTokenInit,
+               negTokenTarg
+                       [1] ANY
+       }
diff --git a/fs/ksmbd/ksmbd_spnego_negtokentarg.asn1 b/fs/ksmbd/ksmbd_spnego_negtokentarg.asn1
new file mode 100644 (file)
index 0000000..1151933
--- /dev/null
@@ -0,0 +1,19 @@
+GSSAPI ::=
+       CHOICE {
+               negTokenInit
+                       [0] ANY,
+               negTokenTarg
+                       [1] NegTokenTarg
+       }
+
+NegTokenTarg ::=
+       SEQUENCE {
+               negResult
+                       [0] ENUMERATED OPTIONAL,
+               supportedMech
+                       [1] OBJECT IDENTIFIER OPTIONAL,
+               responseToken
+                       [2] OCTET STRING OPTIONAL ({ksmbd_neg_token_targ_resp_token}),
+               mechListMIC
+                       [3] OCTET STRING OPTIONAL
+       }
diff --git a/fs/ksmbd/ksmbd_work.c b/fs/ksmbd/ksmbd_work.c
new file mode 100644 (file)
index 0000000..fd58eb4
--- /dev/null
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "server.h"
+#include "connection.h"
+#include "ksmbd_work.h"
+#include "mgmt/ksmbd_ida.h"
+
+static struct kmem_cache *work_cache;
+static struct workqueue_struct *ksmbd_wq;
+
+struct ksmbd_work *ksmbd_alloc_work_struct(void)
+{
+       struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL);
+
+       if (work) {
+               work->compound_fid = KSMBD_NO_FID;
+               work->compound_pfid = KSMBD_NO_FID;
+               INIT_LIST_HEAD(&work->request_entry);
+               INIT_LIST_HEAD(&work->async_request_entry);
+               INIT_LIST_HEAD(&work->fp_entry);
+               INIT_LIST_HEAD(&work->interim_entry);
+       }
+       return work;
+}
+
+void ksmbd_free_work_struct(struct ksmbd_work *work)
+{
+       WARN_ON(work->saved_cred != NULL);
+
+       kvfree(work->response_buf);
+       kvfree(work->aux_payload_buf);
+       kfree(work->tr_buf);
+       kvfree(work->request_buf);
+       if (work->async_id)
+               ksmbd_release_id(&work->conn->async_ida, work->async_id);
+       kmem_cache_free(work_cache, work);
+}
+
+void ksmbd_work_pool_destroy(void)
+{
+       kmem_cache_destroy(work_cache);
+}
+
+int ksmbd_work_pool_init(void)
+{
+       work_cache = kmem_cache_create("ksmbd_work_cache",
+                                      sizeof(struct ksmbd_work), 0,
+                                      SLAB_HWCACHE_ALIGN, NULL);
+       if (!work_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+int ksmbd_workqueue_init(void)
+{
+       ksmbd_wq = alloc_workqueue("ksmbd-io", 0, 0);
+       if (!ksmbd_wq)
+               return -ENOMEM;
+       return 0;
+}
+
+void ksmbd_workqueue_destroy(void)
+{
+       flush_workqueue(ksmbd_wq);
+       destroy_workqueue(ksmbd_wq);
+       ksmbd_wq = NULL;
+}
+
+bool ksmbd_queue_work(struct ksmbd_work *work)
+{
+       return queue_work(ksmbd_wq, &work->work);
+}
diff --git a/fs/ksmbd/ksmbd_work.h b/fs/ksmbd/ksmbd_work.h
new file mode 100644 (file)
index 0000000..f7156bc
--- /dev/null
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __KSMBD_WORK_H__
+#define __KSMBD_WORK_H__
+
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+
+struct ksmbd_conn;
+struct ksmbd_session;
+struct ksmbd_tree_connect;
+
+enum {
+       KSMBD_WORK_ACTIVE = 0,
+       KSMBD_WORK_CANCELLED,
+       KSMBD_WORK_CLOSED,
+};
+
+/* one of these for every pending CIFS request at the connection */
+struct ksmbd_work {
+       /* Server corresponding to this mid */
+       struct ksmbd_conn               *conn;
+       struct ksmbd_session            *sess;
+       struct ksmbd_tree_connect       *tcon;
+
+       /* Pointer to received SMB header */
+       void                            *request_buf;
+       /* Response buffer */
+       void                            *response_buf;
+
+       /* Read data buffer */
+       void                            *aux_payload_buf;
+
+       /* Next cmd hdr in compound req buf*/
+       int                             next_smb2_rcv_hdr_off;
+       /* Next cmd hdr in compound rsp buf*/
+       int                             next_smb2_rsp_hdr_off;
+
+       /*
+        * Current local FID assigned to the compound response if an SMB2
+        * CREATE command is present in the compound request
+        */
+       u64                             compound_fid;
+       u64                             compound_pfid;
+       u64                             compound_sid;
+
+       const struct cred               *saved_cred;
+
+       /* Number of granted credits */
+       unsigned int                    credits_granted;
+
+       /* response smb header size */
+       unsigned int                    resp_hdr_sz;
+       unsigned int                    response_sz;
+       /* Read data count */
+       unsigned int                    aux_payload_sz;
+
+       void                            *tr_buf;
+
+       unsigned char                   state;
+       /* Multiple responses for one request e.g. SMB ECHO */
+       bool                            multiRsp:1;
+       /* No response for cancelled request */
+       bool                            send_no_response:1;
+       /* Request is encrypted */
+       bool                            encrypted:1;
+       /* Is this SYNC or ASYNC ksmbd_work */
+       bool                            syncronous:1;
+       bool                            need_invalidate_rkey:1;
+
+       unsigned int                    remote_key;
+       /* cancel works */
+       int                             async_id;
+       void                            **cancel_argv;
+       void                            (*cancel_fn)(void **argv);
+
+       struct work_struct              work;
+       /* List head at conn->requests */
+       struct list_head                request_entry;
+       /* List head at conn->async_requests */
+       struct list_head                async_request_entry;
+       struct list_head                fp_entry;
+       struct list_head                interim_entry;
+};
+
+/**
+ * ksmbd_resp_buf_next - Get next buffer on compound response.
+ * @work: smb work containing response buffer
+ */
+static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
+{
+       return work->response_buf + work->next_smb2_rsp_hdr_off;
+}
+
+/**
+ * ksmbd_req_buf_next - Get next buffer on compound request.
+ * @work: smb work containing request buffer
+ */
+static inline void *ksmbd_req_buf_next(struct ksmbd_work *work)
+{
+       return work->request_buf + work->next_smb2_rcv_hdr_off;
+}
+
+struct ksmbd_work *ksmbd_alloc_work_struct(void);
+void ksmbd_free_work_struct(struct ksmbd_work *work);
+
+void ksmbd_work_pool_destroy(void);
+int ksmbd_work_pool_init(void);
+
+int ksmbd_workqueue_init(void);
+void ksmbd_workqueue_destroy(void);
+bool ksmbd_queue_work(struct ksmbd_work *work);
+
+#endif /* __KSMBD_WORK_H__ */
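[Editorial note] A rough sketch (not part of the patch) of the allocate/queue/free lifecycle exposed by this header; handle_ksmbd_work() is a hypothetical handler standing in for whatever the caller wires up with INIT_WORK.

static void handle_ksmbd_work(struct work_struct *wk)
{
	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);

	/* ... process the SMB request carried in work->request_buf ... */
	ksmbd_free_work_struct(work);
}

static int example_dispatch(void)
{
	struct ksmbd_work *work = ksmbd_alloc_work_struct();

	if (!work)
		return -ENOMEM;
	INIT_WORK(&work->work, handle_ksmbd_work);
	ksmbd_queue_work(work);	/* runs on the "ksmbd-io" workqueue */
	return 0;
}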
diff --git a/fs/ksmbd/mgmt/ksmbd_ida.c b/fs/ksmbd/mgmt/ksmbd_ida.c
new file mode 100644 (file)
index 0000000..54194d9
--- /dev/null
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include "ksmbd_ida.h"
+
+static inline int __acquire_id(struct ida *ida, int from, int to)
+{
+       return ida_simple_get(ida, from, to, GFP_KERNEL);
+}
+
+int ksmbd_acquire_smb2_tid(struct ida *ida)
+{
+       int id;
+
+       id = __acquire_id(ida, 1, 0xFFFFFFFF);
+
+       return id;
+}
+
+int ksmbd_acquire_smb2_uid(struct ida *ida)
+{
+       int id;
+
+       id = __acquire_id(ida, 1, 0);
+       if (id == 0xFFFE)
+               id = __acquire_id(ida, 1, 0);
+
+       return id;
+}
+
+int ksmbd_acquire_async_msg_id(struct ida *ida)
+{
+       return __acquire_id(ida, 1, 0);
+}
+
+int ksmbd_acquire_id(struct ida *ida)
+{
+       return __acquire_id(ida, 0, 0);
+}
+
+void ksmbd_release_id(struct ida *ida, int id)
+{
+       ida_simple_remove(ida, id);
+}
diff --git a/fs/ksmbd/mgmt/ksmbd_ida.h b/fs/ksmbd/mgmt/ksmbd_ida.h
new file mode 100644 (file)
index 0000000..2bc07b1
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __KSMBD_IDA_MANAGEMENT_H__
+#define __KSMBD_IDA_MANAGEMENT_H__
+
+#include <linux/slab.h>
+#include <linux/idr.h>
+
+/*
+ * 2.2.1.6.7 TID Generation
+ *    The value 0xFFFF MUST NOT be used as a valid TID. All other
+ *    possible values for TID, including zero (0x0000), are valid.
+ *    The value 0xFFFF is used to specify all TIDs or no TID,
+ *    depending upon the context in which it is used.
+ */
+int ksmbd_acquire_smb2_tid(struct ida *ida);
+
+/*
+ * 2.2.1.6.8 UID Generation
+ *    The value 0xFFFE was declared reserved in the LAN Manager 1.0
+ *    documentation, so a value of 0xFFFE SHOULD NOT be used as a
+ *    valid UID.<21> All other possible values for a UID, excluding
+ *    zero (0x0000), are valid.
+ */
+int ksmbd_acquire_smb2_uid(struct ida *ida);
+int ksmbd_acquire_async_msg_id(struct ida *ida);
+
+int ksmbd_acquire_id(struct ida *ida);
+
+void ksmbd_release_id(struct ida *ida, int id);
+#endif /* __KSMBD_IDA_MANAGEMENT_H__ */
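[Editorial note] A short usage sketch (not part of the patch) pairing the acquire/release helpers above; the ida instance is assumed to have been initialized elsewhere (e.g. with ida_init()).

static int example_use_tid(struct ida *ida)
{
	int id = ksmbd_acquire_smb2_tid(ida);	/* non-zero id for a tree connect */

	if (id < 0)
		return id;
	/* ... use id as the TreeId while the tree connection lives ... */
	ksmbd_release_id(ida, id);
	return 0;
}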
diff --git a/fs/ksmbd/mgmt/share_config.c b/fs/ksmbd/mgmt/share_config.c
new file mode 100644 (file)
index 0000000..cb72d30
--- /dev/null
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/rwsem.h>
+#include <linux/parser.h>
+#include <linux/namei.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include "share_config.h"
+#include "user_config.h"
+#include "user_session.h"
+#include "../transport_ipc.h"
+
+#define SHARE_HASH_BITS                3
+static DEFINE_HASHTABLE(shares_table, SHARE_HASH_BITS);
+static DECLARE_RWSEM(shares_table_lock);
+
+struct ksmbd_veto_pattern {
+       char                    *pattern;
+       struct list_head        list;
+};
+
+static unsigned int share_name_hash(char *name)
+{
+       return jhash(name, strlen(name), 0);
+}
+
+static void kill_share(struct ksmbd_share_config *share)
+{
+       while (!list_empty(&share->veto_list)) {
+               struct ksmbd_veto_pattern *p;
+
+               p = list_entry(share->veto_list.next,
+                              struct ksmbd_veto_pattern,
+                              list);
+               list_del(&p->list);
+               kfree(p->pattern);
+               kfree(p);
+       }
+
+       if (share->path)
+               path_put(&share->vfs_path);
+       kfree(share->name);
+       kfree(share->path);
+       kfree(share);
+}
+
+void __ksmbd_share_config_put(struct ksmbd_share_config *share)
+{
+       down_write(&shares_table_lock);
+       hash_del(&share->hlist);
+       up_write(&shares_table_lock);
+
+       kill_share(share);
+}
+
+static struct ksmbd_share_config *
+__get_share_config(struct ksmbd_share_config *share)
+{
+       if (!atomic_inc_not_zero(&share->refcount))
+               return NULL;
+       return share;
+}
+
+static struct ksmbd_share_config *__share_lookup(char *name)
+{
+       struct ksmbd_share_config *share;
+       unsigned int key = share_name_hash(name);
+
+       hash_for_each_possible(shares_table, share, hlist, key) {
+               if (!strcmp(name, share->name))
+                       return share;
+       }
+       return NULL;
+}
+
+static int parse_veto_list(struct ksmbd_share_config *share,
+                          char *veto_list,
+                          int veto_list_sz)
+{
+       int sz = 0;
+
+       if (!veto_list_sz)
+               return 0;
+
+       while (veto_list_sz > 0) {
+               struct ksmbd_veto_pattern *p;
+
+               sz = strlen(veto_list);
+               if (!sz)
+                       break;
+
+               p = kzalloc(sizeof(struct ksmbd_veto_pattern), GFP_KERNEL);
+               if (!p)
+                       return -ENOMEM;
+
+               p->pattern = kstrdup(veto_list, GFP_KERNEL);
+               if (!p->pattern) {
+                       kfree(p);
+                       return -ENOMEM;
+               }
+
+               list_add(&p->list, &share->veto_list);
+
+               veto_list += sz + 1;
+               veto_list_sz -= (sz + 1);
+       }
+
+       return 0;
+}
+
+static struct ksmbd_share_config *share_config_request(char *name)
+{
+       struct ksmbd_share_config_response *resp;
+       struct ksmbd_share_config *share = NULL;
+       struct ksmbd_share_config *lookup;
+       int ret;
+
+       resp = ksmbd_ipc_share_config_request(name);
+       if (!resp)
+               return NULL;
+
+       if (resp->flags == KSMBD_SHARE_FLAG_INVALID)
+               goto out;
+
+       share = kzalloc(sizeof(struct ksmbd_share_config), GFP_KERNEL);
+       if (!share)
+               goto out;
+
+       share->flags = resp->flags;
+       atomic_set(&share->refcount, 1);
+       INIT_LIST_HEAD(&share->veto_list);
+       share->name = kstrdup(name, GFP_KERNEL);
+
+       if (!test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+               share->path = kstrdup(ksmbd_share_config_path(resp),
+                                     GFP_KERNEL);
+               if (share->path)
+                       share->path_sz = strlen(share->path);
+               share->create_mask = resp->create_mask;
+               share->directory_mask = resp->directory_mask;
+               share->force_create_mode = resp->force_create_mode;
+               share->force_directory_mode = resp->force_directory_mode;
+               share->force_uid = resp->force_uid;
+               share->force_gid = resp->force_gid;
+               ret = parse_veto_list(share,
+                                     KSMBD_SHARE_CONFIG_VETO_LIST(resp),
+                                     resp->veto_list_sz);
+               if (!ret && share->path) {
+                       ret = kern_path(share->path, 0, &share->vfs_path);
+                       if (ret) {
+                               ksmbd_debug(SMB, "failed to access '%s'\n",
+                                           share->path);
+                               /* Avoid path_put() */
+                               kfree(share->path);
+                               share->path = NULL;
+                       }
+               }
+               if (ret || !share->name) {
+                       kill_share(share);
+                       share = NULL;
+                       goto out;
+               }
+       }
+
+       down_write(&shares_table_lock);
+       lookup = __share_lookup(name);
+       if (lookup)
+               lookup = __get_share_config(lookup);
+       if (!lookup) {
+               hash_add(shares_table, &share->hlist, share_name_hash(name));
+       } else {
+               kill_share(share);
+               share = lookup;
+       }
+       up_write(&shares_table_lock);
+
+out:
+       kvfree(resp);
+       return share;
+}
+
+static void strtolower(char *share_name)
+{
+       while (*share_name) {
+               *share_name = tolower(*share_name);
+               share_name++;
+       }
+}
+
+struct ksmbd_share_config *ksmbd_share_config_get(char *name)
+{
+       struct ksmbd_share_config *share;
+
+       strtolower(name);
+
+       down_read(&shares_table_lock);
+       share = __share_lookup(name);
+       if (share)
+               share = __get_share_config(share);
+       up_read(&shares_table_lock);
+
+       if (share)
+               return share;
+       return share_config_request(name);
+}
+
+bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
+                              const char *filename)
+{
+       struct ksmbd_veto_pattern *p;
+
+       list_for_each_entry(p, &share->veto_list, list) {
+               if (match_wildcard(p->pattern, filename))
+                       return true;
+       }
+       return false;
+}
+
+void ksmbd_share_configs_cleanup(void)
+{
+       struct ksmbd_share_config *share;
+       struct hlist_node *tmp;
+       int i;
+
+       down_write(&shares_table_lock);
+       hash_for_each_safe(shares_table, i, tmp, share, hlist) {
+               hash_del(&share->hlist);
+               kill_share(share);
+       }
+       up_write(&shares_table_lock);
+}
diff --git a/fs/ksmbd/mgmt/share_config.h b/fs/ksmbd/mgmt/share_config.h
new file mode 100644 (file)
index 0000000..953befc
--- /dev/null
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __SHARE_CONFIG_MANAGEMENT_H__
+#define __SHARE_CONFIG_MANAGEMENT_H__
+
+#include <linux/workqueue.h>
+#include <linux/hashtable.h>
+#include <linux/path.h>
+
+struct ksmbd_share_config {
+       char                    *name;
+       char                    *path;
+
+       unsigned int            path_sz;
+       unsigned int            flags;
+       struct list_head        veto_list;
+
+       struct path             vfs_path;
+
+       atomic_t                refcount;
+       struct hlist_node       hlist;
+       unsigned short          create_mask;
+       unsigned short          directory_mask;
+       unsigned short          force_create_mode;
+       unsigned short          force_directory_mode;
+       unsigned short          force_uid;
+       unsigned short          force_gid;
+};
+
+#define KSMBD_SHARE_INVALID_UID        ((__u16)-1)
+#define KSMBD_SHARE_INVALID_GID        ((__u16)-1)
+
+static inline int share_config_create_mode(struct ksmbd_share_config *share,
+                                          umode_t posix_mode)
+{
+       if (!share->force_create_mode) {
+               if (!posix_mode)
+                       return share->create_mask;
+               else
+                       return posix_mode & share->create_mask;
+       }
+       return share->force_create_mode & share->create_mask;
+}
+
+static inline int share_config_directory_mode(struct ksmbd_share_config *share,
+                                             umode_t posix_mode)
+{
+       if (!share->force_directory_mode) {
+               if (!posix_mode)
+                       return share->directory_mask;
+               else
+                       return posix_mode & share->directory_mask;
+       }
+
+       return share->force_directory_mode & share->directory_mask;
+}
+
+static inline int test_share_config_flag(struct ksmbd_share_config *share,
+                                        int flag)
+{
+       return share->flags & flag;
+}
+
+void __ksmbd_share_config_put(struct ksmbd_share_config *share);
+
+static inline void ksmbd_share_config_put(struct ksmbd_share_config *share)
+{
+       if (!atomic_dec_and_test(&share->refcount))
+               return;
+       __ksmbd_share_config_put(share);
+}
+
+struct ksmbd_share_config *ksmbd_share_config_get(char *name);
+bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
+                              const char *filename);
+void ksmbd_share_configs_cleanup(void);
+
+#endif /* __SHARE_CONFIG_MANAGEMENT_H__ */
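[Editorial note] A minimal sketch (not part of the patch) of a lookup/veto/put sequence against the interface above; the share and file names are hypothetical.

static bool example_is_vetoed(char *share_name, const char *filename)
{
	struct ksmbd_share_config *share;
	bool vetoed = false;

	share = ksmbd_share_config_get(share_name);	/* takes a reference */
	if (!share)
		return false;

	if (!test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE))
		vetoed = ksmbd_share_veto_filename(share, filename);

	ksmbd_share_config_put(share);			/* drops the reference */
	return vetoed;
}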
diff --git a/fs/ksmbd/mgmt/tree_connect.c b/fs/ksmbd/mgmt/tree_connect.c
new file mode 100644 (file)
index 0000000..0d28e72
--- /dev/null
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#include "../transport_ipc.h"
+#include "../connection.h"
+
+#include "tree_connect.h"
+#include "user_config.h"
+#include "share_config.h"
+#include "user_session.h"
+
+struct ksmbd_tree_conn_status
+ksmbd_tree_conn_connect(struct ksmbd_session *sess, char *share_name)
+{
+       struct ksmbd_tree_conn_status status = {-EINVAL, NULL};
+       struct ksmbd_tree_connect_response *resp = NULL;
+       struct ksmbd_share_config *sc;
+       struct ksmbd_tree_connect *tree_conn = NULL;
+       struct sockaddr *peer_addr;
+       int ret;
+
+       sc = ksmbd_share_config_get(share_name);
+       if (!sc)
+               return status;
+
+       tree_conn = kzalloc(sizeof(struct ksmbd_tree_connect), GFP_KERNEL);
+       if (!tree_conn) {
+               status.ret = -ENOMEM;
+               goto out_error;
+       }
+
+       tree_conn->id = ksmbd_acquire_tree_conn_id(sess);
+       if (tree_conn->id < 0) {
+               status.ret = -EINVAL;
+               goto out_error;
+       }
+
+       peer_addr = KSMBD_TCP_PEER_SOCKADDR(sess->conn);
+       resp = ksmbd_ipc_tree_connect_request(sess,
+                                             sc,
+                                             tree_conn,
+                                             peer_addr);
+       if (!resp) {
+               status.ret = -EINVAL;
+               goto out_error;
+       }
+
+       status.ret = resp->status;
+       if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
+               goto out_error;
+
+       tree_conn->flags = resp->connection_flags;
+       tree_conn->user = sess->user;
+       tree_conn->share_conf = sc;
+       status.tree_conn = tree_conn;
+
+       ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn,
+                             GFP_KERNEL));
+       if (ret) {
+               status.ret = -ENOMEM;
+               goto out_error;
+       }
+       kvfree(resp);
+       return status;
+
+out_error:
+       if (tree_conn)
+               ksmbd_release_tree_conn_id(sess, tree_conn->id);
+       ksmbd_share_config_put(sc);
+       kfree(tree_conn);
+       kvfree(resp);
+       return status;
+}
+
+int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+                              struct ksmbd_tree_connect *tree_conn)
+{
+       int ret;
+
+       ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id);
+       ksmbd_release_tree_conn_id(sess, tree_conn->id);
+       xa_erase(&sess->tree_conns, tree_conn->id);
+       ksmbd_share_config_put(tree_conn->share_conf);
+       kfree(tree_conn);
+       return ret;
+}
+
+struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
+                                                 unsigned int id)
+{
+       return xa_load(&sess->tree_conns, id);
+}
+
+struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
+                                                unsigned int id)
+{
+       struct ksmbd_tree_connect *tc;
+
+       tc = ksmbd_tree_conn_lookup(sess, id);
+       if (tc)
+               return tc->share_conf;
+       return NULL;
+}
+
+int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
+{
+       int ret = 0;
+       struct ksmbd_tree_connect *tc;
+       unsigned long id;
+
+       xa_for_each(&sess->tree_conns, id, tc)
+               ret |= ksmbd_tree_conn_disconnect(sess, tc);
+       xa_destroy(&sess->tree_conns);
+       return ret;
+}
diff --git a/fs/ksmbd/mgmt/tree_connect.h b/fs/ksmbd/mgmt/tree_connect.h
new file mode 100644 (file)
index 0000000..18e2a99
--- /dev/null
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __TREE_CONNECT_MANAGEMENT_H__
+#define __TREE_CONNECT_MANAGEMENT_H__
+
+#include <linux/hashtable.h>
+
+#include "../ksmbd_netlink.h"
+
+struct ksmbd_share_config;
+struct ksmbd_user;
+
+struct ksmbd_tree_connect {
+       int                             id;
+
+       unsigned int                    flags;
+       struct ksmbd_share_config       *share_conf;
+       struct ksmbd_user               *user;
+
+       struct list_head                list;
+
+       int                             maximal_access;
+       bool                            posix_extensions;
+};
+
+struct ksmbd_tree_conn_status {
+       unsigned int                    ret;
+       struct ksmbd_tree_connect       *tree_conn;
+};
+
+static inline int test_tree_conn_flag(struct ksmbd_tree_connect *tree_conn,
+                                     int flag)
+{
+       return tree_conn->flags & flag;
+}
+
+struct ksmbd_session;
+
+struct ksmbd_tree_conn_status
+ksmbd_tree_conn_connect(struct ksmbd_session *sess, char *share_name);
+
+int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+                              struct ksmbd_tree_connect *tree_conn);
+
+struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
+                                                 unsigned int id);
+
+struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
+                                                unsigned int id);
+
+int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess);
+
+#endif /* __TREE_CONNECT_MANAGEMENT_H__ */
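[Editorial note] An illustrative sketch (not part of the patch) of consuming ksmbd_tree_conn_connect(); the session and share name are assumed to exist, and the errno mapping shown here is arbitrary.

static int example_tree_connect(struct ksmbd_session *sess, char *share_name)
{
	struct ksmbd_tree_conn_status status;

	status = ksmbd_tree_conn_connect(sess, share_name);
	if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
		return -EACCES;

	if (test_tree_conn_flag(status.tree_conn, KSMBD_TREE_CONN_FLAG_READ_ONLY))
		ksmbd_debug(SMB, "share %s connected read-only\n", share_name);

	return status.tree_conn->id;
}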
diff --git a/fs/ksmbd/mgmt/user_config.c b/fs/ksmbd/mgmt/user_config.c
new file mode 100644 (file)
index 0000000..d21629a
--- /dev/null
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include "user_config.h"
+#include "../transport_ipc.h"
+
+struct ksmbd_user *ksmbd_login_user(const char *account)
+{
+       struct ksmbd_login_response *resp;
+       struct ksmbd_user *user = NULL;
+
+       resp = ksmbd_ipc_login_request(account);
+       if (!resp)
+               return NULL;
+
+       if (!(resp->status & KSMBD_USER_FLAG_OK))
+               goto out;
+
+       user = ksmbd_alloc_user(resp);
+out:
+       kvfree(resp);
+       return user;
+}
+
+struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp)
+{
+       struct ksmbd_user *user = NULL;
+
+       user = kmalloc(sizeof(struct ksmbd_user), GFP_KERNEL);
+       if (!user)
+               return NULL;
+
+       user->name = kstrdup(resp->account, GFP_KERNEL);
+       user->flags = resp->status;
+       user->gid = resp->gid;
+       user->uid = resp->uid;
+       user->passkey_sz = resp->hash_sz;
+       user->passkey = kmalloc(resp->hash_sz, GFP_KERNEL);
+       if (user->passkey)
+               memcpy(user->passkey, resp->hash, resp->hash_sz);
+
+       if (!user->name || !user->passkey) {
+               kfree(user->name);
+               kfree(user->passkey);
+               kfree(user);
+               user = NULL;
+       }
+       return user;
+}
+
+void ksmbd_free_user(struct ksmbd_user *user)
+{
+       ksmbd_ipc_logout_request(user->name);
+       kfree(user->name);
+       kfree(user->passkey);
+       kfree(user);
+}
+
+int ksmbd_anonymous_user(struct ksmbd_user *user)
+{
+       if (user->name[0] == '\0')
+               return 1;
+       return 0;
+}
diff --git a/fs/ksmbd/mgmt/user_config.h b/fs/ksmbd/mgmt/user_config.h
new file mode 100644 (file)
index 0000000..b2bb074
--- /dev/null
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __USER_CONFIG_MANAGEMENT_H__
+#define __USER_CONFIG_MANAGEMENT_H__
+
+#include "../glob.h"
+
+struct ksmbd_user {
+       unsigned short          flags;
+
+       unsigned int            uid;
+       unsigned int            gid;
+
+       char                    *name;
+
+       size_t                  passkey_sz;
+       char                    *passkey;
+};
+
+static inline bool user_guest(struct ksmbd_user *user)
+{
+       return user->flags & KSMBD_USER_FLAG_GUEST_ACCOUNT;
+}
+
+static inline void set_user_flag(struct ksmbd_user *user, int flag)
+{
+       user->flags |= flag;
+}
+
+static inline int test_user_flag(struct ksmbd_user *user, int flag)
+{
+       return user->flags & flag;
+}
+
+static inline void set_user_guest(struct ksmbd_user *user)
+{
+}
+
+static inline char *user_passkey(struct ksmbd_user *user)
+{
+       return user->passkey;
+}
+
+static inline char *user_name(struct ksmbd_user *user)
+{
+       return user->name;
+}
+
+static inline unsigned int user_uid(struct ksmbd_user *user)
+{
+       return user->uid;
+}
+
+static inline unsigned int user_gid(struct ksmbd_user *user)
+{
+       return user->gid;
+}
+
+struct ksmbd_user *ksmbd_login_user(const char *account);
+struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp);
+void ksmbd_free_user(struct ksmbd_user *user);
+int ksmbd_anonymous_user(struct ksmbd_user *user);
+#endif /* __USER_CONFIG_MANAGEMENT_H__ */
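[Editorial note] A small sketch (not part of the patch) of resolving an account through the user IPC daemon with the interface above; the account name is hypothetical.

static int example_login(void)
{
	struct ksmbd_user *user;

	user = ksmbd_login_user("alice");	/* asks the user IPC daemon */
	if (!user)
		return -EACCES;

	if (user_guest(user))
		ksmbd_debug(AUTH, "%s is a guest account\n", user_name(user));

	ksmbd_free_user(user);			/* also sends the logout request */
	return 0;
}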
diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
new file mode 100644 (file)
index 0000000..8d8ffd8
--- /dev/null
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/rwsem.h>
+#include <linux/xarray.h>
+
+#include "ksmbd_ida.h"
+#include "user_session.h"
+#include "user_config.h"
+#include "tree_connect.h"
+#include "../transport_ipc.h"
+#include "../connection.h"
+#include "../vfs_cache.h"
+
+static DEFINE_IDA(session_ida);
+
+#define SESSION_HASH_BITS              3
+static DEFINE_HASHTABLE(sessions_table, SESSION_HASH_BITS);
+static DECLARE_RWSEM(sessions_table_lock);
+
+struct ksmbd_session_rpc {
+       int                     id;
+       unsigned int            method;
+       struct list_head        list;
+};
+
+static void free_channel_list(struct ksmbd_session *sess)
+{
+       struct channel *chann, *tmp;
+
+       list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
+                                chann_list) {
+               list_del(&chann->chann_list);
+               kfree(chann);
+       }
+}
+
+static void __session_rpc_close(struct ksmbd_session *sess,
+                               struct ksmbd_session_rpc *entry)
+{
+       struct ksmbd_rpc_command *resp;
+
+       resp = ksmbd_rpc_close(sess, entry->id);
+       if (!resp)
+               pr_err("Unable to close RPC pipe %d\n", entry->id);
+
+       kvfree(resp);
+       ksmbd_rpc_id_free(entry->id);
+       kfree(entry);
+}
+
+static void ksmbd_session_rpc_clear_list(struct ksmbd_session *sess)
+{
+       struct ksmbd_session_rpc *entry;
+
+       while (!list_empty(&sess->rpc_handle_list)) {
+               entry = list_entry(sess->rpc_handle_list.next,
+                                  struct ksmbd_session_rpc,
+                                  list);
+
+               list_del(&entry->list);
+               __session_rpc_close(sess, entry);
+       }
+}
+
+static int __rpc_method(char *rpc_name)
+{
+       if (!strcmp(rpc_name, "\\srvsvc") || !strcmp(rpc_name, "srvsvc"))
+               return KSMBD_RPC_SRVSVC_METHOD_INVOKE;
+
+       if (!strcmp(rpc_name, "\\wkssvc") || !strcmp(rpc_name, "wkssvc"))
+               return KSMBD_RPC_WKSSVC_METHOD_INVOKE;
+
+       if (!strcmp(rpc_name, "LANMAN") || !strcmp(rpc_name, "lanman"))
+               return KSMBD_RPC_RAP_METHOD;
+
+       if (!strcmp(rpc_name, "\\samr") || !strcmp(rpc_name, "samr"))
+               return KSMBD_RPC_SAMR_METHOD_INVOKE;
+
+       if (!strcmp(rpc_name, "\\lsarpc") || !strcmp(rpc_name, "lsarpc"))
+               return KSMBD_RPC_LSARPC_METHOD_INVOKE;
+
+       pr_err("Unsupported RPC: %s\n", rpc_name);
+       return 0;
+}
+
+int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
+{
+       struct ksmbd_session_rpc *entry;
+       struct ksmbd_rpc_command *resp;
+       int method;
+
+       method = __rpc_method(rpc_name);
+       if (!method)
+               return -EINVAL;
+
+       entry = kzalloc(sizeof(struct ksmbd_session_rpc), GFP_KERNEL);
+       if (!entry)
+               return -EINVAL;
+
+       list_add(&entry->list, &sess->rpc_handle_list);
+       entry->method = method;
+       entry->id = ksmbd_ipc_id_alloc();
+       if (entry->id < 0)
+               goto error;
+
+       resp = ksmbd_rpc_open(sess, entry->id);
+       if (!resp)
+               goto error;
+
+       kvfree(resp);
+       return entry->id;
+error:
+       list_del(&entry->list);
+       kfree(entry);
+       return -EINVAL;
+}
+
+void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id)
+{
+       struct ksmbd_session_rpc *entry;
+
+       list_for_each_entry(entry, &sess->rpc_handle_list, list) {
+               if (entry->id == id) {
+                       list_del(&entry->list);
+                       __session_rpc_close(sess, entry);
+                       break;
+               }
+       }
+}
+
+int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+{
+       struct ksmbd_session_rpc *entry;
+
+       list_for_each_entry(entry, &sess->rpc_handle_list, list) {
+               if (entry->id == id)
+                       return entry->method;
+       }
+       return 0;
+}
+
+void ksmbd_session_destroy(struct ksmbd_session *sess)
+{
+       if (!sess)
+               return;
+
+       if (!atomic_dec_and_test(&sess->refcnt))
+               return;
+
+       list_del(&sess->sessions_entry);
+
+       down_write(&sessions_table_lock);
+       hash_del(&sess->hlist);
+       up_write(&sessions_table_lock);
+
+       if (sess->user)
+               ksmbd_free_user(sess->user);
+
+       ksmbd_tree_conn_session_logoff(sess);
+       ksmbd_destroy_file_table(&sess->file_table);
+       ksmbd_session_rpc_clear_list(sess);
+       free_channel_list(sess);
+       kfree(sess->Preauth_HashValue);
+       ksmbd_release_id(&session_ida, sess->id);
+       kfree(sess);
+}
+
+static struct ksmbd_session *__session_lookup(unsigned long long id)
+{
+       struct ksmbd_session *sess;
+
+       hash_for_each_possible(sessions_table, sess, hlist, id) {
+               if (id == sess->id)
+                       return sess;
+       }
+       return NULL;
+}
+
+void ksmbd_session_register(struct ksmbd_conn *conn,
+                           struct ksmbd_session *sess)
+{
+       sess->conn = conn;
+       list_add(&sess->sessions_entry, &conn->sessions);
+}
+
+void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+{
+       struct ksmbd_session *sess;
+
+       while (!list_empty(&conn->sessions)) {
+               sess = list_entry(conn->sessions.next,
+                                 struct ksmbd_session,
+                                 sessions_entry);
+
+               ksmbd_session_destroy(sess);
+       }
+}
+
+static bool ksmbd_session_id_match(struct ksmbd_session *sess,
+                                  unsigned long long id)
+{
+       return sess->id == id;
+}
+
+struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+                                          unsigned long long id)
+{
+       struct ksmbd_session *sess = NULL;
+
+       list_for_each_entry(sess, &conn->sessions, sessions_entry) {
+               if (ksmbd_session_id_match(sess, id))
+                       return sess;
+       }
+       return NULL;
+}
+
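+/*
+ * Session reference counting: the base reference set in __session_create()
+ * is dropped via ksmbd_session_destroy(); extra references come from
+ * get_session(), which fails (returns 0) once the count has already hit
+ * zero, so put_session() warns if it is the call that drops the count to
+ * zero.
+ */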
+int get_session(struct ksmbd_session *sess)
+{
+       return atomic_inc_not_zero(&sess->refcnt);
+}
+
+void put_session(struct ksmbd_session *sess)
+{
+       if (atomic_dec_and_test(&sess->refcnt))
+               pr_err("get/%s seems to be mismatched.", __func__);
+}
+
+struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+{
+       struct ksmbd_session *sess;
+
+       down_read(&sessions_table_lock);
+       sess = __session_lookup(id);
+       if (sess) {
+               if (!get_session(sess))
+                       sess = NULL;
+       }
+       up_read(&sessions_table_lock);
+
+       return sess;
+}
+
+struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
+                                              unsigned long long id)
+{
+       struct ksmbd_session *sess;
+
+       sess = ksmbd_session_lookup(conn, id);
+       if (!sess && conn->binding)
+               sess = ksmbd_session_lookup_slowpath(id);
+       return sess;
+}
+
+struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
+                                                   u64 sess_id)
+{
+       struct preauth_session *sess;
+
+       sess = kmalloc(sizeof(struct preauth_session), GFP_KERNEL);
+       if (!sess)
+               return NULL;
+
+       sess->id = sess_id;
+       memcpy(sess->Preauth_HashValue, conn->preauth_info->Preauth_HashValue,
+              PREAUTH_HASHVALUE_SIZE);
+       list_add(&sess->preauth_entry, &conn->preauth_sess_table);
+
+       return sess;
+}
+
+static bool ksmbd_preauth_session_id_match(struct preauth_session *sess,
+                                          unsigned long long id)
+{
+       return sess->id == id;
+}
+
+struct preauth_session *ksmbd_preauth_session_lookup(struct ksmbd_conn *conn,
+                                                    unsigned long long id)
+{
+       struct preauth_session *sess = NULL;
+
+       list_for_each_entry(sess, &conn->preauth_sess_table, preauth_entry) {
+               if (ksmbd_preauth_session_id_match(sess, id))
+                       return sess;
+       }
+       return NULL;
+}
+
+static int __init_smb2_session(struct ksmbd_session *sess)
+{
+       int id = ksmbd_acquire_smb2_uid(&session_ida);
+
+       if (id < 0)
+               return -EINVAL;
+       sess->id = id;
+       return 0;
+}
+
+static struct ksmbd_session *__session_create(int protocol)
+{
+       struct ksmbd_session *sess;
+       int ret;
+
+       sess = kzalloc(sizeof(struct ksmbd_session), GFP_KERNEL);
+       if (!sess)
+               return NULL;
+
+       if (ksmbd_init_file_table(&sess->file_table))
+               goto error;
+
+       set_session_flag(sess, protocol);
+       INIT_LIST_HEAD(&sess->sessions_entry);
+       xa_init(&sess->tree_conns);
+       INIT_LIST_HEAD(&sess->ksmbd_chann_list);
+       INIT_LIST_HEAD(&sess->rpc_handle_list);
+       sess->sequence_number = 1;
+       atomic_set(&sess->refcnt, 1);
+
+       switch (protocol) {
+       case CIFDS_SESSION_FLAG_SMB2:
+               ret = __init_smb2_session(sess);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       if (ret)
+               goto error;
+
+       ida_init(&sess->tree_conn_ida);
+
+       if (protocol == CIFDS_SESSION_FLAG_SMB2) {
+               down_write(&sessions_table_lock);
+               hash_add(sessions_table, &sess->hlist, sess->id);
+               up_write(&sessions_table_lock);
+       }
+       return sess;
+
+error:
+       ksmbd_session_destroy(sess);
+       return NULL;
+}
+
+struct ksmbd_session *ksmbd_smb2_session_create(void)
+{
+       return __session_create(CIFDS_SESSION_FLAG_SMB2);
+}
+
+int ksmbd_acquire_tree_conn_id(struct ksmbd_session *sess)
+{
+       int id = -EINVAL;
+
+       if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2))
+               id = ksmbd_acquire_smb2_tid(&sess->tree_conn_ida);
+
+       return id;
+}
+
+void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id)
+{
+       if (id >= 0)
+               ksmbd_release_id(&sess->tree_conn_ida, id);
+}
diff --git a/fs/ksmbd/mgmt/user_session.h b/fs/ksmbd/mgmt/user_session.h
new file mode 100644 (file)
index 0000000..82289c3
--- /dev/null
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __USER_SESSION_MANAGEMENT_H__
+#define __USER_SESSION_MANAGEMENT_H__
+
+#include <linux/hashtable.h>
+#include <linux/xarray.h>
+
+#include "../smb_common.h"
+#include "../ntlmssp.h"
+
+#define CIFDS_SESSION_FLAG_SMB2                BIT(1)
+
+#define PREAUTH_HASHVALUE_SIZE         64
+
+struct ksmbd_file_table;
+
+struct channel {
+       __u8                    smb3signingkey[SMB3_SIGN_KEY_SIZE];
+       struct ksmbd_conn       *conn;
+       struct list_head        chann_list;
+};
+
+struct preauth_session {
+       __u8                    Preauth_HashValue[PREAUTH_HASHVALUE_SIZE];
+       u64                     id;
+       struct list_head        preauth_entry;
+};
+
+struct ksmbd_session {
+       u64                             id;
+
+       struct ksmbd_user               *user;
+       struct ksmbd_conn               *conn;
+       unsigned int                    sequence_number;
+       unsigned int                    flags;
+
+       bool                            sign;
+       bool                            enc;
+       bool                            is_anonymous;
+
+       int                             state;
+       __u8                            *Preauth_HashValue;
+
+       struct ntlmssp_auth             ntlmssp;
+       char                            sess_key[CIFS_KEY_SIZE];
+
+       struct hlist_node               hlist;
+       struct list_head                ksmbd_chann_list;
+       struct xarray                   tree_conns;
+       struct ida                      tree_conn_ida;
+       struct list_head                rpc_handle_list;
+
+       __u8                            smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+       __u8                            smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+       __u8                            smb3signingkey[SMB3_SIGN_KEY_SIZE];
+
+       struct list_head                sessions_entry;
+       struct ksmbd_file_table         file_table;
+       atomic_t                        refcnt;
+};
+
+static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+{
+       return sess->flags & bit;
+}
+
+static inline void set_session_flag(struct ksmbd_session *sess, int bit)
+{
+       sess->flags |= bit;
+}
+
+static inline void clear_session_flag(struct ksmbd_session *sess, int bit)
+{
+       sess->flags &= ~bit;
+}
+
+struct ksmbd_session *ksmbd_smb2_session_create(void);
+
+void ksmbd_session_destroy(struct ksmbd_session *sess);
+
+struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id);
+struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+                                          unsigned long long id);
+void ksmbd_session_register(struct ksmbd_conn *conn,
+                           struct ksmbd_session *sess);
+void ksmbd_sessions_deregister(struct ksmbd_conn *conn);
+struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
+                                              unsigned long long id);
+struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
+                                                   u64 sess_id);
+struct preauth_session *ksmbd_preauth_session_lookup(struct ksmbd_conn *conn,
+                                                    unsigned long long id);
+
+int ksmbd_acquire_tree_conn_id(struct ksmbd_session *sess);
+void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id);
+
+int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name);
+void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id);
+int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id);
+int get_session(struct ksmbd_session *sess);
+void put_session(struct ksmbd_session *sess);
+#endif /* __USER_SESSION_MANAGEMENT_H__ */
diff --git a/fs/ksmbd/misc.c b/fs/ksmbd/misc.c
new file mode 100644 (file)
index 0000000..0b307ca
--- /dev/null
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/xattr.h>
+#include <linux/fs.h>
+
+#include "misc.h"
+#include "smb_common.h"
+#include "connection.h"
+#include "vfs.h"
+
+#include "mgmt/share_config.h"
+
+/**
+ * match_pattern() - compare a string with a pattern which may include
+ * the wildcards '*' and '?'
+ * TODO: add handling for DOS_DOT, DOS_QM and DOS_STAR
+ *
+ * @str:       string to compare with the pattern
+ * @len:       string length
+ * @pattern:   pattern string which may include the wildcards '*' and '?'
+ *
+ * Return:     1 if the pattern matches the string, otherwise 0
+ */
+int match_pattern(const char *str, size_t len, const char *pattern)
+{
+       const char *s = str;
+       const char *p = pattern;
+       bool star = false;
+
+       while (*s && len) {
+               switch (*p) {
+               case '?':
+                       s++;
+                       len--;
+                       p++;
+                       break;
+               case '*':
+                       star = true;
+                       str = s;
+                       if (!*++p)
+                               return true;
+                       pattern = p;
+                       break;
+               default:
+                       if (tolower(*s) == tolower(*p)) {
+                               s++;
+                               len--;
+                               p++;
+                       } else {
+                               if (!star)
+                                       return false;
+                               str++;
+                               s = str;
+                               p = pattern;
+                       }
+                       break;
+               }
+       }
+
+       if (*p == '*')
+               ++p;
+       return !*p;
+}
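+
+/*
+ * A few illustrative calls against the matcher above (matching is
+ * case-insensitive via tolower()):
+ *   match_pattern("report.txt", 10, "*.txt")    returns 1
+ *   match_pattern("report.txt", 10, "r?port.*") returns 1
+ *   match_pattern("report.txt", 10, "*.doc")    returns 0
+ */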
+
+/*
+ * is_char_allowed() - check for valid character
+ * @ch:                input character to be checked
+ *
+ * Return:     1 if char is allowed, otherwise 0
+ */
+static inline int is_char_allowed(char ch)
+{
+       /* check for control chars, wildcards etc. */
+       if (!(ch & 0x80) &&
+           (ch <= 0x1f ||
+            ch == '?' || ch == '"' || ch == '<' ||
+            ch == '>' || ch == '|' || ch == '*'))
+               return 0;
+
+       return 1;
+}
+
+int ksmbd_validate_filename(char *filename)
+{
+       while (*filename) {
+               char c = *filename;
+
+               filename++;
+               if (!is_char_allowed(c)) {
+                       ksmbd_debug(VFS, "File name validation failed: 0x%x\n", c);
+                       return -ENOENT;
+               }
+       }
+
+       return 0;
+}
+
+static int ksmbd_validate_stream_name(char *stream_name)
+{
+       while (*stream_name) {
+               char c = *stream_name;
+
+               stream_name++;
+               if (c == '/' || c == ':' || c == '\\') {
+                       pr_err("Stream name validation failed: %c\n", c);
+                       return -ENOENT;
+               }
+       }
+
+       return 0;
+}
+
+int parse_stream_name(char *filename, char **stream_name, int *s_type)
+{
+       char *stream_type;
+       char *s_name;
+       int rc = 0;
+
+       s_name = filename;
+       filename = strsep(&s_name, ":");
+       ksmbd_debug(SMB, "filename : %s, streams : %s\n", filename, s_name);
+       if (strchr(s_name, ':')) {
+               stream_type = s_name;
+               s_name = strsep(&stream_type, ":");
+
+               rc = ksmbd_validate_stream_name(s_name);
+               if (rc < 0) {
+                       rc = -ENOENT;
+                       goto out;
+               }
+
+               ksmbd_debug(SMB, "stream name : %s, stream type : %s\n", s_name,
+                           stream_type);
+               if (!strncasecmp("$data", stream_type, 5))
+                       *s_type = DATA_STREAM;
+               else if (!strncasecmp("$index_allocation", stream_type, 17))
+                       *s_type = DIR_STREAM;
+               else
+                       rc = -ENOENT;
+       }
+
+       *stream_name = s_name;
+out:
+       return rc;
+}
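+
+/*
+ * Example: for the name "file.txt:zone:$DATA" the parser above splits the
+ * buffer at the colons, leaving "file.txt" as the file name, pointing
+ * *stream_name at "zone" and setting *s_type to DATA_STREAM; a
+ * "$INDEX_ALLOCATION" stream type selects DIR_STREAM instead.
+ */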
+
+/**
+ * convert_to_nt_pathname() - convert a unix path into a windows-style
+ *      path with the share directory prefix stripped
+ * @filename:  unix filename
+ * @sharepath: share path string
+ *
+ * Return:     windows path string, or NULL on allocation failure
+ */
+
+char *convert_to_nt_pathname(char *filename, char *sharepath)
+{
+       char *ab_pathname;
+       int len, name_len;
+
+       name_len = strlen(filename);
+       ab_pathname = kmalloc(name_len, GFP_KERNEL);
+       if (!ab_pathname)
+               return NULL;
+
+       ab_pathname[0] = '\\';
+       ab_pathname[1] = '\0';
+
+       len = strlen(sharepath);
+       if (!strncmp(filename, sharepath, len) && name_len != len) {
+               strscpy(ab_pathname, &filename[len], name_len);
+               ksmbd_conv_path_to_windows(ab_pathname);
+       }
+
+       return ab_pathname;
+}
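+
+/*
+ * Example: filename "/srv/share/docs/a.txt" with sharepath "/srv/share"
+ * becomes "\docs\a.txt"; when filename equals the share path itself, the
+ * bare "\" prepared above is returned instead.
+ */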
+
+int get_nlink(struct kstat *st)
+{
+       int nlink;
+
+       nlink = st->nlink;
+       if (S_ISDIR(st->mode))
+               nlink--;
+
+       return nlink;
+}
+
+void ksmbd_conv_path_to_unix(char *path)
+{
+       strreplace(path, '\\', '/');
+}
+
+void ksmbd_strip_last_slash(char *path)
+{
+       int len = strlen(path);
+
+       while (len && path[len - 1] == '/') {
+               path[len - 1] = '\0';
+               len--;
+       }
+}
+
+void ksmbd_conv_path_to_windows(char *path)
+{
+       strreplace(path, '/', '\\');
+}
+
+/**
+ * ksmbd_extract_sharename() - get share name from tree connect request
+ * @treename:  buffer containing tree name and share name
+ *
+ * Return:     newly allocated share name on success, otherwise ERR_PTR(-ENOMEM)
+ */
+char *ksmbd_extract_sharename(char *treename)
+{
+       char *name = treename;
+       char *dst;
+       char *pos = strrchr(name, '\\');
+
+       if (pos)
+               name = (pos + 1);
+
+       /* caller has to free the memory */
+       dst = kstrdup(name, GFP_KERNEL);
+       if (!dst)
+               return ERR_PTR(-ENOMEM);
+       return dst;
+}
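+
+/*
+ * Example: a tree name of "\\SERVER\public" yields a newly allocated copy
+ * of "public"; a name without any backslash is duplicated as-is.
+ */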
+
+/**
+ * convert_to_unix_name() - convert windows name to unix format
+ * @share:     share config of the matching share
+ * @name:      name to be converted
+ *
+ * Return:     converted name on success, otherwise NULL
+ */
+char *convert_to_unix_name(struct ksmbd_share_config *share, char *name)
+{
+       int no_slash = 0, name_len, path_len;
+       char *new_name;
+
+       if (name[0] == '/')
+               name++;
+
+       path_len = share->path_sz;
+       name_len = strlen(name);
+       new_name = kmalloc(path_len + name_len + 2, GFP_KERNEL);
+       if (!new_name)
+               return new_name;
+
+       memcpy(new_name, share->path, path_len);
+       if (new_name[path_len - 1] != '/') {
+               new_name[path_len] = '/';
+               no_slash = 1;
+       }
+
+       memcpy(new_name + path_len + no_slash, name, name_len);
+       path_len += name_len + no_slash;
+       new_name[path_len] = 0x00;
+       return new_name;
+}
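+
+/*
+ * Example: with share->path "/srv/share" and name "docs/a.txt" (or
+ * "/docs/a.txt"), the helper above returns "/srv/share/docs/a.txt",
+ * inserting the missing '/' between the two parts.
+ */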
+
+char *ksmbd_convert_dir_info_name(struct ksmbd_dir_info *d_info,
+                                 const struct nls_table *local_nls,
+                                 int *conv_len)
+{
+       char *conv;
+       int  sz = min(4 * d_info->name_len, PATH_MAX);
+
+       if (!sz)
+               return NULL;
+
+       conv = kmalloc(sz, GFP_KERNEL);
+       if (!conv)
+               return NULL;
+
+       /* XXX */
+       *conv_len = smbConvertToUTF16((__le16 *)conv, d_info->name,
+                                     d_info->name_len, local_nls, 0);
+       *conv_len *= 2;
+
+       /* The buffer is twice as big as needed, so the 2-byte NUL terminator fits. */
+       conv[*conv_len] = 0x00;
+       conv[*conv_len + 1] = 0x00;
+       return conv;
+}
+
+/*
+ * Convert NT UTC (based on 1601-01-01, in hundred-nanosecond units)
+ * into Unix UTC (based on 1970-01-01, in seconds).
+ */
+struct timespec64 ksmbd_NTtimeToUnix(__le64 ntutc)
+{
+       struct timespec64 ts;
+
+       /* Subtract the NTFS time offset, then convert to 1s intervals. */
+       s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+       u64 abs_t;
+
+       /*
+        * Unfortunately we cannot use normal 64-bit division on 32-bit
+        * architectures, and the alternative, do_div(), does not work with
+        * negative numbers, so those have to be special-cased.
+        */
+       if (t < 0) {
+               abs_t = -t;
+               ts.tv_nsec = do_div(abs_t, 10000000) * 100;
+               ts.tv_nsec = -ts.tv_nsec;
+               ts.tv_sec = -abs_t;
+       } else {
+               abs_t = t;
+               ts.tv_nsec = do_div(abs_t, 10000000) * 100;
+               ts.tv_sec = abs_t;
+       }
+
+       return ts;
+}
+
+/* Convert the Unix UTC into NT UTC. */
+inline u64 ksmbd_UnixTimeToNT(struct timespec64 t)
+{
+       /* Convert to 100ns intervals and then add the NTFS time offset. */
+       return (u64)t.tv_sec * 10000000 + t.tv_nsec / 100 + NTFS_TIME_OFFSET;
+}
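+
+/*
+ * Sanity check for the two conversions above: the Unix epoch
+ * (tv_sec = 0, tv_nsec = 0) maps to NTFS_TIME_OFFSET = 116444736000000000
+ * (11644473600 seconds between 1601-01-01 and 1970-01-01, scaled to 100ns
+ * units), and feeding that value back through ksmbd_NTtimeToUnix()
+ * yields {0, 0} again.
+ */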
+
+inline long long ksmbd_systime(void)
+{
+       struct timespec64       ts;
+
+       ktime_get_real_ts64(&ts);
+       return ksmbd_UnixTimeToNT(ts);
+}
diff --git a/fs/ksmbd/misc.h b/fs/ksmbd/misc.h
new file mode 100644 (file)
index 0000000..af8717d
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __KSMBD_MISC_H__
+#define __KSMBD_MISC_H__
+
+struct ksmbd_share_config;
+struct nls_table;
+struct kstat;
+struct ksmbd_file;
+
+int match_pattern(const char *str, size_t len, const char *pattern);
+int ksmbd_validate_filename(char *filename);
+int parse_stream_name(char *filename, char **stream_name, int *s_type);
+char *convert_to_nt_pathname(char *filename, char *sharepath);
+int get_nlink(struct kstat *st);
+void ksmbd_conv_path_to_unix(char *path);
+void ksmbd_strip_last_slash(char *path);
+void ksmbd_conv_path_to_windows(char *path);
+char *ksmbd_extract_sharename(char *treename);
+char *convert_to_unix_name(struct ksmbd_share_config *share, char *name);
+
+#define KSMBD_DIR_INFO_ALIGNMENT       8
+struct ksmbd_dir_info;
+char *ksmbd_convert_dir_info_name(struct ksmbd_dir_info *d_info,
+                                 const struct nls_table *local_nls,
+                                 int *conv_len);
+
+#define NTFS_TIME_OFFSET       ((u64)(369 * 365 + 89) * 24 * 3600 * 10000000)
+struct timespec64 ksmbd_NTtimeToUnix(__le64 ntutc);
+u64 ksmbd_UnixTimeToNT(struct timespec64 t);
+long long ksmbd_systime(void);
+#endif /* __KSMBD_MISC_H__ */
diff --git a/fs/ksmbd/ndr.c b/fs/ksmbd/ndr.c
new file mode 100644 (file)
index 0000000..2243a2c
--- /dev/null
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2021 Samsung Electronics Co., Ltd.
+ *   Author(s): Namjae Jeon <linkinjeon@kernel.org>
+ */
+
+#include <linux/fs.h>
+
+#include "glob.h"
+#include "ndr.h"
+
+static inline char *ndr_get_field(struct ndr *n)
+{
+       return n->data + n->offset;
+}
+
+static int try_to_realloc_ndr_blob(struct ndr *n, size_t sz)
+{
+       char *data;
+
+       data = krealloc(n->data, n->offset + sz + 1024, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       n->data = data;
+       n->length += 1024;
+       memset(n->data + n->offset, 0, 1024);
+       return 0;
+}
+
+static void ndr_write_int16(struct ndr *n, __u16 value)
+{
+       if (n->length <= n->offset + sizeof(value))
+               try_to_realloc_ndr_blob(n, sizeof(value));
+
+       *(__le16 *)ndr_get_field(n) = cpu_to_le16(value);
+       n->offset += sizeof(value);
+}
+
+static void ndr_write_int32(struct ndr *n, __u32 value)
+{
+       if (n->length <= n->offset + sizeof(value))
+               try_to_realloc_ndr_blob(n, sizeof(value));
+
+       *(__le32 *)ndr_get_field(n) = cpu_to_le32(value);
+       n->offset += sizeof(value);
+}
+
+static void ndr_write_int64(struct ndr *n, __u64 value)
+{
+       if (n->length <= n->offset + sizeof(value))
+               try_to_realloc_ndr_blob(n, sizeof(value));
+
+       *(__le64 *)ndr_get_field(n) = cpu_to_le64(value);
+       n->offset += sizeof(value);
+}
+
+static int ndr_write_bytes(struct ndr *n, void *value, size_t sz)
+{
+       if (n->length <= n->offset + sz)
+               try_to_realloc_ndr_blob(n, sz);
+
+       memcpy(ndr_get_field(n), value, sz);
+       n->offset += sz;
+       return 0;
+}
+
+static int ndr_write_string(struct ndr *n, char *value)
+{
+       size_t sz;
+
+       sz = strlen(value) + 1;
+       if (n->length <= n->offset + sz)
+               try_to_realloc_ndr_blob(n, sz);
+
+       memcpy(ndr_get_field(n), value, sz);
+       n->offset += sz;
+       n->offset = ALIGN(n->offset, 2);
+       return 0;
+}
+
+static int ndr_read_string(struct ndr *n, void *value, size_t sz)
+{
+       int len = strnlen(ndr_get_field(n), sz);
+
+       memcpy(value, ndr_get_field(n), len);
+       len++;
+       n->offset += len;
+       n->offset = ALIGN(n->offset, 2);
+       return 0;
+}
+
+static int ndr_read_bytes(struct ndr *n, void *value, size_t sz)
+{
+       memcpy(value, ndr_get_field(n), sz);
+       n->offset += sz;
+       return 0;
+}
+
+static __u16 ndr_read_int16(struct ndr *n)
+{
+       __u16 ret;
+
+       ret = le16_to_cpu(*(__le16 *)ndr_get_field(n));
+       n->offset += sizeof(__u16);
+       return ret;
+}
+
+static __u32 ndr_read_int32(struct ndr *n)
+{
+       __u32 ret;
+
+       ret = le32_to_cpu(*(__le32 *)ndr_get_field(n));
+       n->offset += sizeof(__u32);
+       return ret;
+}
+
+static __u64 ndr_read_int64(struct ndr *n)
+{
+       __u64 ret;
+
+       ret = le64_to_cpu(*(__le64 *)ndr_get_field(n));
+       n->offset += sizeof(__u64);
+       return ret;
+}
+
+int ndr_encode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
+{
+       char hex_attr[12] = {0};
+
+       n->offset = 0;
+       n->length = 1024;
+       n->data = kzalloc(n->length, GFP_KERNEL);
+       if (!n->data)
+               return -ENOMEM;
+
+       if (da->version == 3) {
+               snprintf(hex_attr, 10, "0x%x", da->attr);
+               ndr_write_string(n, hex_attr);
+       } else {
+               ndr_write_string(n, "");
+       }
+       ndr_write_int16(n, da->version);
+       ndr_write_int32(n, da->version);
+
+       ndr_write_int32(n, da->flags);
+       ndr_write_int32(n, da->attr);
+       if (da->version == 3) {
+               ndr_write_int32(n, da->ea_size);
+               ndr_write_int64(n, da->size);
+               ndr_write_int64(n, da->alloc_size);
+       } else {
+               ndr_write_int64(n, da->itime);
+       }
+       ndr_write_int64(n, da->create_time);
+       if (da->version == 3)
+               ndr_write_int64(n, da->change_time);
+       return 0;
+}
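+
+/*
+ * Resulting blob layout (mirrored by ndr_decode_dos_attr() below): the hex
+ * attribute string (empty for version 4), version as u16 then u32, flags,
+ * attr, then ea_size/size/alloc_size for version 3 or itime for version 4,
+ * followed by create_time and, for version 3 only, change_time.
+ */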
+
+int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
+{
+       char *hex_attr;
+       int version2;
+
+       hex_attr = kzalloc(n->length, GFP_KERNEL);
+       if (!hex_attr)
+               return -ENOMEM;
+
+       n->offset = 0;
+       ndr_read_string(n, hex_attr, n->length);
+       kfree(hex_attr);
+       da->version = ndr_read_int16(n);
+
+       if (da->version != 3 && da->version != 4) {
+               pr_err("v%d version is not supported\n", da->version);
+               return -EINVAL;
+       }
+
+       version2 = ndr_read_int32(n);
+       if (da->version != version2) {
+               pr_err("ndr version mismatched(version: %d, version2: %d)\n",
+                      da->version, version2);
+               return -EINVAL;
+       }
+
+       ndr_read_int32(n);
+       da->attr = ndr_read_int32(n);
+       if (da->version == 4) {
+               da->itime = ndr_read_int64(n);
+               da->create_time = ndr_read_int64(n);
+       } else {
+               ndr_read_int32(n);
+               ndr_read_int64(n);
+               ndr_read_int64(n);
+               da->create_time = ndr_read_int64(n);
+               ndr_read_int64(n);
+       }
+
+       return 0;
+}
+
+static int ndr_encode_posix_acl_entry(struct ndr *n, struct xattr_smb_acl *acl)
+{
+       int i;
+
+       ndr_write_int32(n, acl->count);
+       n->offset = ALIGN(n->offset, 8);
+       ndr_write_int32(n, acl->count);
+       ndr_write_int32(n, 0);
+
+       for (i = 0; i < acl->count; i++) {
+               n->offset = ALIGN(n->offset, 8);
+               ndr_write_int16(n, acl->entries[i].type);
+               ndr_write_int16(n, acl->entries[i].type);
+
+               if (acl->entries[i].type == SMB_ACL_USER) {
+                       n->offset = ALIGN(n->offset, 8);
+                       ndr_write_int64(n, acl->entries[i].uid);
+               } else if (acl->entries[i].type == SMB_ACL_GROUP) {
+                       n->offset = ALIGN(n->offset, 8);
+                       ndr_write_int64(n, acl->entries[i].gid);
+               }
+
+               /* push permission */
+               ndr_write_int32(n, acl->entries[i].perm);
+       }
+
+       return 0;
+}
+
+int ndr_encode_posix_acl(struct ndr *n,
+                        struct user_namespace *user_ns,
+                        struct inode *inode,
+                        struct xattr_smb_acl *acl,
+                        struct xattr_smb_acl *def_acl)
+{
+       int ref_id = 0x00020000;
+
+       n->offset = 0;
+       n->length = 1024;
+       n->data = kzalloc(n->length, GFP_KERNEL);
+       if (!n->data)
+               return -ENOMEM;
+
+       if (acl) {
+               /* ACL ACCESS */
+               ndr_write_int32(n, ref_id);
+               ref_id += 4;
+       } else {
+               ndr_write_int32(n, 0);
+       }
+
+       if (def_acl) {
+               /* DEFAULT ACL ACCESS */
+               ndr_write_int32(n, ref_id);
+               ref_id += 4;
+       } else {
+               ndr_write_int32(n, 0);
+       }
+
+       ndr_write_int64(n, from_kuid(user_ns, inode->i_uid));
+       ndr_write_int64(n, from_kgid(user_ns, inode->i_gid));
+       ndr_write_int32(n, inode->i_mode);
+
+       if (acl) {
+               ndr_encode_posix_acl_entry(n, acl);
+               if (def_acl)
+                       ndr_encode_posix_acl_entry(n, def_acl);
+       }
+       return 0;
+}
+
+int ndr_encode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
+{
+       int ref_id = 0x00020004;
+
+       n->offset = 0;
+       n->length = 2048;
+       n->data = kzalloc(n->length, GFP_KERNEL);
+       if (!n->data)
+               return -ENOMEM;
+
+       ndr_write_int16(n, acl->version);
+       ndr_write_int32(n, acl->version);
+       ndr_write_int16(n, 2);
+       ndr_write_int32(n, ref_id);
+
+       /* push hash type and 64-byte hash */
+       ndr_write_int16(n, acl->hash_type);
+       ndr_write_bytes(n, acl->hash, XATTR_SD_HASH_SIZE);
+       ndr_write_bytes(n, acl->desc, acl->desc_len);
+       ndr_write_int64(n, acl->current_time);
+       ndr_write_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE);
+
+       /* push ndr for security descriptor */
+       ndr_write_bytes(n, acl->sd_buf, acl->sd_size);
+
+       return 0;
+}
+
+int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
+{
+       int version2;
+
+       n->offset = 0;
+       acl->version = ndr_read_int16(n);
+       if (acl->version != 4) {
+               pr_err("v%d version is not supported\n", acl->version);
+               return -EINVAL;
+       }
+
+       version2 = ndr_read_int32(n);
+       if (acl->version != version2) {
+               pr_err("ndr version mismatched(version: %d, version2: %d)\n",
+                      acl->version, version2);
+               return -EINVAL;
+       }
+
+       /* Read Level */
+       ndr_read_int16(n);
+       /* Read Ref Id */
+       ndr_read_int32(n);
+       acl->hash_type = ndr_read_int16(n);
+       ndr_read_bytes(n, acl->hash, XATTR_SD_HASH_SIZE);
+
+       ndr_read_bytes(n, acl->desc, 10);
+       if (strncmp(acl->desc, "posix_acl", 9)) {
+               pr_err("Invalid acl description : %s\n", acl->desc);
+               return -EINVAL;
+       }
+
+       /* Read Time */
+       ndr_read_int64(n);
+       /* Read Posix ACL hash */
+       ndr_read_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE);
+       acl->sd_size = n->length - n->offset;
+       acl->sd_buf = kzalloc(acl->sd_size, GFP_KERNEL);
+       if (!acl->sd_buf)
+               return -ENOMEM;
+
+       ndr_read_bytes(n, acl->sd_buf, acl->sd_size);
+
+       return 0;
+}
diff --git a/fs/ksmbd/ndr.h b/fs/ksmbd/ndr.h
new file mode 100644 (file)
index 0000000..60ca265
--- /dev/null
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2020 Samsung Electronics Co., Ltd.
+ *   Author(s): Namjae Jeon <linkinjeon@kernel.org>
+ */
+
+struct ndr {
+       char    *data;
+       int     offset;
+       int     length;
+};
+
+#define NDR_NTSD_OFFSETOF      0xA0
+
+int ndr_encode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da);
+int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da);
+int ndr_encode_posix_acl(struct ndr *n, struct user_namespace *user_ns,
+                        struct inode *inode, struct xattr_smb_acl *acl,
+                        struct xattr_smb_acl *def_acl);
+int ndr_encode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl);
+int ndr_encode_v3_ntacl(struct ndr *n, struct xattr_ntacl *acl);
+int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl);
diff --git a/fs/ksmbd/nterr.h b/fs/ksmbd/nterr.h
new file mode 100644 (file)
index 0000000..2f358f8
--- /dev/null
@@ -0,0 +1,543 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Unix SMB/Netbios implementation.
+ * Version 1.9.
+ * NT error code constants
+ * Copyright (C) Andrew Tridgell              1992-2000
+ * Copyright (C) John H Terpstra              1996-2000
+ * Copyright (C) Luke Kenneth Casson Leighton 1996-2000
+ * Copyright (C) Paul Ashton                  1998-2000
+ */
+
+#ifndef _NTERR_H
+#define _NTERR_H
+
+/* Win32 Status codes. */
+#define NT_STATUS_MORE_ENTRIES         0x0105
+#define NT_ERROR_INVALID_PARAMETER     0x0057
+#define NT_ERROR_INSUFFICIENT_BUFFER   0x007a
+#define NT_STATUS_1804                 0x070c
+#define NT_STATUS_NOTIFY_ENUM_DIR      0x010c
+#define NT_STATUS_INVALID_LOCK_RANGE   (0xC0000000 | 0x01a1)
+/*
+ * Win32 Error codes extracted using a loop in smbclient then printing a netmon
+ * sniff to a file.
+ */
+
+#define NT_STATUS_OK                   0x0000
+#define NT_STATUS_SOME_UNMAPPED        0x0107
+#define NT_STATUS_BUFFER_OVERFLOW  0x80000005
+#define NT_STATUS_NO_MORE_ENTRIES  0x8000001a
+#define NT_STATUS_MEDIA_CHANGED    0x8000001c
+#define NT_STATUS_END_OF_MEDIA     0x8000001e
+#define NT_STATUS_MEDIA_CHECK      0x80000020
+#define NT_STATUS_NO_DATA_DETECTED 0x8000001c
+#define NT_STATUS_STOPPED_ON_SYMLINK 0x8000002d
+#define NT_STATUS_DEVICE_REQUIRES_CLEANING 0x80000288
+#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000288
+#define NT_STATUS_UNSUCCESSFUL (0xC0000000 | 0x0001)
+#define NT_STATUS_NOT_IMPLEMENTED (0xC0000000 | 0x0002)
+#define NT_STATUS_INVALID_INFO_CLASS (0xC0000000 | 0x0003)
+#define NT_STATUS_INFO_LENGTH_MISMATCH (0xC0000000 | 0x0004)
+#define NT_STATUS_ACCESS_VIOLATION (0xC0000000 | 0x0005)
+#define NT_STATUS_IN_PAGE_ERROR (0xC0000000 | 0x0006)
+#define NT_STATUS_PAGEFILE_QUOTA (0xC0000000 | 0x0007)
+#define NT_STATUS_INVALID_HANDLE (0xC0000000 | 0x0008)
+#define NT_STATUS_BAD_INITIAL_STACK (0xC0000000 | 0x0009)
+#define NT_STATUS_BAD_INITIAL_PC (0xC0000000 | 0x000a)
+#define NT_STATUS_INVALID_CID (0xC0000000 | 0x000b)
+#define NT_STATUS_TIMER_NOT_CANCELED (0xC0000000 | 0x000c)
+#define NT_STATUS_INVALID_PARAMETER (0xC0000000 | 0x000d)
+#define NT_STATUS_NO_SUCH_DEVICE (0xC0000000 | 0x000e)
+#define NT_STATUS_NO_SUCH_FILE (0xC0000000 | 0x000f)
+#define NT_STATUS_INVALID_DEVICE_REQUEST (0xC0000000 | 0x0010)
+#define NT_STATUS_END_OF_FILE (0xC0000000 | 0x0011)
+#define NT_STATUS_WRONG_VOLUME (0xC0000000 | 0x0012)
+#define NT_STATUS_NO_MEDIA_IN_DEVICE (0xC0000000 | 0x0013)
+#define NT_STATUS_UNRECOGNIZED_MEDIA (0xC0000000 | 0x0014)
+#define NT_STATUS_NONEXISTENT_SECTOR (0xC0000000 | 0x0015)
+#define NT_STATUS_MORE_PROCESSING_REQUIRED (0xC0000000 | 0x0016)
+#define NT_STATUS_NO_MEMORY (0xC0000000 | 0x0017)
+#define NT_STATUS_CONFLICTING_ADDRESSES (0xC0000000 | 0x0018)
+#define NT_STATUS_NOT_MAPPED_VIEW (0xC0000000 | 0x0019)
+#define NT_STATUS_UNABLE_TO_FREE_VM (0x80000000 | 0x001a)
+#define NT_STATUS_UNABLE_TO_DELETE_SECTION (0xC0000000 | 0x001b)
+#define NT_STATUS_INVALID_SYSTEM_SERVICE (0xC0000000 | 0x001c)
+#define NT_STATUS_ILLEGAL_INSTRUCTION (0xC0000000 | 0x001d)
+#define NT_STATUS_INVALID_LOCK_SEQUENCE (0xC0000000 | 0x001e)
+#define NT_STATUS_INVALID_VIEW_SIZE (0xC0000000 | 0x001f)
+#define NT_STATUS_INVALID_FILE_FOR_SECTION (0xC0000000 | 0x0020)
+#define NT_STATUS_ALREADY_COMMITTED (0xC0000000 | 0x0021)
+#define NT_STATUS_ACCESS_DENIED (0xC0000000 | 0x0022)
+#define NT_STATUS_BUFFER_TOO_SMALL (0xC0000000 | 0x0023)
+#define NT_STATUS_OBJECT_TYPE_MISMATCH (0xC0000000 | 0x0024)
+#define NT_STATUS_NONCONTINUABLE_EXCEPTION (0xC0000000 | 0x0025)
+#define NT_STATUS_INVALID_DISPOSITION (0xC0000000 | 0x0026)
+#define NT_STATUS_UNWIND (0xC0000000 | 0x0027)
+#define NT_STATUS_BAD_STACK (0xC0000000 | 0x0028)
+#define NT_STATUS_INVALID_UNWIND_TARGET (0xC0000000 | 0x0029)
+#define NT_STATUS_NOT_LOCKED (0xC0000000 | 0x002a)
+#define NT_STATUS_PARITY_ERROR (0xC0000000 | 0x002b)
+#define NT_STATUS_UNABLE_TO_DECOMMIT_VM (0xC0000000 | 0x002c)
+#define NT_STATUS_NOT_COMMITTED (0xC0000000 | 0x002d)
+#define NT_STATUS_INVALID_PORT_ATTRIBUTES (0xC0000000 | 0x002e)
+#define NT_STATUS_PORT_MESSAGE_TOO_LONG (0xC0000000 | 0x002f)
+#define NT_STATUS_INVALID_PARAMETER_MIX (0xC0000000 | 0x0030)
+#define NT_STATUS_INVALID_QUOTA_LOWER (0xC0000000 | 0x0031)
+#define NT_STATUS_DISK_CORRUPT_ERROR (0xC0000000 | 0x0032)
+#define NT_STATUS_OBJECT_NAME_INVALID (0xC0000000 | 0x0033)
+#define NT_STATUS_OBJECT_NAME_NOT_FOUND (0xC0000000 | 0x0034)
+#define NT_STATUS_OBJECT_NAME_COLLISION (0xC0000000 | 0x0035)
+#define NT_STATUS_HANDLE_NOT_WAITABLE (0xC0000000 | 0x0036)
+#define NT_STATUS_PORT_DISCONNECTED (0xC0000000 | 0x0037)
+#define NT_STATUS_DEVICE_ALREADY_ATTACHED (0xC0000000 | 0x0038)
+#define NT_STATUS_OBJECT_PATH_INVALID (0xC0000000 | 0x0039)
+#define NT_STATUS_OBJECT_PATH_NOT_FOUND (0xC0000000 | 0x003a)
+#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD (0xC0000000 | 0x003b)
+#define NT_STATUS_DATA_OVERRUN (0xC0000000 | 0x003c)
+#define NT_STATUS_DATA_LATE_ERROR (0xC0000000 | 0x003d)
+#define NT_STATUS_DATA_ERROR (0xC0000000 | 0x003e)
+#define NT_STATUS_CRC_ERROR (0xC0000000 | 0x003f)
+#define NT_STATUS_SECTION_TOO_BIG (0xC0000000 | 0x0040)
+#define NT_STATUS_PORT_CONNECTION_REFUSED (0xC0000000 | 0x0041)
+#define NT_STATUS_INVALID_PORT_HANDLE (0xC0000000 | 0x0042)
+#define NT_STATUS_SHARING_VIOLATION (0xC0000000 | 0x0043)
+#define NT_STATUS_QUOTA_EXCEEDED (0xC0000000 | 0x0044)
+#define NT_STATUS_INVALID_PAGE_PROTECTION (0xC0000000 | 0x0045)
+#define NT_STATUS_MUTANT_NOT_OWNED (0xC0000000 | 0x0046)
+#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED (0xC0000000 | 0x0047)
+#define NT_STATUS_PORT_ALREADY_SET (0xC0000000 | 0x0048)
+#define NT_STATUS_SECTION_NOT_IMAGE (0xC0000000 | 0x0049)
+#define NT_STATUS_SUSPEND_COUNT_EXCEEDED (0xC0000000 | 0x004a)
+#define NT_STATUS_THREAD_IS_TERMINATING (0xC0000000 | 0x004b)
+#define NT_STATUS_BAD_WORKING_SET_LIMIT (0xC0000000 | 0x004c)
+#define NT_STATUS_INCOMPATIBLE_FILE_MAP (0xC0000000 | 0x004d)
+#define NT_STATUS_SECTION_PROTECTION (0xC0000000 | 0x004e)
+#define NT_STATUS_EAS_NOT_SUPPORTED (0xC0000000 | 0x004f)
+#define NT_STATUS_EA_TOO_LARGE (0xC0000000 | 0x0050)
+#define NT_STATUS_NONEXISTENT_EA_ENTRY (0xC0000000 | 0x0051)
+#define NT_STATUS_NO_EAS_ON_FILE (0xC0000000 | 0x0052)
+#define NT_STATUS_EA_CORRUPT_ERROR (0xC0000000 | 0x0053)
+#define NT_STATUS_FILE_LOCK_CONFLICT (0xC0000000 | 0x0054)
+#define NT_STATUS_LOCK_NOT_GRANTED (0xC0000000 | 0x0055)
+#define NT_STATUS_DELETE_PENDING (0xC0000000 | 0x0056)
+#define NT_STATUS_CTL_FILE_NOT_SUPPORTED (0xC0000000 | 0x0057)
+#define NT_STATUS_UNKNOWN_REVISION (0xC0000000 | 0x0058)
+#define NT_STATUS_REVISION_MISMATCH (0xC0000000 | 0x0059)
+#define NT_STATUS_INVALID_OWNER (0xC0000000 | 0x005a)
+#define NT_STATUS_INVALID_PRIMARY_GROUP (0xC0000000 | 0x005b)
+#define NT_STATUS_NO_IMPERSONATION_TOKEN (0xC0000000 | 0x005c)
+#define NT_STATUS_CANT_DISABLE_MANDATORY (0xC0000000 | 0x005d)
+#define NT_STATUS_NO_LOGON_SERVERS (0xC0000000 | 0x005e)
+#define NT_STATUS_NO_SUCH_LOGON_SESSION (0xC0000000 | 0x005f)
+#define NT_STATUS_NO_SUCH_PRIVILEGE (0xC0000000 | 0x0060)
+#define NT_STATUS_PRIVILEGE_NOT_HELD (0xC0000000 | 0x0061)
+#define NT_STATUS_INVALID_ACCOUNT_NAME (0xC0000000 | 0x0062)
+#define NT_STATUS_USER_EXISTS (0xC0000000 | 0x0063)
+#define NT_STATUS_NO_SUCH_USER (0xC0000000 | 0x0064)
+#define NT_STATUS_GROUP_EXISTS (0xC0000000 | 0x0065)
+#define NT_STATUS_NO_SUCH_GROUP (0xC0000000 | 0x0066)
+#define NT_STATUS_MEMBER_IN_GROUP (0xC0000000 | 0x0067)
+#define NT_STATUS_MEMBER_NOT_IN_GROUP (0xC0000000 | 0x0068)
+#define NT_STATUS_LAST_ADMIN (0xC0000000 | 0x0069)
+#define NT_STATUS_WRONG_PASSWORD (0xC0000000 | 0x006a)
+#define NT_STATUS_ILL_FORMED_PASSWORD (0xC0000000 | 0x006b)
+#define NT_STATUS_PASSWORD_RESTRICTION (0xC0000000 | 0x006c)
+#define NT_STATUS_LOGON_FAILURE (0xC0000000 | 0x006d)
+#define NT_STATUS_ACCOUNT_RESTRICTION (0xC0000000 | 0x006e)
+#define NT_STATUS_INVALID_LOGON_HOURS (0xC0000000 | 0x006f)
+#define NT_STATUS_INVALID_WORKSTATION (0xC0000000 | 0x0070)
+#define NT_STATUS_PASSWORD_EXPIRED (0xC0000000 | 0x0071)
+#define NT_STATUS_ACCOUNT_DISABLED (0xC0000000 | 0x0072)
+#define NT_STATUS_NONE_MAPPED (0xC0000000 | 0x0073)
+#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED (0xC0000000 | 0x0074)
+#define NT_STATUS_LUIDS_EXHAUSTED (0xC0000000 | 0x0075)
+#define NT_STATUS_INVALID_SUB_AUTHORITY (0xC0000000 | 0x0076)
+#define NT_STATUS_INVALID_ACL (0xC0000000 | 0x0077)
+#define NT_STATUS_INVALID_SID (0xC0000000 | 0x0078)
+#define NT_STATUS_INVALID_SECURITY_DESCR (0xC0000000 | 0x0079)
+#define NT_STATUS_PROCEDURE_NOT_FOUND (0xC0000000 | 0x007a)
+#define NT_STATUS_INVALID_IMAGE_FORMAT (0xC0000000 | 0x007b)
+#define NT_STATUS_NO_TOKEN (0xC0000000 | 0x007c)
+#define NT_STATUS_BAD_INHERITANCE_ACL (0xC0000000 | 0x007d)
+#define NT_STATUS_RANGE_NOT_LOCKED (0xC0000000 | 0x007e)
+#define NT_STATUS_DISK_FULL (0xC0000000 | 0x007f)
+#define NT_STATUS_SERVER_DISABLED (0xC0000000 | 0x0080)
+#define NT_STATUS_SERVER_NOT_DISABLED (0xC0000000 | 0x0081)
+#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED (0xC0000000 | 0x0082)
+#define NT_STATUS_GUIDS_EXHAUSTED (0xC0000000 | 0x0083)
+#define NT_STATUS_INVALID_ID_AUTHORITY (0xC0000000 | 0x0084)
+#define NT_STATUS_AGENTS_EXHAUSTED (0xC0000000 | 0x0085)
+#define NT_STATUS_INVALID_VOLUME_LABEL (0xC0000000 | 0x0086)
+#define NT_STATUS_SECTION_NOT_EXTENDED (0xC0000000 | 0x0087)
+#define NT_STATUS_NOT_MAPPED_DATA (0xC0000000 | 0x0088)
+#define NT_STATUS_RESOURCE_DATA_NOT_FOUND (0xC0000000 | 0x0089)
+#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND (0xC0000000 | 0x008a)
+#define NT_STATUS_RESOURCE_NAME_NOT_FOUND (0xC0000000 | 0x008b)
+#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED (0xC0000000 | 0x008c)
+#define NT_STATUS_FLOAT_DENORMAL_OPERAND (0xC0000000 | 0x008d)
+#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO (0xC0000000 | 0x008e)
+#define NT_STATUS_FLOAT_INEXACT_RESULT (0xC0000000 | 0x008f)
+#define NT_STATUS_FLOAT_INVALID_OPERATION (0xC0000000 | 0x0090)
+#define NT_STATUS_FLOAT_OVERFLOW (0xC0000000 | 0x0091)
+#define NT_STATUS_FLOAT_STACK_CHECK (0xC0000000 | 0x0092)
+#define NT_STATUS_FLOAT_UNDERFLOW (0xC0000000 | 0x0093)
+#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO (0xC0000000 | 0x0094)
+#define NT_STATUS_INTEGER_OVERFLOW (0xC0000000 | 0x0095)
+#define NT_STATUS_PRIVILEGED_INSTRUCTION (0xC0000000 | 0x0096)
+#define NT_STATUS_TOO_MANY_PAGING_FILES (0xC0000000 | 0x0097)
+#define NT_STATUS_FILE_INVALID (0xC0000000 | 0x0098)
+#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED (0xC0000000 | 0x0099)
+#define NT_STATUS_INSUFFICIENT_RESOURCES (0xC0000000 | 0x009a)
+#define NT_STATUS_DFS_EXIT_PATH_FOUND (0xC0000000 | 0x009b)
+#define NT_STATUS_DEVICE_DATA_ERROR (0xC0000000 | 0x009c)
+#define NT_STATUS_DEVICE_NOT_CONNECTED (0xC0000000 | 0x009d)
+#define NT_STATUS_DEVICE_POWER_FAILURE (0xC0000000 | 0x009e)
+#define NT_STATUS_FREE_VM_NOT_AT_BASE (0xC0000000 | 0x009f)
+#define NT_STATUS_MEMORY_NOT_ALLOCATED (0xC0000000 | 0x00a0)
+#define NT_STATUS_WORKING_SET_QUOTA (0xC0000000 | 0x00a1)
+#define NT_STATUS_MEDIA_WRITE_PROTECTED (0xC0000000 | 0x00a2)
+#define NT_STATUS_DEVICE_NOT_READY (0xC0000000 | 0x00a3)
+#define NT_STATUS_INVALID_GROUP_ATTRIBUTES (0xC0000000 | 0x00a4)
+#define NT_STATUS_BAD_IMPERSONATION_LEVEL (0xC0000000 | 0x00a5)
+#define NT_STATUS_CANT_OPEN_ANONYMOUS (0xC0000000 | 0x00a6)
+#define NT_STATUS_BAD_VALIDATION_CLASS (0xC0000000 | 0x00a7)
+#define NT_STATUS_BAD_TOKEN_TYPE (0xC0000000 | 0x00a8)
+#define NT_STATUS_BAD_MASTER_BOOT_RECORD (0xC0000000 | 0x00a9)
+#define NT_STATUS_INSTRUCTION_MISALIGNMENT (0xC0000000 | 0x00aa)
+#define NT_STATUS_INSTANCE_NOT_AVAILABLE (0xC0000000 | 0x00ab)
+#define NT_STATUS_PIPE_NOT_AVAILABLE (0xC0000000 | 0x00ac)
+#define NT_STATUS_INVALID_PIPE_STATE (0xC0000000 | 0x00ad)
+#define NT_STATUS_PIPE_BUSY (0xC0000000 | 0x00ae)
+#define NT_STATUS_ILLEGAL_FUNCTION (0xC0000000 | 0x00af)
+#define NT_STATUS_PIPE_DISCONNECTED (0xC0000000 | 0x00b0)
+#define NT_STATUS_PIPE_CLOSING (0xC0000000 | 0x00b1)
+#define NT_STATUS_PIPE_CONNECTED (0xC0000000 | 0x00b2)
+#define NT_STATUS_PIPE_LISTENING (0xC0000000 | 0x00b3)
+#define NT_STATUS_INVALID_READ_MODE (0xC0000000 | 0x00b4)
+#define NT_STATUS_IO_TIMEOUT (0xC0000000 | 0x00b5)
+#define NT_STATUS_FILE_FORCED_CLOSED (0xC0000000 | 0x00b6)
+#define NT_STATUS_PROFILING_NOT_STARTED (0xC0000000 | 0x00b7)
+#define NT_STATUS_PROFILING_NOT_STOPPED (0xC0000000 | 0x00b8)
+#define NT_STATUS_COULD_NOT_INTERPRET (0xC0000000 | 0x00b9)
+#define NT_STATUS_FILE_IS_A_DIRECTORY (0xC0000000 | 0x00ba)
+#define NT_STATUS_NOT_SUPPORTED (0xC0000000 | 0x00bb)
+#define NT_STATUS_REMOTE_NOT_LISTENING (0xC0000000 | 0x00bc)
+#define NT_STATUS_DUPLICATE_NAME (0xC0000000 | 0x00bd)
+#define NT_STATUS_BAD_NETWORK_PATH (0xC0000000 | 0x00be)
+#define NT_STATUS_NETWORK_BUSY (0xC0000000 | 0x00bf)
+#define NT_STATUS_DEVICE_DOES_NOT_EXIST (0xC0000000 | 0x00c0)
+#define NT_STATUS_TOO_MANY_COMMANDS (0xC0000000 | 0x00c1)
+#define NT_STATUS_ADAPTER_HARDWARE_ERROR (0xC0000000 | 0x00c2)
+#define NT_STATUS_INVALID_NETWORK_RESPONSE (0xC0000000 | 0x00c3)
+#define NT_STATUS_UNEXPECTED_NETWORK_ERROR (0xC0000000 | 0x00c4)
+#define NT_STATUS_BAD_REMOTE_ADAPTER (0xC0000000 | 0x00c5)
+#define NT_STATUS_PRINT_QUEUE_FULL (0xC0000000 | 0x00c6)
+#define NT_STATUS_NO_SPOOL_SPACE (0xC0000000 | 0x00c7)
+#define NT_STATUS_PRINT_CANCELLED (0xC0000000 | 0x00c8)
+#define NT_STATUS_NETWORK_NAME_DELETED (0xC0000000 | 0x00c9)
+#define NT_STATUS_NETWORK_ACCESS_DENIED (0xC0000000 | 0x00ca)
+#define NT_STATUS_BAD_DEVICE_TYPE (0xC0000000 | 0x00cb)
+#define NT_STATUS_BAD_NETWORK_NAME (0xC0000000 | 0x00cc)
+#define NT_STATUS_TOO_MANY_NAMES (0xC0000000 | 0x00cd)
+#define NT_STATUS_TOO_MANY_SESSIONS (0xC0000000 | 0x00ce)
+#define NT_STATUS_SHARING_PAUSED (0xC0000000 | 0x00cf)
+#define NT_STATUS_REQUEST_NOT_ACCEPTED (0xC0000000 | 0x00d0)
+#define NT_STATUS_REDIRECTOR_PAUSED (0xC0000000 | 0x00d1)
+#define NT_STATUS_NET_WRITE_FAULT (0xC0000000 | 0x00d2)
+#define NT_STATUS_PROFILING_AT_LIMIT (0xC0000000 | 0x00d3)
+#define NT_STATUS_NOT_SAME_DEVICE (0xC0000000 | 0x00d4)
+#define NT_STATUS_FILE_RENAMED (0xC0000000 | 0x00d5)
+#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED (0xC0000000 | 0x00d6)
+#define NT_STATUS_NO_SECURITY_ON_OBJECT (0xC0000000 | 0x00d7)
+#define NT_STATUS_CANT_WAIT (0xC0000000 | 0x00d8)
+#define NT_STATUS_PIPE_EMPTY (0xC0000000 | 0x00d9)
+#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO (0xC0000000 | 0x00da)
+#define NT_STATUS_CANT_TERMINATE_SELF (0xC0000000 | 0x00db)
+#define NT_STATUS_INVALID_SERVER_STATE (0xC0000000 | 0x00dc)
+#define NT_STATUS_INVALID_DOMAIN_STATE (0xC0000000 | 0x00dd)
+#define NT_STATUS_INVALID_DOMAIN_ROLE (0xC0000000 | 0x00de)
+#define NT_STATUS_NO_SUCH_DOMAIN (0xC0000000 | 0x00df)
+#define NT_STATUS_DOMAIN_EXISTS (0xC0000000 | 0x00e0)
+#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED (0xC0000000 | 0x00e1)
+#define NT_STATUS_OPLOCK_NOT_GRANTED (0xC0000000 | 0x00e2)
+#define NT_STATUS_INVALID_OPLOCK_PROTOCOL (0xC0000000 | 0x00e3)
+#define NT_STATUS_INTERNAL_DB_CORRUPTION (0xC0000000 | 0x00e4)
+#define NT_STATUS_INTERNAL_ERROR (0xC0000000 | 0x00e5)
+#define NT_STATUS_GENERIC_NOT_MAPPED (0xC0000000 | 0x00e6)
+#define NT_STATUS_BAD_DESCRIPTOR_FORMAT (0xC0000000 | 0x00e7)
+#define NT_STATUS_INVALID_USER_BUFFER (0xC0000000 | 0x00e8)
+#define NT_STATUS_UNEXPECTED_IO_ERROR (0xC0000000 | 0x00e9)
+#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR (0xC0000000 | 0x00ea)
+#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR (0xC0000000 | 0x00eb)
+#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR (0xC0000000 | 0x00ec)
+#define NT_STATUS_NOT_LOGON_PROCESS (0xC0000000 | 0x00ed)
+#define NT_STATUS_LOGON_SESSION_EXISTS (0xC0000000 | 0x00ee)
+#define NT_STATUS_INVALID_PARAMETER_1 (0xC0000000 | 0x00ef)
+#define NT_STATUS_INVALID_PARAMETER_2 (0xC0000000 | 0x00f0)
+#define NT_STATUS_INVALID_PARAMETER_3 (0xC0000000 | 0x00f1)
+#define NT_STATUS_INVALID_PARAMETER_4 (0xC0000000 | 0x00f2)
+#define NT_STATUS_INVALID_PARAMETER_5 (0xC0000000 | 0x00f3)
+#define NT_STATUS_INVALID_PARAMETER_6 (0xC0000000 | 0x00f4)
+#define NT_STATUS_INVALID_PARAMETER_7 (0xC0000000 | 0x00f5)
+#define NT_STATUS_INVALID_PARAMETER_8 (0xC0000000 | 0x00f6)
+#define NT_STATUS_INVALID_PARAMETER_9 (0xC0000000 | 0x00f7)
+#define NT_STATUS_INVALID_PARAMETER_10 (0xC0000000 | 0x00f8)
+#define NT_STATUS_INVALID_PARAMETER_11 (0xC0000000 | 0x00f9)
+#define NT_STATUS_INVALID_PARAMETER_12 (0xC0000000 | 0x00fa)
+#define NT_STATUS_REDIRECTOR_NOT_STARTED (0xC0000000 | 0x00fb)
+#define NT_STATUS_REDIRECTOR_STARTED (0xC0000000 | 0x00fc)
+#define NT_STATUS_STACK_OVERFLOW (0xC0000000 | 0x00fd)
+#define NT_STATUS_NO_SUCH_PACKAGE (0xC0000000 | 0x00fe)
+#define NT_STATUS_BAD_FUNCTION_TABLE (0xC0000000 | 0x00ff)
+#define NT_STATUS_DIRECTORY_NOT_EMPTY (0xC0000000 | 0x0101)
+#define NT_STATUS_FILE_CORRUPT_ERROR (0xC0000000 | 0x0102)
+#define NT_STATUS_NOT_A_DIRECTORY (0xC0000000 | 0x0103)
+#define NT_STATUS_BAD_LOGON_SESSION_STATE (0xC0000000 | 0x0104)
+#define NT_STATUS_LOGON_SESSION_COLLISION (0xC0000000 | 0x0105)
+#define NT_STATUS_NAME_TOO_LONG (0xC0000000 | 0x0106)
+#define NT_STATUS_FILES_OPEN (0xC0000000 | 0x0107)
+#define NT_STATUS_CONNECTION_IN_USE (0xC0000000 | 0x0108)
+#define NT_STATUS_MESSAGE_NOT_FOUND (0xC0000000 | 0x0109)
+#define NT_STATUS_PROCESS_IS_TERMINATING (0xC0000000 | 0x010a)
+#define NT_STATUS_INVALID_LOGON_TYPE (0xC0000000 | 0x010b)
+#define NT_STATUS_NO_GUID_TRANSLATION (0xC0000000 | 0x010c)
+#define NT_STATUS_CANNOT_IMPERSONATE (0xC0000000 | 0x010d)
+#define NT_STATUS_IMAGE_ALREADY_LOADED (0xC0000000 | 0x010e)
+#define NT_STATUS_ABIOS_NOT_PRESENT (0xC0000000 | 0x010f)
+#define NT_STATUS_ABIOS_LID_NOT_EXIST (0xC0000000 | 0x0110)
+#define NT_STATUS_ABIOS_LID_ALREADY_OWNED (0xC0000000 | 0x0111)
+#define NT_STATUS_ABIOS_NOT_LID_OWNER (0xC0000000 | 0x0112)
+#define NT_STATUS_ABIOS_INVALID_COMMAND (0xC0000000 | 0x0113)
+#define NT_STATUS_ABIOS_INVALID_LID (0xC0000000 | 0x0114)
+#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE (0xC0000000 | 0x0115)
+#define NT_STATUS_ABIOS_INVALID_SELECTOR (0xC0000000 | 0x0116)
+#define NT_STATUS_NO_LDT (0xC0000000 | 0x0117)
+#define NT_STATUS_INVALID_LDT_SIZE (0xC0000000 | 0x0118)
+#define NT_STATUS_INVALID_LDT_OFFSET (0xC0000000 | 0x0119)
+#define NT_STATUS_INVALID_LDT_DESCRIPTOR (0xC0000000 | 0x011a)
+#define NT_STATUS_INVALID_IMAGE_NE_FORMAT (0xC0000000 | 0x011b)
+#define NT_STATUS_RXACT_INVALID_STATE (0xC0000000 | 0x011c)
+#define NT_STATUS_RXACT_COMMIT_FAILURE (0xC0000000 | 0x011d)
+#define NT_STATUS_MAPPED_FILE_SIZE_ZERO (0xC0000000 | 0x011e)
+#define NT_STATUS_TOO_MANY_OPENED_FILES (0xC0000000 | 0x011f)
+#define NT_STATUS_CANCELLED (0xC0000000 | 0x0120)
+#define NT_STATUS_CANNOT_DELETE (0xC0000000 | 0x0121)
+#define NT_STATUS_INVALID_COMPUTER_NAME (0xC0000000 | 0x0122)
+#define NT_STATUS_FILE_DELETED (0xC0000000 | 0x0123)
+#define NT_STATUS_SPECIAL_ACCOUNT (0xC0000000 | 0x0124)
+#define NT_STATUS_SPECIAL_GROUP (0xC0000000 | 0x0125)
+#define NT_STATUS_SPECIAL_USER (0xC0000000 | 0x0126)
+#define NT_STATUS_MEMBERS_PRIMARY_GROUP (0xC0000000 | 0x0127)
+#define NT_STATUS_FILE_CLOSED (0xC0000000 | 0x0128)
+#define NT_STATUS_TOO_MANY_THREADS (0xC0000000 | 0x0129)
+#define NT_STATUS_THREAD_NOT_IN_PROCESS (0xC0000000 | 0x012a)
+#define NT_STATUS_TOKEN_ALREADY_IN_USE (0xC0000000 | 0x012b)
+#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED (0xC0000000 | 0x012c)
+#define NT_STATUS_COMMITMENT_LIMIT (0xC0000000 | 0x012d)
+#define NT_STATUS_INVALID_IMAGE_LE_FORMAT (0xC0000000 | 0x012e)
+#define NT_STATUS_INVALID_IMAGE_NOT_MZ (0xC0000000 | 0x012f)
+#define NT_STATUS_INVALID_IMAGE_PROTECT (0xC0000000 | 0x0130)
+#define NT_STATUS_INVALID_IMAGE_WIN_16 (0xC0000000 | 0x0131)
+#define NT_STATUS_LOGON_SERVER_CONFLICT (0xC0000000 | 0x0132)
+#define NT_STATUS_TIME_DIFFERENCE_AT_DC (0xC0000000 | 0x0133)
+#define NT_STATUS_SYNCHRONIZATION_REQUIRED (0xC0000000 | 0x0134)
+#define NT_STATUS_DLL_NOT_FOUND (0xC0000000 | 0x0135)
+#define NT_STATUS_OPEN_FAILED (0xC0000000 | 0x0136)
+#define NT_STATUS_IO_PRIVILEGE_FAILED (0xC0000000 | 0x0137)
+#define NT_STATUS_ORDINAL_NOT_FOUND (0xC0000000 | 0x0138)
+#define NT_STATUS_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0139)
+#define NT_STATUS_CONTROL_C_EXIT (0xC0000000 | 0x013a)
+#define NT_STATUS_LOCAL_DISCONNECT (0xC0000000 | 0x013b)
+#define NT_STATUS_REMOTE_DISCONNECT (0xC0000000 | 0x013c)
+#define NT_STATUS_REMOTE_RESOURCES (0xC0000000 | 0x013d)
+#define NT_STATUS_LINK_FAILED (0xC0000000 | 0x013e)
+#define NT_STATUS_LINK_TIMEOUT (0xC0000000 | 0x013f)
+#define NT_STATUS_INVALID_CONNECTION (0xC0000000 | 0x0140)
+#define NT_STATUS_INVALID_ADDRESS (0xC0000000 | 0x0141)
+#define NT_STATUS_DLL_INIT_FAILED (0xC0000000 | 0x0142)
+#define NT_STATUS_MISSING_SYSTEMFILE (0xC0000000 | 0x0143)
+#define NT_STATUS_UNHANDLED_EXCEPTION (0xC0000000 | 0x0144)
+#define NT_STATUS_APP_INIT_FAILURE (0xC0000000 | 0x0145)
+#define NT_STATUS_PAGEFILE_CREATE_FAILED (0xC0000000 | 0x0146)
+#define NT_STATUS_NO_PAGEFILE (0xC0000000 | 0x0147)
+#define NT_STATUS_INVALID_LEVEL (0xC0000000 | 0x0148)
+#define NT_STATUS_WRONG_PASSWORD_CORE (0xC0000000 | 0x0149)
+#define NT_STATUS_ILLEGAL_FLOAT_CONTEXT (0xC0000000 | 0x014a)
+#define NT_STATUS_PIPE_BROKEN (0xC0000000 | 0x014b)
+#define NT_STATUS_REGISTRY_CORRUPT (0xC0000000 | 0x014c)
+#define NT_STATUS_REGISTRY_IO_FAILED (0xC0000000 | 0x014d)
+#define NT_STATUS_NO_EVENT_PAIR (0xC0000000 | 0x014e)
+#define NT_STATUS_UNRECOGNIZED_VOLUME (0xC0000000 | 0x014f)
+#define NT_STATUS_SERIAL_NO_DEVICE_INITED (0xC0000000 | 0x0150)
+#define NT_STATUS_NO_SUCH_ALIAS (0xC0000000 | 0x0151)
+#define NT_STATUS_MEMBER_NOT_IN_ALIAS (0xC0000000 | 0x0152)
+#define NT_STATUS_MEMBER_IN_ALIAS (0xC0000000 | 0x0153)
+#define NT_STATUS_ALIAS_EXISTS (0xC0000000 | 0x0154)
+#define NT_STATUS_LOGON_NOT_GRANTED (0xC0000000 | 0x0155)
+#define NT_STATUS_TOO_MANY_SECRETS (0xC0000000 | 0x0156)
+#define NT_STATUS_SECRET_TOO_LONG (0xC0000000 | 0x0157)
+#define NT_STATUS_INTERNAL_DB_ERROR (0xC0000000 | 0x0158)
+#define NT_STATUS_FULLSCREEN_MODE (0xC0000000 | 0x0159)
+#define NT_STATUS_TOO_MANY_CONTEXT_IDS (0xC0000000 | 0x015a)
+#define NT_STATUS_LOGON_TYPE_NOT_GRANTED (0xC0000000 | 0x015b)
+#define NT_STATUS_NOT_REGISTRY_FILE (0xC0000000 | 0x015c)
+#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x015d)
+#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR (0xC0000000 | 0x015e)
+#define NT_STATUS_FT_MISSING_MEMBER (0xC0000000 | 0x015f)
+#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY (0xC0000000 | 0x0160)
+#define NT_STATUS_ILLEGAL_CHARACTER (0xC0000000 | 0x0161)
+#define NT_STATUS_UNMAPPABLE_CHARACTER (0xC0000000 | 0x0162)
+#define NT_STATUS_UNDEFINED_CHARACTER (0xC0000000 | 0x0163)
+#define NT_STATUS_FLOPPY_VOLUME (0xC0000000 | 0x0164)
+#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND (0xC0000000 | 0x0165)
+#define NT_STATUS_FLOPPY_WRONG_CYLINDER (0xC0000000 | 0x0166)
+#define NT_STATUS_FLOPPY_UNKNOWN_ERROR (0xC0000000 | 0x0167)
+#define NT_STATUS_FLOPPY_BAD_REGISTERS (0xC0000000 | 0x0168)
+#define NT_STATUS_DISK_RECALIBRATE_FAILED (0xC0000000 | 0x0169)
+#define NT_STATUS_DISK_OPERATION_FAILED (0xC0000000 | 0x016a)
+#define NT_STATUS_DISK_RESET_FAILED (0xC0000000 | 0x016b)
+#define NT_STATUS_SHARED_IRQ_BUSY (0xC0000000 | 0x016c)
+#define NT_STATUS_FT_ORPHANING (0xC0000000 | 0x016d)
+#define NT_STATUS_PARTITION_FAILURE (0xC0000000 | 0x0172)
+#define NT_STATUS_INVALID_BLOCK_LENGTH (0xC0000000 | 0x0173)
+#define NT_STATUS_DEVICE_NOT_PARTITIONED (0xC0000000 | 0x0174)
+#define NT_STATUS_UNABLE_TO_LOCK_MEDIA (0xC0000000 | 0x0175)
+#define NT_STATUS_UNABLE_TO_UNLOAD_MEDIA (0xC0000000 | 0x0176)
+#define NT_STATUS_EOM_OVERFLOW (0xC0000000 | 0x0177)
+#define NT_STATUS_NO_MEDIA (0xC0000000 | 0x0178)
+#define NT_STATUS_NO_SUCH_MEMBER (0xC0000000 | 0x017a)
+#define NT_STATUS_INVALID_MEMBER (0xC0000000 | 0x017b)
+#define NT_STATUS_KEY_DELETED (0xC0000000 | 0x017c)
+#define NT_STATUS_NO_LOG_SPACE (0xC0000000 | 0x017d)
+#define NT_STATUS_TOO_MANY_SIDS (0xC0000000 | 0x017e)
+#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x017f)
+#define NT_STATUS_KEY_HAS_CHILDREN (0xC0000000 | 0x0180)
+#define NT_STATUS_CHILD_MUST_BE_VOLATILE (0xC0000000 | 0x0181)
+#define NT_STATUS_DEVICE_CONFIGURATION_ERROR (0xC0000000 | 0x0182)
+#define NT_STATUS_DRIVER_INTERNAL_ERROR (0xC0000000 | 0x0183)
+#define NT_STATUS_INVALID_DEVICE_STATE (0xC0000000 | 0x0184)
+#define NT_STATUS_IO_DEVICE_ERROR (0xC0000000 | 0x0185)
+#define NT_STATUS_DEVICE_PROTOCOL_ERROR (0xC0000000 | 0x0186)
+#define NT_STATUS_BACKUP_CONTROLLER (0xC0000000 | 0x0187)
+#define NT_STATUS_LOG_FILE_FULL (0xC0000000 | 0x0188)
+#define NT_STATUS_TOO_LATE (0xC0000000 | 0x0189)
+#define NT_STATUS_NO_TRUST_LSA_SECRET (0xC0000000 | 0x018a)
+#define NT_STATUS_NO_TRUST_SAM_ACCOUNT (0xC0000000 | 0x018b)
+#define NT_STATUS_TRUSTED_DOMAIN_FAILURE (0xC0000000 | 0x018c)
+#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE (0xC0000000 | 0x018d)
+#define NT_STATUS_EVENTLOG_FILE_CORRUPT (0xC0000000 | 0x018e)
+#define NT_STATUS_EVENTLOG_CANT_START (0xC0000000 | 0x018f)
+#define NT_STATUS_TRUST_FAILURE (0xC0000000 | 0x0190)
+#define NT_STATUS_MUTANT_LIMIT_EXCEEDED (0xC0000000 | 0x0191)
+#define NT_STATUS_NETLOGON_NOT_STARTED (0xC0000000 | 0x0192)
+#define NT_STATUS_ACCOUNT_EXPIRED (0xC0000000 | 0x0193)
+#define NT_STATUS_POSSIBLE_DEADLOCK (0xC0000000 | 0x0194)
+#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT (0xC0000000 | 0x0195)
+#define NT_STATUS_REMOTE_SESSION_LIMIT (0xC0000000 | 0x0196)
+#define NT_STATUS_EVENTLOG_FILE_CHANGED (0xC0000000 | 0x0197)
+#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT (0xC0000000 | 0x0198)
+#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT (0xC0000000 | 0x0199)
+#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT (0xC0000000 | 0x019a)
+#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT (0xC0000000 | 0x019b)
+#define NT_STATUS_FS_DRIVER_REQUIRED (0xC0000000 | 0x019c)
+#define NT_STATUS_NO_USER_SESSION_KEY (0xC0000000 | 0x0202)
+#define NT_STATUS_USER_SESSION_DELETED (0xC0000000 | 0x0203)
+#define NT_STATUS_RESOURCE_LANG_NOT_FOUND (0xC0000000 | 0x0204)
+#define NT_STATUS_INSUFF_SERVER_RESOURCES (0xC0000000 | 0x0205)
+#define NT_STATUS_INVALID_BUFFER_SIZE (0xC0000000 | 0x0206)
+#define NT_STATUS_INVALID_ADDRESS_COMPONENT (0xC0000000 | 0x0207)
+#define NT_STATUS_INVALID_ADDRESS_WILDCARD (0xC0000000 | 0x0208)
+#define NT_STATUS_TOO_MANY_ADDRESSES (0xC0000000 | 0x0209)
+#define NT_STATUS_ADDRESS_ALREADY_EXISTS (0xC0000000 | 0x020a)
+#define NT_STATUS_ADDRESS_CLOSED (0xC0000000 | 0x020b)
+#define NT_STATUS_CONNECTION_DISCONNECTED (0xC0000000 | 0x020c)
+#define NT_STATUS_CONNECTION_RESET (0xC0000000 | 0x020d)
+#define NT_STATUS_TOO_MANY_NODES (0xC0000000 | 0x020e)
+#define NT_STATUS_TRANSACTION_ABORTED (0xC0000000 | 0x020f)
+#define NT_STATUS_TRANSACTION_TIMED_OUT (0xC0000000 | 0x0210)
+#define NT_STATUS_TRANSACTION_NO_RELEASE (0xC0000000 | 0x0211)
+#define NT_STATUS_TRANSACTION_NO_MATCH (0xC0000000 | 0x0212)
+#define NT_STATUS_TRANSACTION_RESPONDED (0xC0000000 | 0x0213)
+#define NT_STATUS_TRANSACTION_INVALID_ID (0xC0000000 | 0x0214)
+#define NT_STATUS_TRANSACTION_INVALID_TYPE (0xC0000000 | 0x0215)
+#define NT_STATUS_NOT_SERVER_SESSION (0xC0000000 | 0x0216)
+#define NT_STATUS_NOT_CLIENT_SESSION (0xC0000000 | 0x0217)
+#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE (0xC0000000 | 0x0218)
+#define NT_STATUS_DEBUG_ATTACH_FAILED (0xC0000000 | 0x0219)
+#define NT_STATUS_SYSTEM_PROCESS_TERMINATED (0xC0000000 | 0x021a)
+#define NT_STATUS_DATA_NOT_ACCEPTED (0xC0000000 | 0x021b)
+#define NT_STATUS_NO_BROWSER_SERVERS_FOUND (0xC0000000 | 0x021c)
+#define NT_STATUS_VDM_HARD_ERROR (0xC0000000 | 0x021d)
+#define NT_STATUS_DRIVER_CANCEL_TIMEOUT (0xC0000000 | 0x021e)
+#define NT_STATUS_REPLY_MESSAGE_MISMATCH (0xC0000000 | 0x021f)
+#define NT_STATUS_MAPPED_ALIGNMENT (0xC0000000 | 0x0220)
+#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH (0xC0000000 | 0x0221)
+#define NT_STATUS_LOST_WRITEBEHIND_DATA (0xC0000000 | 0x0222)
+#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID (0xC0000000 | 0x0223)
+#define NT_STATUS_PASSWORD_MUST_CHANGE (0xC0000000 | 0x0224)
+#define NT_STATUS_NOT_FOUND (0xC0000000 | 0x0225)
+#define NT_STATUS_NOT_TINY_STREAM (0xC0000000 | 0x0226)
+#define NT_STATUS_RECOVERY_FAILURE (0xC0000000 | 0x0227)
+#define NT_STATUS_STACK_OVERFLOW_READ (0xC0000000 | 0x0228)
+#define NT_STATUS_FAIL_CHECK (0xC0000000 | 0x0229)
+#define NT_STATUS_DUPLICATE_OBJECTID (0xC0000000 | 0x022a)
+#define NT_STATUS_OBJECTID_EXISTS (0xC0000000 | 0x022b)
+#define NT_STATUS_CONVERT_TO_LARGE (0xC0000000 | 0x022c)
+#define NT_STATUS_RETRY (0xC0000000 | 0x022d)
+#define NT_STATUS_FOUND_OUT_OF_SCOPE (0xC0000000 | 0x022e)
+#define NT_STATUS_ALLOCATE_BUCKET (0xC0000000 | 0x022f)
+#define NT_STATUS_PROPSET_NOT_FOUND (0xC0000000 | 0x0230)
+#define NT_STATUS_MARSHALL_OVERFLOW (0xC0000000 | 0x0231)
+#define NT_STATUS_INVALID_VARIANT (0xC0000000 | 0x0232)
+#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND (0xC0000000 | 0x0233)
+#define NT_STATUS_ACCOUNT_LOCKED_OUT (0xC0000000 | 0x0234)
+#define NT_STATUS_HANDLE_NOT_CLOSABLE (0xC0000000 | 0x0235)
+#define NT_STATUS_CONNECTION_REFUSED (0xC0000000 | 0x0236)
+#define NT_STATUS_GRACEFUL_DISCONNECT (0xC0000000 | 0x0237)
+#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED (0xC0000000 | 0x0238)
+#define NT_STATUS_ADDRESS_NOT_ASSOCIATED (0xC0000000 | 0x0239)
+#define NT_STATUS_CONNECTION_INVALID (0xC0000000 | 0x023a)
+#define NT_STATUS_CONNECTION_ACTIVE (0xC0000000 | 0x023b)
+#define NT_STATUS_NETWORK_UNREACHABLE (0xC0000000 | 0x023c)
+#define NT_STATUS_HOST_UNREACHABLE (0xC0000000 | 0x023d)
+#define NT_STATUS_PROTOCOL_UNREACHABLE (0xC0000000 | 0x023e)
+#define NT_STATUS_PORT_UNREACHABLE (0xC0000000 | 0x023f)
+#define NT_STATUS_REQUEST_ABORTED (0xC0000000 | 0x0240)
+#define NT_STATUS_CONNECTION_ABORTED (0xC0000000 | 0x0241)
+#define NT_STATUS_BAD_COMPRESSION_BUFFER (0xC0000000 | 0x0242)
+#define NT_STATUS_USER_MAPPED_FILE (0xC0000000 | 0x0243)
+#define NT_STATUS_AUDIT_FAILED (0xC0000000 | 0x0244)
+#define NT_STATUS_TIMER_RESOLUTION_NOT_SET (0xC0000000 | 0x0245)
+#define NT_STATUS_CONNECTION_COUNT_LIMIT (0xC0000000 | 0x0246)
+#define NT_STATUS_LOGIN_TIME_RESTRICTION (0xC0000000 | 0x0247)
+#define NT_STATUS_LOGIN_WKSTA_RESTRICTION (0xC0000000 | 0x0248)
+#define NT_STATUS_IMAGE_MP_UP_MISMATCH (0xC0000000 | 0x0249)
+#define NT_STATUS_INSUFFICIENT_LOGON_INFO (0xC0000000 | 0x0250)
+#define NT_STATUS_BAD_DLL_ENTRYPOINT (0xC0000000 | 0x0251)
+#define NT_STATUS_BAD_SERVICE_ENTRYPOINT (0xC0000000 | 0x0252)
+#define NT_STATUS_LPC_REPLY_LOST (0xC0000000 | 0x0253)
+#define NT_STATUS_IP_ADDRESS_CONFLICT1 (0xC0000000 | 0x0254)
+#define NT_STATUS_IP_ADDRESS_CONFLICT2 (0xC0000000 | 0x0255)
+#define NT_STATUS_REGISTRY_QUOTA_LIMIT (0xC0000000 | 0x0256)
+#define NT_STATUS_PATH_NOT_COVERED (0xC0000000 | 0x0257)
+#define NT_STATUS_NO_CALLBACK_ACTIVE (0xC0000000 | 0x0258)
+#define NT_STATUS_LICENSE_QUOTA_EXCEEDED (0xC0000000 | 0x0259)
+#define NT_STATUS_PWD_TOO_SHORT (0xC0000000 | 0x025a)
+#define NT_STATUS_PWD_TOO_RECENT (0xC0000000 | 0x025b)
+#define NT_STATUS_PWD_HISTORY_CONFLICT (0xC0000000 | 0x025c)
+#define NT_STATUS_PLUGPLAY_NO_DEVICE (0xC0000000 | 0x025e)
+#define NT_STATUS_UNSUPPORTED_COMPRESSION (0xC0000000 | 0x025f)
+#define NT_STATUS_INVALID_HW_PROFILE (0xC0000000 | 0x0260)
+#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH (0xC0000000 | 0x0261)
+#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND (0xC0000000 | 0x0262)
+#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0263)
+#define NT_STATUS_RESOURCE_NOT_OWNED (0xC0000000 | 0x0264)
+#define NT_STATUS_TOO_MANY_LINKS (0xC0000000 | 0x0265)
+#define NT_STATUS_QUOTA_LIST_INCONSISTENT (0xC0000000 | 0x0266)
+#define NT_STATUS_FILE_IS_OFFLINE (0xC0000000 | 0x0267)
+#define NT_STATUS_NETWORK_SESSION_EXPIRED  (0xC0000000 | 0x035c)
+#define NT_STATUS_NO_SUCH_JOB (0xC0000000 | 0xEDE)     /* scheduler */
+#define NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP (0xC0000000 | 0x5D0000)
+#define NT_STATUS_PENDING 0x00000103
+#endif                         /* _NTERR_H */
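A stand-alone sketch, not part of the patch above (constants re-declared locally for illustration): the NT_STATUS_* values follow the NTSTATUS layout, where the top two bits encode the severity (0xC0000000 marks an error) and the low 16 bits carry the code proper, which is why NT_STATUS_PENDING above has no 0xC0000000 prefix.

    /* Illustrative only -- not part of the ksmbd patch. */
    #include <stdio.h>
    #include <stdint.h>

    #define NT_STATUS_PENDING        0x00000103u
    #define NT_STATUS_ACCESS_DENIED (0xC0000000u | 0x0022u)

    static void dump_status(uint32_t status)
    {
            /* Bits 31-30: severity (0 = success, 3 = error).
             * Bits 15-0: the status code proper. */
            printf("0x%08x: severity=%u code=0x%04x\n",
                   (unsigned)status,
                   (unsigned)(status >> 30),
                   (unsigned)(status & 0xffffu));
    }

    int main(void)
    {
            dump_status(NT_STATUS_PENDING);       /* severity=0, code=0x0103 */
            dump_status(NT_STATUS_ACCESS_DENIED); /* severity=3, code=0x0022 */
            return 0;
    }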
diff --git a/fs/ksmbd/ntlmssp.h b/fs/ksmbd/ntlmssp.h
new file mode 100644 (file)
index 0000000..adaf4c0
--- /dev/null
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/*
+ *   Copyright (c) International Business Machines  Corp., 2002,2007
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ */
+
+#ifndef __KSMBD_NTLMSSP_H
+#define __KSMBD_NTLMSSP_H
+
+#define NTLMSSP_SIGNATURE "NTLMSSP"
+
+/* Security blob target info data */
+#define TGT_Name        "KSMBD"
+
+/*
+ * Size of the crypto key returned on the negotiate SMB in bytes
+ */
+#define CIFS_CRYPTO_KEY_SIZE   (8)
+#define CIFS_KEY_SIZE  (40)
+
+/*
+ * Size of encrypted user password in bytes
+ */
+#define CIFS_ENCPWD_SIZE       (16)
+#define CIFS_CPHTXT_SIZE       (16)
+
+/* Message Types */
+#define NtLmNegotiate     cpu_to_le32(1)
+#define NtLmChallenge     cpu_to_le32(2)
+#define NtLmAuthenticate  cpu_to_le32(3)
+#define UnknownMessage    cpu_to_le32(8)
+
+/* Negotiate Flags */
+#define NTLMSSP_NEGOTIATE_UNICODE         0x01 /* Text strings are unicode */
+#define NTLMSSP_NEGOTIATE_OEM             0x02 /* Text strings are in OEM */
+#define NTLMSSP_REQUEST_TARGET            0x04 /* Srv returns its auth realm */
+/* define reserved9                       0x08 */
+#define NTLMSSP_NEGOTIATE_SIGN          0x0010 /* Request signing capability */
+#define NTLMSSP_NEGOTIATE_SEAL          0x0020 /* Request confidentiality */
+#define NTLMSSP_NEGOTIATE_DGRAM         0x0040
+#define NTLMSSP_NEGOTIATE_LM_KEY        0x0080 /* Use LM session key */
+/* defined reserved 8                   0x0100 */
+#define NTLMSSP_NEGOTIATE_NTLM          0x0200 /* NTLM authentication */
+#define NTLMSSP_NEGOTIATE_NT_ONLY       0x0400 /* Lanman not allowed */
+#define NTLMSSP_ANONYMOUS               0x0800
+#define NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED 0x1000 /* reserved6 */
+#define NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED 0x2000
+#define NTLMSSP_NEGOTIATE_LOCAL_CALL    0x4000 /* client/server same machine */
+#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN   0x8000 /* Sign. All security levels  */
+#define NTLMSSP_TARGET_TYPE_DOMAIN     0x10000
+#define NTLMSSP_TARGET_TYPE_SERVER     0x20000
+#define NTLMSSP_TARGET_TYPE_SHARE      0x40000
+#define NTLMSSP_NEGOTIATE_EXTENDED_SEC 0x80000 /* NB:not related to NTLMv2 pwd*/
+/* #define NTLMSSP_REQUEST_INIT_RESP     0x100000 */
+#define NTLMSSP_NEGOTIATE_IDENTIFY    0x100000
+#define NTLMSSP_REQUEST_ACCEPT_RESP   0x200000 /* reserved5 */
+#define NTLMSSP_REQUEST_NON_NT_KEY    0x400000
+#define NTLMSSP_NEGOTIATE_TARGET_INFO 0x800000
+/* #define reserved4                 0x1000000 */
+#define NTLMSSP_NEGOTIATE_VERSION    0x2000000 /* we do not set */
+/* #define reserved3                 0x4000000 */
+/* #define reserved2                 0x8000000 */
+/* #define reserved1                0x10000000 */
+#define NTLMSSP_NEGOTIATE_128       0x20000000
+#define NTLMSSP_NEGOTIATE_KEY_XCH   0x40000000
+#define NTLMSSP_NEGOTIATE_56        0x80000000
+
+/* Define AV Pair Field IDs */
+enum av_field_type {
+       NTLMSSP_AV_EOL = 0,
+       NTLMSSP_AV_NB_COMPUTER_NAME,
+       NTLMSSP_AV_NB_DOMAIN_NAME,
+       NTLMSSP_AV_DNS_COMPUTER_NAME,
+       NTLMSSP_AV_DNS_DOMAIN_NAME,
+       NTLMSSP_AV_DNS_TREE_NAME,
+       NTLMSSP_AV_FLAGS,
+       NTLMSSP_AV_TIMESTAMP,
+       NTLMSSP_AV_RESTRICTION,
+       NTLMSSP_AV_TARGET_NAME,
+       NTLMSSP_AV_CHANNEL_BINDINGS
+};
+
+/* Although typedefs are not commonly used for structure definitions */
+/* in the Linux kernel, in this particular case they are useful      */
+/* to more closely match the standards document for NTLMSSP from     */
+/* OpenGroup and to make the code more closely match the standard in */
+/* appearance */
+
+struct security_buffer {
+       __le16 Length;
+       __le16 MaximumLength;
+       __le32 BufferOffset;    /* offset to buffer */
+} __packed;
+
+struct target_info {
+       __le16 Type;
+       __le16 Length;
+       __u8 Content[0];
+} __packed;
+
+struct negotiate_message {
+       __u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
+       __le32 MessageType;     /* NtLmNegotiate = 1 */
+       __le32 NegotiateFlags;
+       struct security_buffer DomainName;      /* RFC 1001 style and ASCII */
+       struct security_buffer WorkstationName; /* RFC 1001 and ASCII */
+       /*
+        * struct security_buffer for version info not present since we
+        * do not set the version is present flag
+        */
+       char DomainString[0];
+       /* followed by WorkstationString */
+} __packed;
+
+struct challenge_message {
+       __u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
+       __le32 MessageType;   /* NtLmChallenge = 2 */
+       struct security_buffer TargetName;
+       __le32 NegotiateFlags;
+       __u8 Challenge[CIFS_CRYPTO_KEY_SIZE];
+       __u8 Reserved[8];
+       struct security_buffer TargetInfoArray;
+       /*
+        * struct security_buffer for version info not present since we
+        * do not set the version is present flag
+        */
+} __packed;
+
+struct authenticate_message {
+       __u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
+       __le32 MessageType;  /* NtLmAuthenticate = 3 */
+       struct security_buffer LmChallengeResponse;
+       struct security_buffer NtChallengeResponse;
+       struct security_buffer DomainName;
+       struct security_buffer UserName;
+       struct security_buffer WorkstationName;
+       struct security_buffer SessionKey;
+       __le32 NegotiateFlags;
+       /*
+        * struct security_buffer for version info not present since we
+        * do not set the version is present flag
+        */
+       char UserString[0];
+} __packed;
+
+struct ntlmv2_resp {
+       char ntlmv2_hash[CIFS_ENCPWD_SIZE];
+       __le32 blob_signature;
+       __u32  reserved;
+       __le64  time;
+       __u64  client_chal; /* random */
+       __u32  reserved2;
+       /* array of name entries could follow ending in minimum 4 byte struct */
+} __packed;
+
+/* per smb session structure/fields */
+struct ntlmssp_auth {
+       /* whether session key is per smb session */
+       bool            sesskey_per_smbsess;
+       /* sent by client in type 1 ntlmsssp exchange */
+       __u32           client_flags;
+       /* sent by server in type 2 ntlmssp exchange */
+       __u32           conn_flags;
+       /* sent to server */
+       unsigned char   ciphertext[CIFS_CPHTXT_SIZE];
+       /* used by ntlmssp */
+       char            cryptkey[CIFS_CRYPTO_KEY_SIZE];
+};
+#endif /* __KSMBD_NTLMSSP_H */
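The NTLMSSP_NEGOTIATE_* values above are single-bit flags OR'd together into the NegotiateFlags field of the negotiate/challenge/authenticate messages, and struct ntlmssp_auth keeps the client's and the connection's copies. A stand-alone sketch, not part of the patch (flag values copied from the header above), of testing such a flag word:

    /* Illustrative only -- not part of the ksmbd patch. */
    #include <stdio.h>
    #include <stdint.h>

    #define NTLMSSP_NEGOTIATE_UNICODE      0x01
    #define NTLMSSP_NEGOTIATE_SIGN         0x0010
    #define NTLMSSP_NEGOTIATE_EXTENDED_SEC 0x80000
    #define NTLMSSP_NEGOTIATE_KEY_XCH      0x40000000

    int main(void)
    {
            /* Flags a client might send in its type 1 (negotiate) message. */
            uint32_t client_flags = NTLMSSP_NEGOTIATE_UNICODE |
                                    NTLMSSP_NEGOTIATE_SIGN |
                                    NTLMSSP_NEGOTIATE_EXTENDED_SEC;

            if (client_flags & NTLMSSP_NEGOTIATE_UNICODE)
                    printf("strings are UTF-16\n");
            if (client_flags & NTLMSSP_NEGOTIATE_SIGN)
                    printf("client requests signing\n");
            if (!(client_flags & NTLMSSP_NEGOTIATE_KEY_XCH))
                    printf("no key exchange requested\n");
            return 0;
    }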
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
new file mode 100644 (file)
index 0000000..6ace6c2
--- /dev/null
@@ -0,0 +1,1709 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/moduleparam.h>
+
+#include "glob.h"
+#include "oplock.h"
+
+#include "smb_common.h"
+#include "smbstatus.h"
+#include "connection.h"
+#include "mgmt/user_session.h"
+#include "mgmt/share_config.h"
+#include "mgmt/tree_connect.h"
+
+static LIST_HEAD(lease_table_list);
+static DEFINE_RWLOCK(lease_list_lock);
+
+/**
+ * alloc_opinfo() - allocate a new opinfo object for oplock info
+ * @work:      smb work
+ * @id:                fid of open file
+ * @Tid:       tree id of connection
+ *
+ * Return:      allocated opinfo object on success, otherwise NULL
+ */
+static struct oplock_info *alloc_opinfo(struct ksmbd_work *work,
+                                       u64 id, __u16 Tid)
+{
+       struct ksmbd_session *sess = work->sess;
+       struct oplock_info *opinfo;
+
+       opinfo = kzalloc(sizeof(struct oplock_info), GFP_KERNEL);
+       if (!opinfo)
+               return NULL;
+
+       opinfo->sess = sess;
+       opinfo->conn = sess->conn;
+       opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+       opinfo->op_state = OPLOCK_STATE_NONE;
+       opinfo->pending_break = 0;
+       opinfo->fid = id;
+       opinfo->Tid = Tid;
+       INIT_LIST_HEAD(&opinfo->op_entry);
+       INIT_LIST_HEAD(&opinfo->interim_list);
+       init_waitqueue_head(&opinfo->oplock_q);
+       init_waitqueue_head(&opinfo->oplock_brk);
+       atomic_set(&opinfo->refcount, 1);
+       atomic_set(&opinfo->breaking_cnt, 0);
+
+       return opinfo;
+}
+
+static void lease_add_list(struct oplock_info *opinfo)
+{
+       struct lease_table *lb = opinfo->o_lease->l_lb;
+
+       spin_lock(&lb->lb_lock);
+       list_add_rcu(&opinfo->lease_entry, &lb->lease_list);
+       spin_unlock(&lb->lb_lock);
+}
+
+static void lease_del_list(struct oplock_info *opinfo)
+{
+       struct lease_table *lb = opinfo->o_lease->l_lb;
+
+       if (!lb)
+               return;
+
+       spin_lock(&lb->lb_lock);
+       if (list_empty(&opinfo->lease_entry)) {
+               spin_unlock(&lb->lb_lock);
+               return;
+       }
+
+       list_del_init(&opinfo->lease_entry);
+       opinfo->o_lease->l_lb = NULL;
+       spin_unlock(&lb->lb_lock);
+}
+
+static void lb_add(struct lease_table *lb)
+{
+       write_lock(&lease_list_lock);
+       list_add(&lb->l_entry, &lease_table_list);
+       write_unlock(&lease_list_lock);
+}
+
+static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
+{
+       struct lease *lease;
+
+       lease = kmalloc(sizeof(struct lease), GFP_KERNEL);
+       if (!lease)
+               return -ENOMEM;
+
+       memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
+       lease->state = lctx->req_state;
+       lease->new_state = 0;
+       lease->flags = lctx->flags;
+       lease->duration = lctx->duration;
+       memcpy(lease->parent_lease_key, lctx->parent_lease_key, SMB2_LEASE_KEY_SIZE);
+       lease->version = lctx->version;
+       lease->epoch = 0;
+       INIT_LIST_HEAD(&opinfo->lease_entry);
+       opinfo->o_lease = lease;
+
+       return 0;
+}
+
+static void free_lease(struct oplock_info *opinfo)
+{
+       struct lease *lease;
+
+       lease = opinfo->o_lease;
+       kfree(lease);
+}
+
+static void free_opinfo(struct oplock_info *opinfo)
+{
+       if (opinfo->is_lease)
+               free_lease(opinfo);
+       kfree(opinfo);
+}
+
+static inline void opinfo_free_rcu(struct rcu_head *rcu_head)
+{
+       struct oplock_info *opinfo;
+
+       opinfo = container_of(rcu_head, struct oplock_info, rcu_head);
+       free_opinfo(opinfo);
+}
+
+struct oplock_info *opinfo_get(struct ksmbd_file *fp)
+{
+       struct oplock_info *opinfo;
+
+       rcu_read_lock();
+       opinfo = rcu_dereference(fp->f_opinfo);
+       if (opinfo && !atomic_inc_not_zero(&opinfo->refcount))
+               opinfo = NULL;
+       rcu_read_unlock();
+
+       return opinfo;
+}
+
+static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
+{
+       struct oplock_info *opinfo;
+
+       if (list_empty(&ci->m_op_list))
+               return NULL;
+
+       rcu_read_lock();
+       opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
+                                       op_entry);
+       if (opinfo && !atomic_inc_not_zero(&opinfo->refcount))
+               opinfo = NULL;
+       rcu_read_unlock();
+
+       return opinfo;
+}
+
+void opinfo_put(struct oplock_info *opinfo)
+{
+       if (!atomic_dec_and_test(&opinfo->refcount))
+               return;
+
+       call_rcu(&opinfo->rcu_head, opinfo_free_rcu);
+}
+
+static void opinfo_add(struct oplock_info *opinfo)
+{
+       struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
+
+       write_lock(&ci->m_lock);
+       list_add_rcu(&opinfo->op_entry, &ci->m_op_list);
+       write_unlock(&ci->m_lock);
+}
+
+static void opinfo_del(struct oplock_info *opinfo)
+{
+       struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
+
+       if (opinfo->is_lease) {
+               write_lock(&lease_list_lock);
+               lease_del_list(opinfo);
+               write_unlock(&lease_list_lock);
+       }
+       write_lock(&ci->m_lock);
+       list_del_rcu(&opinfo->op_entry);
+       write_unlock(&ci->m_lock);
+}
+
+static unsigned long opinfo_count(struct ksmbd_file *fp)
+{
+       if (ksmbd_stream_fd(fp))
+               return atomic_read(&fp->f_ci->sop_count);
+       else
+               return atomic_read(&fp->f_ci->op_count);
+}
+
+static void opinfo_count_inc(struct ksmbd_file *fp)
+{
+       if (ksmbd_stream_fd(fp))
+               return atomic_inc(&fp->f_ci->sop_count);
+       else
+               return atomic_inc(&fp->f_ci->op_count);
+}
+
+static void opinfo_count_dec(struct ksmbd_file *fp)
+{
+       if (ksmbd_stream_fd(fp))
+               return atomic_dec(&fp->f_ci->sop_count);
+       else
+               return atomic_dec(&fp->f_ci->op_count);
+}
+
+/**
+ * opinfo_write_to_read() - convert a write oplock to read oplock
+ * @opinfo:            current oplock info
+ *
+ * Return:      0 on success, otherwise -EINVAL
+ */
+int opinfo_write_to_read(struct oplock_info *opinfo)
+{
+       struct lease *lease = opinfo->o_lease;
+
+       if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
+             opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
+               pr_err("bad oplock(0x%x)\n", opinfo->level);
+               if (opinfo->is_lease)
+                       pr_err("lease state(0x%x)\n", lease->state);
+               return -EINVAL;
+       }
+       opinfo->level = SMB2_OPLOCK_LEVEL_II;
+
+       if (opinfo->is_lease)
+               lease->state = lease->new_state;
+       return 0;
+}
+
+/**
+ * opinfo_read_handle_to_read() - convert a read/handle oplock to read oplock
+ * @opinfo:            current oplock info
+ *
+ * Return:      0 on success, otherwise -EINVAL
+ */
+int opinfo_read_handle_to_read(struct oplock_info *opinfo)
+{
+       struct lease *lease = opinfo->o_lease;
+
+       lease->state = lease->new_state;
+       opinfo->level = SMB2_OPLOCK_LEVEL_II;
+       return 0;
+}
+
+/**
+ * opinfo_write_to_none() - convert a write oplock to none
+ * @opinfo:    current oplock info
+ *
+ * Return:      0 on success, otherwise -EINVAL
+ */
+int opinfo_write_to_none(struct oplock_info *opinfo)
+{
+       struct lease *lease = opinfo->o_lease;
+
+       if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
+             opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
+               pr_err("bad oplock(0x%x)\n", opinfo->level);
+               if (opinfo->is_lease)
+                       pr_err("lease state(0x%x)\n", lease->state);
+               return -EINVAL;
+       }
+       opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+       if (opinfo->is_lease)
+               lease->state = lease->new_state;
+       return 0;
+}
+
+/**
+ * opinfo_read_to_none() - convert a read oplock to none
+ * @opinfo:    current oplock info
+ *
+ * Return:      0 on success, otherwise -EINVAL
+ */
+int opinfo_read_to_none(struct oplock_info *opinfo)
+{
+       struct lease *lease = opinfo->o_lease;
+
+       if (opinfo->level != SMB2_OPLOCK_LEVEL_II) {
+               pr_err("bad oplock(0x%x)\n", opinfo->level);
+               if (opinfo->is_lease)
+                       pr_err("lease state(0x%x)\n", lease->state);
+               return -EINVAL;
+       }
+       opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+       if (opinfo->is_lease)
+               lease->state = lease->new_state;
+       return 0;
+}
+
+/**
+ * lease_read_to_write() - upgrade lease state from read to write
+ * @opinfo:    current lease info
+ *
+ * Return:      0 on success, otherwise -EINVAL
+ */
+int lease_read_to_write(struct oplock_info *opinfo)
+{
+       struct lease *lease = opinfo->o_lease;
+
+       if (!(lease->state & SMB2_LEASE_READ_CACHING_LE)) {
+               ksmbd_debug(OPLOCK, "bad lease state(0x%x)\n", lease->state);
+               return -EINVAL;
+       }
+
+       lease->new_state = SMB2_LEASE_NONE_LE;
+       lease->state |= SMB2_LEASE_WRITE_CACHING_LE;
+       if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
+               opinfo->level = SMB2_OPLOCK_LEVEL_BATCH;
+       else
+               opinfo->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+       return 0;
+}
+
+/**
+ * lease_none_upgrade() - upgrade lease state from none
+ * @opinfo:    current lease info
+ * @new_state: new lease state
+ *
+ * Return:     0 on success, otherwise -EINVAL
+ */
+static int lease_none_upgrade(struct oplock_info *opinfo, __le32 new_state)
+{
+       struct lease *lease = opinfo->o_lease;
+
+       if (!(lease->state == SMB2_LEASE_NONE_LE)) {
+               ksmbd_debug(OPLOCK, "bad lease state(0x%x)\n", lease->state);
+               return -EINVAL;
+       }
+
+       lease->new_state = SMB2_LEASE_NONE_LE;
+       lease->state = new_state;
+       if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
+               if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
+                       opinfo->level = SMB2_OPLOCK_LEVEL_BATCH;
+               else
+                       opinfo->level = SMB2_OPLOCK_LEVEL_II;
+       else if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
+               opinfo->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+       else if (lease->state & SMB2_LEASE_READ_CACHING_LE)
+               opinfo->level = SMB2_OPLOCK_LEVEL_II;
+
+       return 0;
+}
+
+/**
+ * close_id_del_oplock() - release oplock object at file close time
+ * @fp:                ksmbd file pointer
+ */
+void close_id_del_oplock(struct ksmbd_file *fp)
+{
+       struct oplock_info *opinfo;
+
+       if (S_ISDIR(file_inode(fp->filp)->i_mode))
+               return;
+
+       opinfo = opinfo_get(fp);
+       if (!opinfo)
+               return;
+
+       opinfo_del(opinfo);
+
+       rcu_assign_pointer(fp->f_opinfo, NULL);
+       if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+               opinfo->op_state = OPLOCK_CLOSING;
+               wake_up_interruptible_all(&opinfo->oplock_q);
+               if (opinfo->is_lease) {
+                       atomic_set(&opinfo->breaking_cnt, 0);
+                       wake_up_interruptible_all(&opinfo->oplock_brk);
+               }
+       }
+
+       opinfo_count_dec(fp);
+       atomic_dec(&opinfo->refcount);
+       opinfo_put(opinfo);
+}
+
+/**
+ * grant_write_oplock() - grant exclusive/batch oplock or write lease
+ * @opinfo_new:        new oplock info object
+ * @req_oplock: request oplock
+ * @lctx:      lease context information
+ *
+ * Return:      0
+ */
+static void grant_write_oplock(struct oplock_info *opinfo_new, int req_oplock,
+                              struct lease_ctx_info *lctx)
+{
+       struct lease *lease = opinfo_new->o_lease;
+
+       if (req_oplock == SMB2_OPLOCK_LEVEL_BATCH)
+               opinfo_new->level = SMB2_OPLOCK_LEVEL_BATCH;
+       else
+               opinfo_new->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+
+       if (lctx) {
+               lease->state = lctx->req_state;
+               memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
+       }
+}
+
+/**
+ * grant_read_oplock() - grant level2 oplock or read lease
+ * @opinfo_new:        new oplock info object
+ * @lctx:      lease context information
+ *
+ * Return:      0
+ */
+static void grant_read_oplock(struct oplock_info *opinfo_new,
+                             struct lease_ctx_info *lctx)
+{
+       struct lease *lease = opinfo_new->o_lease;
+
+       opinfo_new->level = SMB2_OPLOCK_LEVEL_II;
+
+       if (lctx) {
+               lease->state = SMB2_LEASE_READ_CACHING_LE;
+               if (lctx->req_state & SMB2_LEASE_HANDLE_CACHING_LE)
+                       lease->state |= SMB2_LEASE_HANDLE_CACHING_LE;
+               memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
+       }
+}
+
+/**
+ * grant_none_oplock() - grant none oplock or none lease
+ * @opinfo_new:        new oplock info object
+ * @lctx:      lease context information
+ *
+ * Return:      0
+ */
+static void grant_none_oplock(struct oplock_info *opinfo_new,
+                             struct lease_ctx_info *lctx)
+{
+       struct lease *lease = opinfo_new->o_lease;
+
+       opinfo_new->level = SMB2_OPLOCK_LEVEL_NONE;
+
+       if (lctx) {
+               lease->state = 0;
+               memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
+       }
+}
+
+static inline int compare_guid_key(struct oplock_info *opinfo,
+                                  const char *guid1, const char *key1)
+{
+       const char *guid2, *key2;
+
+       guid2 = opinfo->conn->ClientGUID;
+       key2 = opinfo->o_lease->lease_key;
+       if (!memcmp(guid1, guid2, SMB2_CLIENT_GUID_SIZE) &&
+           !memcmp(key1, key2, SMB2_LEASE_KEY_SIZE))
+               return 1;
+
+       return 0;
+}
+
+/**
+ * same_client_has_lease() - check whether current lease request is
+ *             from lease owner of file
+ * @ci:                master file pointer
+ * @client_guid:       Client GUID
+ * @lctx:              lease context information
+ *
+ * Return:      oplock(lease) object on success, otherwise NULL
+ */
+static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
+                                                char *client_guid,
+                                                struct lease_ctx_info *lctx)
+{
+       int ret;
+       struct lease *lease;
+       struct oplock_info *opinfo;
+       struct oplock_info *m_opinfo = NULL;
+
+       if (!lctx)
+               return NULL;
+
+       /*
+        * Compare the lease key and client_guid to check whether the request
+        * comes from the same lease owner on the same client
+        */
+       read_lock(&ci->m_lock);
+       list_for_each_entry(opinfo, &ci->m_op_list, op_entry) {
+               if (!opinfo->is_lease)
+                       continue;
+               read_unlock(&ci->m_lock);
+               lease = opinfo->o_lease;
+
+               ret = compare_guid_key(opinfo, client_guid, lctx->lease_key);
+               if (ret) {
+                       m_opinfo = opinfo;
+                       /* do not upgrade a lease that is currently breaking */
+                       if (atomic_read(&opinfo->breaking_cnt)) {
+                               read_lock(&ci->m_lock);
+                               continue;
+                       }
+
+                       /* upgrading lease */
+                       if ((atomic_read(&ci->op_count) +
+                            atomic_read(&ci->sop_count)) == 1) {
+                               if (lease->state ==
+                                   (lctx->req_state & lease->state)) {
+                                       lease->state |= lctx->req_state;
+                                       if (lctx->req_state &
+                                               SMB2_LEASE_WRITE_CACHING_LE)
+                                               lease_read_to_write(opinfo);
+                               }
+                       } else if ((atomic_read(&ci->op_count) +
+                                   atomic_read(&ci->sop_count)) > 1) {
+                               if (lctx->req_state ==
+                                   (SMB2_LEASE_READ_CACHING_LE |
+                                    SMB2_LEASE_HANDLE_CACHING_LE))
+                                       lease->state = lctx->req_state;
+                       }
+
+                       if (lctx->req_state && lease->state ==
+                           SMB2_LEASE_NONE_LE)
+                               lease_none_upgrade(opinfo, lctx->req_state);
+               }
+               read_lock(&ci->m_lock);
+       }
+       read_unlock(&ci->m_lock);
+
+       return m_opinfo;
+}
+
+static void wait_for_break_ack(struct oplock_info *opinfo)
+{
+       int rc = 0;
+
+       rc = wait_event_interruptible_timeout(opinfo->oplock_q,
+                                             opinfo->op_state == OPLOCK_STATE_NONE ||
+                                             opinfo->op_state == OPLOCK_CLOSING,
+                                             OPLOCK_WAIT_TIME);
+
+       /* is this a timeout ? */
+       if (!rc) {
+               if (opinfo->is_lease)
+                       opinfo->o_lease->state = SMB2_LEASE_NONE_LE;
+               opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+               opinfo->op_state = OPLOCK_STATE_NONE;
+       }
+}
+
+static void wake_up_oplock_break(struct oplock_info *opinfo)
+{
+       clear_bit_unlock(0, &opinfo->pending_break);
+       /* memory barrier is needed for wake_up_bit() */
+       smp_mb__after_atomic();
+       wake_up_bit(&opinfo->pending_break, 0);
+}
+
+static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
+{
+       while (test_and_set_bit(0, &opinfo->pending_break)) {
+               wait_on_bit(&opinfo->pending_break, 0, TASK_UNINTERRUPTIBLE);
+
+               /* Not immediately break to none. */
+               opinfo->open_trunc = 0;
+
+               if (opinfo->op_state == OPLOCK_CLOSING)
+                       return -ENOENT;
+               else if (!opinfo->is_lease && opinfo->level <= req_op_level)
+                       return 1;
+       }
+
+       if (!opinfo->is_lease && opinfo->level <= req_op_level) {
+               wake_up_oplock_break(opinfo);
+               return 1;
+       }
+       return 0;
+}
+
+static inline int allocate_oplock_break_buf(struct ksmbd_work *work)
+{
+       work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
+       if (!work->response_buf)
+               return -ENOMEM;
+       work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+       return 0;
+}
+
+/**
+ * __smb2_oplock_break_noti() - send smb2 oplock break cmd from conn
+ * to client
+ * @wk:     smb work object
+ *
+ * There are two ways this function can be called: 1) on file open, we break
+ * from an exclusive/batch oplock to a level II oplock, and 2) on file
+ * write/truncate, we break from a level II oplock to no oplock.
+ * work->request_buf contains oplock_info.
+ */
+static void __smb2_oplock_break_noti(struct work_struct *wk)
+{
+       struct smb2_oplock_break *rsp = NULL;
+       struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+       struct ksmbd_conn *conn = work->conn;
+       struct oplock_break_info *br_info = work->request_buf;
+       struct smb2_hdr *rsp_hdr;
+       struct ksmbd_file *fp;
+
+       fp = ksmbd_lookup_durable_fd(br_info->fid);
+       if (!fp) {
+               atomic_dec(&conn->r_count);
+               ksmbd_free_work_struct(work);
+               return;
+       }
+
+       if (allocate_oplock_break_buf(work)) {
+               pr_err("smb2_allocate_rsp_buf failed! ");
+               atomic_dec(&conn->r_count);
+               ksmbd_fd_put(work, fp);
+               ksmbd_free_work_struct(work);
+               return;
+       }
+
+       rsp_hdr = work->response_buf;
+       memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+       rsp_hdr->smb2_buf_length =
+               cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
+       rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
+       rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+       rsp_hdr->CreditRequest = cpu_to_le16(0);
+       rsp_hdr->Command = SMB2_OPLOCK_BREAK;
+       rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
+       rsp_hdr->NextCommand = 0;
+       rsp_hdr->MessageId = cpu_to_le64(-1);
+       rsp_hdr->Id.SyncId.ProcessId = 0;
+       rsp_hdr->Id.SyncId.TreeId = 0;
+       rsp_hdr->SessionId = 0;
+       memset(rsp_hdr->Signature, 0, 16);
+
+       rsp = work->response_buf;
+
+       rsp->StructureSize = cpu_to_le16(24);
+       if (!br_info->open_trunc &&
+           (br_info->level == SMB2_OPLOCK_LEVEL_BATCH ||
+            br_info->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE))
+               rsp->OplockLevel = SMB2_OPLOCK_LEVEL_II;
+       else
+               rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+       rsp->Reserved = 0;
+       rsp->Reserved2 = 0;
+       rsp->PersistentFid = cpu_to_le64(fp->persistent_id);
+       rsp->VolatileFid = cpu_to_le64(fp->volatile_id);
+
+       inc_rfc1001_len(rsp, 24);
+
+       ksmbd_debug(OPLOCK,
+                   "sending oplock break v_id %llu p_id = %llu lock level = %d\n",
+                   rsp->VolatileFid, rsp->PersistentFid, rsp->OplockLevel);
+
+       ksmbd_fd_put(work, fp);
+       ksmbd_conn_write(work);
+       ksmbd_free_work_struct(work);
+       atomic_dec(&conn->r_count);
+}
+
+/**
+ * smb2_oplock_break_noti() - send smb2 oplock break command (exclusive/batch
+ *             to level II) from server to client
+ * @opinfo:            oplock info object
+ *
+ * Return:      0 on success, otherwise error
+ */
+static int smb2_oplock_break_noti(struct oplock_info *opinfo)
+{
+       struct ksmbd_conn *conn = opinfo->conn;
+       struct oplock_break_info *br_info;
+       int ret = 0;
+       struct ksmbd_work *work = ksmbd_alloc_work_struct();
+
+       if (!work)
+               return -ENOMEM;
+
+       br_info = kmalloc(sizeof(struct oplock_break_info), GFP_KERNEL);
+       if (!br_info) {
+               ksmbd_free_work_struct(work);
+               return -ENOMEM;
+       }
+
+       br_info->level = opinfo->level;
+       br_info->fid = opinfo->fid;
+       br_info->open_trunc = opinfo->open_trunc;
+
+       work->request_buf = (char *)br_info;
+       work->conn = conn;
+       work->sess = opinfo->sess;
+
+       atomic_inc(&conn->r_count);
+       if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+               INIT_WORK(&work->work, __smb2_oplock_break_noti);
+               ksmbd_queue_work(work);
+
+               wait_for_break_ack(opinfo);
+       } else {
+               __smb2_oplock_break_noti(&work->work);
+               if (opinfo->level == SMB2_OPLOCK_LEVEL_II)
+                       opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+       }
+       return ret;
+}
+
+/**
+ * __smb2_lease_break_noti() - send lease break command from server
+ * to client
+ * @wk:     smb work object
+ */
+static void __smb2_lease_break_noti(struct work_struct *wk)
+{
+       struct smb2_lease_break *rsp = NULL;
+       struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+       struct lease_break_info *br_info = work->request_buf;
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_hdr *rsp_hdr;
+
+       if (allocate_oplock_break_buf(work)) {
+               ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! ");
+               ksmbd_free_work_struct(work);
+               atomic_dec(&conn->r_count);
+               return;
+       }
+
+       rsp_hdr = work->response_buf;
+       memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+       rsp_hdr->smb2_buf_length =
+               cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
+       rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
+       rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+       rsp_hdr->CreditRequest = cpu_to_le16(0);
+       rsp_hdr->Command = SMB2_OPLOCK_BREAK;
+       rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
+       rsp_hdr->NextCommand = 0;
+       rsp_hdr->MessageId = cpu_to_le64(-1);
+       rsp_hdr->Id.SyncId.ProcessId = 0;
+       rsp_hdr->Id.SyncId.TreeId = 0;
+       rsp_hdr->SessionId = 0;
+       memset(rsp_hdr->Signature, 0, 16);
+
+       rsp = work->response_buf;
+       rsp->StructureSize = cpu_to_le16(44);
+       rsp->Epoch = br_info->epoch;
+       rsp->Flags = 0;
+
+       if (br_info->curr_state & (SMB2_LEASE_WRITE_CACHING_LE |
+                       SMB2_LEASE_HANDLE_CACHING_LE))
+               rsp->Flags = SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED;
+
+       memcpy(rsp->LeaseKey, br_info->lease_key, SMB2_LEASE_KEY_SIZE);
+       rsp->CurrentLeaseState = br_info->curr_state;
+       rsp->NewLeaseState = br_info->new_state;
+       rsp->BreakReason = 0;
+       rsp->AccessMaskHint = 0;
+       rsp->ShareMaskHint = 0;
+
+       inc_rfc1001_len(rsp, 44);
+
+       ksmbd_conn_write(work);
+       ksmbd_free_work_struct(work);
+       atomic_dec(&conn->r_count);
+}
+
+/**
+ * smb2_lease_break_noti() - break lease when a new client requests
+ *                     a write lease
+ * @opinfo:            contains lease state information
+ *
+ * Return:     0 on success, otherwise error
+ */
+static int smb2_lease_break_noti(struct oplock_info *opinfo)
+{
+       struct ksmbd_conn *conn = opinfo->conn;
+       struct list_head *tmp, *t;
+       struct ksmbd_work *work;
+       struct lease_break_info *br_info;
+       struct lease *lease = opinfo->o_lease;
+
+       work = ksmbd_alloc_work_struct();
+       if (!work)
+               return -ENOMEM;
+
+       br_info = kmalloc(sizeof(struct lease_break_info), GFP_KERNEL);
+       if (!br_info) {
+               ksmbd_free_work_struct(work);
+               return -ENOMEM;
+       }
+
+       br_info->curr_state = lease->state;
+       br_info->new_state = lease->new_state;
+       if (lease->version == 2)
+               br_info->epoch = cpu_to_le16(++lease->epoch);
+       else
+               br_info->epoch = 0;
+       memcpy(br_info->lease_key, lease->lease_key, SMB2_LEASE_KEY_SIZE);
+
+       work->request_buf = (char *)br_info;
+       work->conn = conn;
+       work->sess = opinfo->sess;
+
+       atomic_inc(&conn->r_count);
+       if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+               list_for_each_safe(tmp, t, &opinfo->interim_list) {
+                       struct ksmbd_work *in_work;
+
+                       in_work = list_entry(tmp, struct ksmbd_work,
+                                            interim_entry);
+                       setup_async_work(in_work, NULL, NULL);
+                       smb2_send_interim_resp(in_work, STATUS_PENDING);
+                       list_del(&in_work->interim_entry);
+               }
+               INIT_WORK(&work->work, __smb2_lease_break_noti);
+               ksmbd_queue_work(work);
+               wait_for_break_ack(opinfo);
+       } else {
+               __smb2_lease_break_noti(&work->work);
+               if (opinfo->o_lease->new_state == SMB2_LEASE_NONE_LE) {
+                       opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+                       opinfo->o_lease->state = SMB2_LEASE_NONE_LE;
+               }
+       }
+       return 0;
+}
+
+static void wait_lease_breaking(struct oplock_info *opinfo)
+{
+       if (!opinfo->is_lease)
+               return;
+
+       wake_up_interruptible_all(&opinfo->oplock_brk);
+       if (atomic_read(&opinfo->breaking_cnt)) {
+               int ret = 0;
+
+               ret = wait_event_interruptible_timeout(opinfo->oplock_brk,
+                                                      atomic_read(&opinfo->breaking_cnt) == 0,
+                                                      HZ);
+               if (!ret)
+                       atomic_set(&opinfo->breaking_cnt, 0);
+       }
+}
+
+static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level)
+{
+       int err = 0;
+
+       /* Need to break exclusive/batch oplock, write lease or overwrite_if */
+       ksmbd_debug(OPLOCK,
+                   "request to send oplock(level : 0x%x) break notification\n",
+                   brk_opinfo->level);
+
+       if (brk_opinfo->is_lease) {
+               struct lease *lease = brk_opinfo->o_lease;
+
+               atomic_inc(&brk_opinfo->breaking_cnt);
+
+               err = oplock_break_pending(brk_opinfo, req_op_level);
+               if (err)
+                       return err < 0 ? err : 0;
+
+               if (brk_opinfo->open_trunc) {
+                       /*
+                        * Create overwrite break trigger the lease break to
+                        * none.
+                        */
+                       lease->new_state = SMB2_LEASE_NONE_LE;
+               } else {
+                       if (lease->state & SMB2_LEASE_WRITE_CACHING_LE) {
+                               if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
+                                       lease->new_state =
+                                               SMB2_LEASE_READ_CACHING_LE |
+                                               SMB2_LEASE_HANDLE_CACHING_LE;
+                               else
+                                       lease->new_state =
+                                               SMB2_LEASE_READ_CACHING_LE;
+                       } else {
+                               if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
+                                       lease->new_state =
+                                               SMB2_LEASE_READ_CACHING_LE;
+                               else
+                                       lease->new_state = SMB2_LEASE_NONE_LE;
+                       }
+               }
+
+               if (lease->state & (SMB2_LEASE_WRITE_CACHING_LE |
+                               SMB2_LEASE_HANDLE_CACHING_LE))
+                       brk_opinfo->op_state = OPLOCK_ACK_WAIT;
+               else
+                       atomic_dec(&brk_opinfo->breaking_cnt);
+       } else {
+               err = oplock_break_pending(brk_opinfo, req_op_level);
+               if (err)
+                       return err < 0 ? err : 0;
+
+               if (brk_opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
+                   brk_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
+                       brk_opinfo->op_state = OPLOCK_ACK_WAIT;
+       }
+
+       if (brk_opinfo->is_lease)
+               err = smb2_lease_break_noti(brk_opinfo);
+       else
+               err = smb2_oplock_break_noti(brk_opinfo);
+
+       ksmbd_debug(OPLOCK, "oplock granted = %d\n", brk_opinfo->level);
+       if (brk_opinfo->op_state == OPLOCK_CLOSING)
+               err = -ENOENT;
+       wake_up_oplock_break(brk_opinfo);
+
+       wait_lease_breaking(brk_opinfo);
+
+       return err;
+}
+
+void destroy_lease_table(struct ksmbd_conn *conn)
+{
+       struct lease_table *lb, *lbtmp;
+       struct oplock_info *opinfo;
+
+       write_lock(&lease_list_lock);
+       if (list_empty(&lease_table_list)) {
+               write_unlock(&lease_list_lock);
+               return;
+       }
+
+       list_for_each_entry_safe(lb, lbtmp, &lease_table_list, l_entry) {
+               if (conn && memcmp(lb->client_guid, conn->ClientGUID,
+                                  SMB2_CLIENT_GUID_SIZE))
+                       continue;
+again:
+               rcu_read_lock();
+               list_for_each_entry_rcu(opinfo, &lb->lease_list,
+                                       lease_entry) {
+                       rcu_read_unlock();
+                       lease_del_list(opinfo);
+                       goto again;
+               }
+               rcu_read_unlock();
+               list_del(&lb->l_entry);
+               kfree(lb);
+       }
+       write_unlock(&lease_list_lock);
+}
+
+int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
+                       struct lease_ctx_info *lctx)
+{
+       struct oplock_info *opinfo;
+       int err = 0;
+       struct lease_table *lb;
+
+       if (!lctx)
+               return err;
+
+       read_lock(&lease_list_lock);
+       if (list_empty(&lease_table_list)) {
+               read_unlock(&lease_list_lock);
+               return 0;
+       }
+
+       list_for_each_entry(lb, &lease_table_list, l_entry) {
+               if (!memcmp(lb->client_guid, sess->conn->ClientGUID,
+                           SMB2_CLIENT_GUID_SIZE))
+                       goto found;
+       }
+       read_unlock(&lease_list_lock);
+
+       return 0;
+
+found:
+       rcu_read_lock();
+       list_for_each_entry_rcu(opinfo, &lb->lease_list, lease_entry) {
+               if (!atomic_inc_not_zero(&opinfo->refcount))
+                       continue;
+               rcu_read_unlock();
+               if (opinfo->o_fp->f_ci == ci)
+                       goto op_next;
+               err = compare_guid_key(opinfo, sess->conn->ClientGUID,
+                                      lctx->lease_key);
+               if (err) {
+                       err = -EINVAL;
+                       ksmbd_debug(OPLOCK,
+                                   "found same lease key is already used in other files\n");
+                       opinfo_put(opinfo);
+                       goto out;
+               }
+op_next:
+               opinfo_put(opinfo);
+               rcu_read_lock();
+       }
+       rcu_read_unlock();
+
+out:
+       read_unlock(&lease_list_lock);
+       return err;
+}
+
+static void copy_lease(struct oplock_info *op1, struct oplock_info *op2)
+{
+       struct lease *lease1 = op1->o_lease;
+       struct lease *lease2 = op2->o_lease;
+
+       op2->level = op1->level;
+       lease2->state = lease1->state;
+       memcpy(lease2->lease_key, lease1->lease_key,
+              SMB2_LEASE_KEY_SIZE);
+       lease2->duration = lease1->duration;
+       lease2->flags = lease1->flags;
+}
+
+static int add_lease_global_list(struct oplock_info *opinfo)
+{
+       struct lease_table *lb;
+
+       read_lock(&lease_list_lock);
+       list_for_each_entry(lb, &lease_table_list, l_entry) {
+               if (!memcmp(lb->client_guid, opinfo->conn->ClientGUID,
+                           SMB2_CLIENT_GUID_SIZE)) {
+                       opinfo->o_lease->l_lb = lb;
+                       lease_add_list(opinfo);
+                       read_unlock(&lease_list_lock);
+                       return 0;
+               }
+       }
+       read_unlock(&lease_list_lock);
+
+       lb = kmalloc(sizeof(struct lease_table), GFP_KERNEL);
+       if (!lb)
+               return -ENOMEM;
+
+       memcpy(lb->client_guid, opinfo->conn->ClientGUID,
+              SMB2_CLIENT_GUID_SIZE);
+       INIT_LIST_HEAD(&lb->lease_list);
+       spin_lock_init(&lb->lb_lock);
+       opinfo->o_lease->l_lb = lb;
+       lease_add_list(opinfo);
+       lb_add(lb);
+       return 0;
+}
+
+static void set_oplock_level(struct oplock_info *opinfo, int level,
+                            struct lease_ctx_info *lctx)
+{
+       switch (level) {
+       case SMB2_OPLOCK_LEVEL_BATCH:
+       case SMB2_OPLOCK_LEVEL_EXCLUSIVE:
+               grant_write_oplock(opinfo, level, lctx);
+               break;
+       case SMB2_OPLOCK_LEVEL_II:
+               grant_read_oplock(opinfo, lctx);
+               break;
+       default:
+               grant_none_oplock(opinfo, lctx);
+               break;
+       }
+}
+
+/**
+ * smb_grant_oplock() - handle oplock/lease request on file open
+ * @work:              smb work
+ * @req_op_level:      oplock level
+ * @pid:               id of open file
+ * @fp:                        ksmbd file pointer
+ * @tid:               Tree id of connection
+ * @lctx:              lease context information on file open
+ * @share_ret:         share mode
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
+                    struct ksmbd_file *fp, __u16 tid,
+                    struct lease_ctx_info *lctx, int share_ret)
+{
+       struct ksmbd_session *sess = work->sess;
+       int err = 0;
+       struct oplock_info *opinfo = NULL, *prev_opinfo = NULL;
+       struct ksmbd_inode *ci = fp->f_ci;
+       bool prev_op_has_lease;
+       __le32 prev_op_state = 0;
+
+       /* directory leases are not supported */
+       if (S_ISDIR(file_inode(fp->filp)->i_mode))
+               return 0;
+
+       opinfo = alloc_opinfo(work, pid, tid);
+       if (!opinfo)
+               return -ENOMEM;
+
+       if (lctx) {
+               err = alloc_lease(opinfo, lctx);
+               if (err)
+                       goto err_out;
+               opinfo->is_lease = 1;
+       }
+
+       /* ci does not have any oplock */
+       if (!opinfo_count(fp))
+               goto set_lev;
+
+       /* grant none-oplock if second open is trunc */
+       if (fp->attrib_only && fp->cdoption != FILE_OVERWRITE_IF_LE &&
+           fp->cdoption != FILE_OVERWRITE_LE &&
+           fp->cdoption != FILE_SUPERSEDE_LE) {
+               req_op_level = SMB2_OPLOCK_LEVEL_NONE;
+               goto set_lev;
+       }
+
+       if (lctx) {
+               struct oplock_info *m_opinfo;
+
+               /* is lease already granted ? */
+               m_opinfo = same_client_has_lease(ci, sess->conn->ClientGUID,
+                                                lctx);
+               if (m_opinfo) {
+                       copy_lease(m_opinfo, opinfo);
+                       if (atomic_read(&m_opinfo->breaking_cnt))
+                               opinfo->o_lease->flags =
+                                       SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE;
+                       goto out;
+               }
+       }
+       prev_opinfo = opinfo_get_list(ci);
+       if (!prev_opinfo ||
+           (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx))
+               goto set_lev;
+       prev_op_has_lease = prev_opinfo->is_lease;
+       if (prev_op_has_lease)
+               prev_op_state = prev_opinfo->o_lease->state;
+
+       if (share_ret < 0 &&
+           prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+               err = share_ret;
+               opinfo_put(prev_opinfo);
+               goto err_out;
+       }
+
+       if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
+           prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+               opinfo_put(prev_opinfo);
+               goto op_break_not_needed;
+       }
+
+       list_add(&work->interim_entry, &prev_opinfo->interim_list);
+       err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
+       opinfo_put(prev_opinfo);
+       if (err == -ENOENT)
+               goto set_lev;
+       /* Check whether all oplocks were freed by close */
+       else if (err < 0)
+               goto err_out;
+
+op_break_not_needed:
+       if (share_ret < 0) {
+               err = share_ret;
+               goto err_out;
+       }
+
+       if (req_op_level != SMB2_OPLOCK_LEVEL_NONE)
+               req_op_level = SMB2_OPLOCK_LEVEL_II;
+
+       /* grant fixed oplock on stacked locking between lease and oplock */
+       if (prev_op_has_lease && !lctx)
+               if (prev_op_state & SMB2_LEASE_HANDLE_CACHING_LE)
+                       req_op_level = SMB2_OPLOCK_LEVEL_NONE;
+
+       if (!prev_op_has_lease && lctx) {
+               req_op_level = SMB2_OPLOCK_LEVEL_II;
+               lctx->req_state = SMB2_LEASE_READ_CACHING_LE;
+       }
+
+set_lev:
+       set_oplock_level(opinfo, req_op_level, lctx);
+
+out:
+       rcu_assign_pointer(fp->f_opinfo, opinfo);
+       opinfo->o_fp = fp;
+
+       opinfo_count_inc(fp);
+       opinfo_add(opinfo);
+       if (opinfo->is_lease) {
+               err = add_lease_global_list(opinfo);
+               if (err)
+                       goto err_out;
+       }
+
+       return 0;
+err_out:
+       free_opinfo(opinfo);
+       return err;
+}
+
+/**
+ * smb_break_all_write_oplock() - break batch/exclusive oplock to level2
+ * @work:      smb work
+ * @fp:                ksmbd file pointer
+ * @is_trunc:  truncate on open
+ */
+static void smb_break_all_write_oplock(struct ksmbd_work *work,
+                                      struct ksmbd_file *fp, int is_trunc)
+{
+       struct oplock_info *brk_opinfo;
+
+       brk_opinfo = opinfo_get_list(fp->f_ci);
+       if (!brk_opinfo)
+               return;
+       if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
+           brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+               opinfo_put(brk_opinfo);
+               return;
+       }
+
+       brk_opinfo->open_trunc = is_trunc;
+       list_add(&work->interim_entry, &brk_opinfo->interim_list);
+       oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
+       opinfo_put(brk_opinfo);
+}
+
+/**
+ * smb_break_all_levII_oplock() - send level2 oplock or read lease break command
+ *     from server to client
+ * @work:      smb work
+ * @fp:                ksmbd file pointer
+ * @is_trunc:  truncate on open
+ */
+void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
+                               int is_trunc)
+{
+       struct oplock_info *op, *brk_op;
+       struct ksmbd_inode *ci;
+       struct ksmbd_conn *conn = work->sess->conn;
+
+       if (!test_share_config_flag(work->tcon->share_conf,
+                                   KSMBD_SHARE_FLAG_OPLOCKS))
+               return;
+
+       ci = fp->f_ci;
+       op = opinfo_get(fp);
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
+               if (!atomic_inc_not_zero(&brk_op->refcount))
+                       continue;
+               rcu_read_unlock();
+               if (brk_op->is_lease && (brk_op->o_lease->state &
+                   (~(SMB2_LEASE_READ_CACHING_LE |
+                               SMB2_LEASE_HANDLE_CACHING_LE)))) {
+                       ksmbd_debug(OPLOCK, "unexpected lease state(0x%x)\n",
+                                   brk_op->o_lease->state);
+                       goto next;
+               } else if (brk_op->level !=
+                               SMB2_OPLOCK_LEVEL_II) {
+                       ksmbd_debug(OPLOCK, "unexpected oplock(0x%x)\n",
+                                   brk_op->level);
+                       goto next;
+               }
+
+               /* Skip an oplock that is already breaking to none */
+               if (brk_op->is_lease &&
+                   brk_op->o_lease->new_state == SMB2_LEASE_NONE_LE &&
+                   atomic_read(&brk_op->breaking_cnt))
+                       goto next;
+
+               if (op && op->is_lease && brk_op->is_lease &&
+                   !memcmp(conn->ClientGUID, brk_op->conn->ClientGUID,
+                           SMB2_CLIENT_GUID_SIZE) &&
+                   !memcmp(op->o_lease->lease_key, brk_op->o_lease->lease_key,
+                           SMB2_LEASE_KEY_SIZE))
+                       goto next;
+               brk_op->open_trunc = is_trunc;
+               oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
+next:
+               opinfo_put(brk_op);
+               rcu_read_lock();
+       }
+       rcu_read_unlock();
+
+       if (op)
+               opinfo_put(op);
+}
+
+/**
+ * smb_break_all_oplock() - break both batch/exclusive and level2 oplock
+ * @work:      smb work
+ * @fp:                ksmbd file pointer
+ */
+void smb_break_all_oplock(struct ksmbd_work *work, struct ksmbd_file *fp)
+{
+       if (!test_share_config_flag(work->tcon->share_conf,
+                                   KSMBD_SHARE_FLAG_OPLOCKS))
+               return;
+
+       smb_break_all_write_oplock(work, fp, 1);
+       smb_break_all_levII_oplock(work, fp, 1);
+}
+
+/**
+ * smb2_map_lease_to_oplock() - map lease state to corresponding oplock type
+ * @lease_state:     lease type
+ *
+ * Return:      0 if no mapping, otherwise corresponding oplock type
+ */
+__u8 smb2_map_lease_to_oplock(__le32 lease_state)
+{
+       if (lease_state == (SMB2_LEASE_HANDLE_CACHING_LE |
+                           SMB2_LEASE_READ_CACHING_LE |
+                           SMB2_LEASE_WRITE_CACHING_LE)) {
+               return SMB2_OPLOCK_LEVEL_BATCH;
+       } else if (lease_state != SMB2_LEASE_WRITE_CACHING_LE &&
+                lease_state & SMB2_LEASE_WRITE_CACHING_LE) {
+               if (!(lease_state & SMB2_LEASE_HANDLE_CACHING_LE))
+                       return SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+       } else if (lease_state & SMB2_LEASE_READ_CACHING_LE) {
+               return SMB2_OPLOCK_LEVEL_II;
+       }
+       return 0;
+}
+
+/**
+ * create_lease_buf() - create lease context for open cmd response
+ * @rbuf:      buffer to create lease context response
+ * @lease:     buffer storing the parsed lease state information
+ */
+void create_lease_buf(u8 *rbuf, struct lease *lease)
+{
+       char *LeaseKey = (char *)&lease->lease_key;
+
+       if (lease->version == 2) {
+               struct create_lease_v2 *buf = (struct create_lease_v2 *)rbuf;
+               char *ParentLeaseKey = (char *)&lease->parent_lease_key;
+
+               memset(buf, 0, sizeof(struct create_lease_v2));
+               buf->lcontext.LeaseKeyLow = *((__le64 *)LeaseKey);
+               buf->lcontext.LeaseKeyHigh = *((__le64 *)(LeaseKey + 8));
+               buf->lcontext.LeaseFlags = lease->flags;
+               buf->lcontext.LeaseState = lease->state;
+               buf->lcontext.ParentLeaseKeyLow = *((__le64 *)ParentLeaseKey);
+               buf->lcontext.ParentLeaseKeyHigh = *((__le64 *)(ParentLeaseKey + 8));
+               buf->ccontext.DataOffset = cpu_to_le16(offsetof
+                               (struct create_lease_v2, lcontext));
+               buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
+               buf->ccontext.NameOffset = cpu_to_le16(offsetof
+                               (struct create_lease_v2, Name));
+               buf->ccontext.NameLength = cpu_to_le16(4);
+               buf->Name[0] = 'R';
+               buf->Name[1] = 'q';
+               buf->Name[2] = 'L';
+               buf->Name[3] = 's';
+       } else {
+               struct create_lease *buf = (struct create_lease *)rbuf;
+
+               memset(buf, 0, sizeof(struct create_lease));
+               buf->lcontext.LeaseKeyLow = *((__le64 *)LeaseKey);
+               buf->lcontext.LeaseKeyHigh = *((__le64 *)(LeaseKey + 8));
+               buf->lcontext.LeaseFlags = lease->flags;
+               buf->lcontext.LeaseState = lease->state;
+               buf->ccontext.DataOffset = cpu_to_le16(offsetof
+                               (struct create_lease, lcontext));
+               buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
+               buf->ccontext.NameOffset = cpu_to_le16(offsetof
+                               (struct create_lease, Name));
+               buf->ccontext.NameLength = cpu_to_le16(4);
+               buf->Name[0] = 'R';
+               buf->Name[1] = 'q';
+               buf->Name[2] = 'L';
+               buf->Name[3] = 's';
+       }
+}
+
+/**
+ * parse_lease_state() - parse lease context contained in file open request
+ * @open_req:  buffer containing smb2 file open(create) request
+ *
+ * Return:  allocated lease context if a lease was requested, otherwise NULL
+ */
+struct lease_ctx_info *parse_lease_state(void *open_req)
+{
+       char *data_offset;
+       struct create_context *cc;
+       unsigned int next = 0;
+       char *name;
+       bool found = false;
+       struct smb2_create_req *req = (struct smb2_create_req *)open_req;
+       struct lease_ctx_info *lreq = kzalloc(sizeof(struct lease_ctx_info),
+               GFP_KERNEL);
+       if (!lreq)
+               return NULL;
+
+       data_offset = (char *)req + 4 + le32_to_cpu(req->CreateContextsOffset);
+       cc = (struct create_context *)data_offset;
+       do {
+               cc = (struct create_context *)((char *)cc + next);
+               name = le16_to_cpu(cc->NameOffset) + (char *)cc;
+               if (le16_to_cpu(cc->NameLength) != 4 ||
+                   strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) {
+                       next = le32_to_cpu(cc->Next);
+                       continue;
+               }
+               found = true;
+               break;
+       } while (next != 0);
+
+       if (found) {
+               if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) {
+                       struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
+
+                       *((__le64 *)lreq->lease_key) = lc->lcontext.LeaseKeyLow;
+                       *((__le64 *)(lreq->lease_key + 8)) = lc->lcontext.LeaseKeyHigh;
+                       lreq->req_state = lc->lcontext.LeaseState;
+                       lreq->flags = lc->lcontext.LeaseFlags;
+                       lreq->duration = lc->lcontext.LeaseDuration;
+                       *((__le64 *)lreq->parent_lease_key) = lc->lcontext.ParentLeaseKeyLow;
+                       *((__le64 *)(lreq->parent_lease_key + 8)) = lc->lcontext.ParentLeaseKeyHigh;
+                       lreq->version = 2;
+               } else {
+                       struct create_lease *lc = (struct create_lease *)cc;
+
+                       *((__le64 *)lreq->lease_key) = lc->lcontext.LeaseKeyLow;
+                       *((__le64 *)(lreq->lease_key + 8)) = lc->lcontext.LeaseKeyHigh;
+                       lreq->req_state = lc->lcontext.LeaseState;
+                       lreq->flags = lc->lcontext.LeaseFlags;
+                       lreq->duration = lc->lcontext.LeaseDuration;
+                       lreq->version = 1;
+               }
+               return lreq;
+       }
+
+       kfree(lreq);
+       return NULL;
+}
+
+/**
+ * smb2_find_context_vals() - find a particular context info in open request
+ * @open_req:  buffer containing smb2 file open(create) request
+ * @tag:       context name to search for
+ *
+ * Return:     pointer to requested context, NULL if @tag context not found
+ *             or error pointer if name length is invalid.
+ */
+struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
+{
+       char *data_offset;
+       struct create_context *cc;
+       unsigned int next = 0;
+       char *name;
+       struct smb2_create_req *req = (struct smb2_create_req *)open_req;
+
+       data_offset = (char *)req + 4 + le32_to_cpu(req->CreateContextsOffset);
+       cc = (struct create_context *)data_offset;
+       do {
+               int val;
+
+               cc = (struct create_context *)((char *)cc + next);
+               name = le16_to_cpu(cc->NameOffset) + (char *)cc;
+               val = le16_to_cpu(cc->NameLength);
+               if (val < 4)
+                       return ERR_PTR(-EINVAL);
+
+               if (memcmp(name, tag, val) == 0)
+                       return cc;
+               next = le32_to_cpu(cc->Next);
+       } while (next != 0);
+
+       return NULL;
+}
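/*
 * Standalone userspace sketch of the create-context walk used by
 * parse_lease_state() and smb2_find_context_vals() above (an illustration,
 * not the merged code): each context records an offset to the next context
 * plus a NameOffset/NameLength pair that locates its tag.  The simplified
 * header layout below is an assumption made for the example only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cc_hdr {
	uint32_t next;		/* offset to the next context, 0 if last */
	uint16_t name_offset;	/* offset of the tag, from this context  */
	uint16_t name_length;	/* tag length in bytes                   */
};

static const struct cc_hdr *find_context(const uint8_t *buf, size_t buf_len,
					 const char *tag, size_t tag_len)
{
	size_t pos = 0;

	for (;;) {
		const struct cc_hdr *cc;

		if (pos + sizeof(*cc) > buf_len)
			return NULL;			/* ran off the buffer */
		cc = (const struct cc_hdr *)(buf + pos);
		if (pos + cc->name_offset + cc->name_length > buf_len)
			return NULL;
		if (cc->name_length == tag_len &&
		    !memcmp(buf + pos + cc->name_offset, tag, tag_len))
			return cc;			/* tag matched        */
		if (cc->next == 0)
			return NULL;			/* end of the chain   */
		pos += cc->next;
	}
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	struct cc_hdr hdr = { .next = 0, .name_offset = 8, .name_length = 4 };

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + 8, "RqLs", 4);	/* SMB2_CREATE_REQUEST_LEASE tag */

	printf("lease context %sfound\n",
	       find_context(buf, sizeof(buf), "RqLs", 4) ? "" : "not ");
	return 0;
}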
+
+/**
+ * create_durable_rsp_buf() - create durable handle context
+ * @cc:        buffer to create durable context response
+ */
+void create_durable_rsp_buf(char *cc)
+{
+       struct create_durable_rsp *buf;
+
+       buf = (struct create_durable_rsp *)cc;
+       memset(buf, 0, sizeof(struct create_durable_rsp));
+       buf->ccontext.DataOffset = cpu_to_le16(offsetof
+                       (struct create_durable_rsp, Data));
+       buf->ccontext.DataLength = cpu_to_le32(8);
+       buf->ccontext.NameOffset = cpu_to_le16(offsetof
+                       (struct create_durable_rsp, Name));
+       buf->ccontext.NameLength = cpu_to_le16(4);
+       /* SMB2_CREATE_DURABLE_HANDLE_RESPONSE is "DHnQ" */
+       buf->Name[0] = 'D';
+       buf->Name[1] = 'H';
+       buf->Name[2] = 'n';
+       buf->Name[3] = 'Q';
+}
+
+/**
+ * create_durable_v2_rsp_buf() - create durable handle v2 context
+ * @cc:        buffer to create durable context response
+ * @fp: ksmbd file pointer
+ */
+void create_durable_v2_rsp_buf(char *cc, struct ksmbd_file *fp)
+{
+       struct create_durable_v2_rsp *buf;
+
+       buf = (struct create_durable_v2_rsp *)cc;
+       memset(buf, 0, sizeof(struct create_durable_rsp));
+       buf->ccontext.DataOffset = cpu_to_le16(offsetof
+                       (struct create_durable_rsp, Data));
+       buf->ccontext.DataLength = cpu_to_le32(8);
+       buf->ccontext.NameOffset = cpu_to_le16(offsetof
+                       (struct create_durable_rsp, Name));
+       buf->ccontext.NameLength = cpu_to_le16(4);
+       /* SMB2_CREATE_DURABLE_HANDLE_RESPONSE_V2 is "DH2Q" */
+       buf->Name[0] = 'D';
+       buf->Name[1] = 'H';
+       buf->Name[2] = '2';
+       buf->Name[3] = 'Q';
+
+       buf->Timeout = cpu_to_le32(fp->durable_timeout);
+}
+
+/**
+ * create_mxac_rsp_buf() - create query maximal access context
+ * @cc:                        buffer to create maximal access context response
+ * @maximal_access:    maximal access
+ */
+void create_mxac_rsp_buf(char *cc, int maximal_access)
+{
+       struct create_mxac_rsp *buf;
+
+       buf = (struct create_mxac_rsp *)cc;
+       memset(buf, 0, sizeof(struct create_mxac_rsp));
+       buf->ccontext.DataOffset = cpu_to_le16(offsetof
+                       (struct create_mxac_rsp, QueryStatus));
+       buf->ccontext.DataLength = cpu_to_le32(8);
+       buf->ccontext.NameOffset = cpu_to_le16(offsetof
+                       (struct create_mxac_rsp, Name));
+       buf->ccontext.NameLength = cpu_to_le16(4);
+       /* SMB2_CREATE_QUERY_MAXIMAL_ACCESS_RESPONSE is "MxAc" */
+       buf->Name[0] = 'M';
+       buf->Name[1] = 'x';
+       buf->Name[2] = 'A';
+       buf->Name[3] = 'c';
+
+       buf->QueryStatus = STATUS_SUCCESS;
+       buf->MaximalAccess = cpu_to_le32(maximal_access);
+}
+
+void create_disk_id_rsp_buf(char *cc, __u64 file_id, __u64 vol_id)
+{
+       struct create_disk_id_rsp *buf;
+
+       buf = (struct create_disk_id_rsp *)cc;
+       memset(buf, 0, sizeof(struct create_disk_id_rsp));
+       buf->ccontext.DataOffset = cpu_to_le16(offsetof
+                       (struct create_disk_id_rsp, DiskFileId));
+       buf->ccontext.DataLength = cpu_to_le32(32);
+       buf->ccontext.NameOffset = cpu_to_le16(offsetof
+                       (struct create_mxac_rsp, Name));
+       buf->ccontext.NameLength = cpu_to_le16(4);
+       /* SMB2_CREATE_QUERY_ON_DISK_ID_RESPONSE is "QFid" */
+       buf->Name[0] = 'Q';
+       buf->Name[1] = 'F';
+       buf->Name[2] = 'i';
+       buf->Name[3] = 'd';
+
+       buf->DiskFileId = cpu_to_le64(file_id);
+       buf->VolumeId = cpu_to_le64(vol_id);
+}
+
+/**
+ * create_posix_rsp_buf() - create posix extension context
+ * @cc:        buffer to create posix context response
+ * @fp: ksmbd file pointer
+ */
+void create_posix_rsp_buf(char *cc, struct ksmbd_file *fp)
+{
+       struct create_posix_rsp *buf;
+       struct inode *inode = file_inode(fp->filp);
+       struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
+
+       buf = (struct create_posix_rsp *)cc;
+       memset(buf, 0, sizeof(struct create_posix_rsp));
+       buf->ccontext.DataOffset = cpu_to_le16(offsetof
+                       (struct create_posix_rsp, nlink));
+       buf->ccontext.DataLength = cpu_to_le32(52);
+       buf->ccontext.NameOffset = cpu_to_le16(offsetof
+                       (struct create_posix_rsp, Name));
+       buf->ccontext.NameLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
+       /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
+       buf->Name[0] = 0x93;
+       buf->Name[1] = 0xAD;
+       buf->Name[2] = 0x25;
+       buf->Name[3] = 0x50;
+       buf->Name[4] = 0x9C;
+       buf->Name[5] = 0xB4;
+       buf->Name[6] = 0x11;
+       buf->Name[7] = 0xE7;
+       buf->Name[8] = 0xB4;
+       buf->Name[9] = 0x23;
+       buf->Name[10] = 0x83;
+       buf->Name[11] = 0xDE;
+       buf->Name[12] = 0x96;
+       buf->Name[13] = 0x8B;
+       buf->Name[14] = 0xCD;
+       buf->Name[15] = 0x7C;
+
+       buf->nlink = cpu_to_le32(inode->i_nlink);
+       buf->reparse_tag = cpu_to_le32(fp->volatile_id);
+       buf->mode = cpu_to_le32(inode->i_mode);
+       id_to_sid(from_kuid(user_ns, inode->i_uid),
+                 SIDNFS_USER, (struct smb_sid *)&buf->SidBuffer[0]);
+       id_to_sid(from_kgid(user_ns, inode->i_gid),
+                 SIDNFS_GROUP, (struct smb_sid *)&buf->SidBuffer[20]);
+}
+
+/*
+ * Find lease object(opinfo) for given lease key/fid from lease
+ * break/file close path.
+ */
+/**
+ * lookup_lease_in_table() - find a matching lease info object
+ * @conn:      connection instance
+ * @lease_key: lease key to be searched for
+ *
+ * Return:      opinfo if found matching opinfo, otherwise NULL
+ */
+struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
+                                         char *lease_key)
+{
+       struct oplock_info *opinfo = NULL, *ret_op = NULL;
+       struct lease_table *lt;
+       int ret;
+
+       read_lock(&lease_list_lock);
+       list_for_each_entry(lt, &lease_table_list, l_entry) {
+               if (!memcmp(lt->client_guid, conn->ClientGUID,
+                           SMB2_CLIENT_GUID_SIZE))
+                       goto found;
+       }
+
+       read_unlock(&lease_list_lock);
+       return NULL;
+
+found:
+       rcu_read_lock();
+       list_for_each_entry_rcu(opinfo, &lt->lease_list, lease_entry) {
+               if (!atomic_inc_not_zero(&opinfo->refcount))
+                       continue;
+               rcu_read_unlock();
+               if (!opinfo->op_state || opinfo->op_state == OPLOCK_CLOSING)
+                       goto op_next;
+               if (!(opinfo->o_lease->state &
+                     (SMB2_LEASE_HANDLE_CACHING_LE |
+                      SMB2_LEASE_WRITE_CACHING_LE)))
+                       goto op_next;
+               ret = compare_guid_key(opinfo, conn->ClientGUID,
+                                      lease_key);
+               if (ret) {
+                       ksmbd_debug(OPLOCK, "found opinfo\n");
+                       ret_op = opinfo;
+                       goto out;
+               }
+op_next:
+               opinfo_put(opinfo);
+               rcu_read_lock();
+       }
+       rcu_read_unlock();
+
+out:
+       read_unlock(&lease_list_lock);
+       return ret_op;
+}
+
+int smb2_check_durable_oplock(struct ksmbd_file *fp,
+                             struct lease_ctx_info *lctx, char *name)
+{
+       struct oplock_info *opinfo = opinfo_get(fp);
+       int ret = 0;
+
+       if (opinfo && opinfo->is_lease) {
+               if (!lctx) {
+                       pr_err("open does not include lease\n");
+                       ret = -EBADF;
+                       goto out;
+               }
+               if (memcmp(opinfo->o_lease->lease_key, lctx->lease_key,
+                          SMB2_LEASE_KEY_SIZE)) {
+                       pr_err("invalid lease key\n");
+                       ret = -EBADF;
+                       goto out;
+               }
+               if (name && strcmp(fp->filename, name)) {
+                       pr_err("invalid name reconnect %s\n", name);
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+out:
+       if (opinfo)
+               opinfo_put(opinfo);
+       return ret;
+}
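/*
 * Hedged restatement of the durable-reconnect validation above as a
 * standalone userspace sketch (not merged code): a reconnecting open must
 * carry a lease, its lease key must match the cached one, and the supplied
 * name, when present, must match the original file name.  The sample path
 * below is hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define LEASE_KEY_SIZE 16

struct cached_open {
	unsigned char lease_key[LEASE_KEY_SIZE];
	const char *filename;
};

static int durable_reconnect_ok(const struct cached_open *cached,
				const unsigned char *req_key,
				const char *req_name)
{
	if (!req_key)
		return 0;				/* open carries no lease */
	if (memcmp(cached->lease_key, req_key, LEASE_KEY_SIZE))
		return 0;				/* wrong lease key       */
	if (req_name && strcmp(cached->filename, req_name))
		return 0;				/* wrong file name       */
	return 1;
}

int main(void)
{
	struct cached_open cached = {
		.lease_key = { 1, 2, 3, 4 },
		.filename = "/share/report.txt",
	};
	unsigned char good_key[LEASE_KEY_SIZE] = { 1, 2, 3, 4 };
	unsigned char bad_key[LEASE_KEY_SIZE] = { 9 };

	printf("%d %d\n",
	       durable_reconnect_ok(&cached, good_key, "/share/report.txt"),
	       durable_reconnect_ok(&cached, bad_key, "/share/report.txt"));
	return 0;
}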
diff --git a/fs/ksmbd/oplock.h b/fs/ksmbd/oplock.h
new file mode 100644 (file)
index 0000000..119b804
--- /dev/null
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __KSMBD_OPLOCK_H
+#define __KSMBD_OPLOCK_H
+
+#include "smb_common.h"
+
+#define OPLOCK_WAIT_TIME       (35 * HZ)
+
+/* SMB2 Oplock levels */
+#define SMB2_OPLOCK_LEVEL_NONE          0x00
+#define SMB2_OPLOCK_LEVEL_II            0x01
+#define SMB2_OPLOCK_LEVEL_EXCLUSIVE     0x08
+#define SMB2_OPLOCK_LEVEL_BATCH         0x09
+#define SMB2_OPLOCK_LEVEL_LEASE         0xFF
+
+/* Oplock states */
+#define OPLOCK_STATE_NONE      0x00
+#define OPLOCK_ACK_WAIT                0x01
+#define OPLOCK_CLOSING         0x02
+
+#define OPLOCK_WRITE_TO_READ           0x01
+#define OPLOCK_READ_HANDLE_TO_READ     0x02
+#define OPLOCK_WRITE_TO_NONE           0x04
+#define OPLOCK_READ_TO_NONE            0x08
+
+#define SMB2_LEASE_KEY_SIZE            16
+
+struct lease_ctx_info {
+       __u8                    lease_key[SMB2_LEASE_KEY_SIZE];
+       __le32                  req_state;
+       __le32                  flags;
+       __le64                  duration;
+       __u8                    parent_lease_key[SMB2_LEASE_KEY_SIZE];
+       int                     version;
+};
+
+struct lease_table {
+       char                    client_guid[SMB2_CLIENT_GUID_SIZE];
+       struct list_head        lease_list;
+       struct list_head        l_entry;
+       spinlock_t              lb_lock;
+};
+
+struct lease {
+       __u8                    lease_key[SMB2_LEASE_KEY_SIZE];
+       __le32                  state;
+       __le32                  new_state;
+       __le32                  flags;
+       __le64                  duration;
+       __u8                    parent_lease_key[SMB2_LEASE_KEY_SIZE];
+       int                     version;
+       unsigned short          epoch;
+       struct lease_table      *l_lb;
+};
+
+struct oplock_info {
+       struct ksmbd_conn       *conn;
+       struct ksmbd_session    *sess;
+       struct ksmbd_work       *work;
+       struct ksmbd_file       *o_fp;
+       int                     level;
+       int                     op_state;
+       unsigned long           pending_break;
+       u64                     fid;
+       atomic_t                breaking_cnt;
+       atomic_t                refcount;
+       __u16                   Tid;
+       bool                    is_lease;
+       bool                    open_trunc;     /* truncate on open */
+       struct lease            *o_lease;
+       struct list_head        interim_list;
+       struct list_head        op_entry;
+       struct list_head        lease_entry;
+       wait_queue_head_t oplock_q; /* Other server threads */
+       wait_queue_head_t oplock_brk; /* oplock breaking wait */
+       struct rcu_head         rcu_head;
+};
+
+struct lease_break_info {
+       __le32                  curr_state;
+       __le32                  new_state;
+       __le16                  epoch;
+       char                    lease_key[SMB2_LEASE_KEY_SIZE];
+};
+
+struct oplock_break_info {
+       int level;
+       int open_trunc;
+       int fid;
+};
+
+int smb_grant_oplock(struct ksmbd_work *work, int req_op_level,
+                    u64 pid, struct ksmbd_file *fp, __u16 tid,
+                    struct lease_ctx_info *lctx, int share_ret);
+void smb_break_all_levII_oplock(struct ksmbd_work *work,
+                               struct ksmbd_file *fp, int is_trunc);
+int opinfo_write_to_read(struct oplock_info *opinfo);
+int opinfo_read_handle_to_read(struct oplock_info *opinfo);
+int opinfo_write_to_none(struct oplock_info *opinfo);
+int opinfo_read_to_none(struct oplock_info *opinfo);
+void close_id_del_oplock(struct ksmbd_file *fp);
+void smb_break_all_oplock(struct ksmbd_work *work, struct ksmbd_file *fp);
+struct oplock_info *opinfo_get(struct ksmbd_file *fp);
+void opinfo_put(struct oplock_info *opinfo);
+
+/* Lease related functions */
+void create_lease_buf(u8 *rbuf, struct lease *lease);
+struct lease_ctx_info *parse_lease_state(void *open_req);
+__u8 smb2_map_lease_to_oplock(__le32 lease_state);
+int lease_read_to_write(struct oplock_info *opinfo);
+
+/* Durable related functions */
+void create_durable_rsp_buf(char *cc);
+void create_durable_v2_rsp_buf(char *cc, struct ksmbd_file *fp);
+void create_mxac_rsp_buf(char *cc, int maximal_access);
+void create_disk_id_rsp_buf(char *cc, __u64 file_id, __u64 vol_id);
+void create_posix_rsp_buf(char *cc, struct ksmbd_file *fp);
+struct create_context *smb2_find_context_vals(void *open_req, const char *str);
+struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
+                                         char *lease_key);
+int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
+                       struct lease_ctx_info *lctx);
+void destroy_lease_table(struct ksmbd_conn *conn);
+int smb2_check_durable_oplock(struct ksmbd_file *fp,
+                             struct lease_ctx_info *lctx, char *name);
+#endif /* __KSMBD_OPLOCK_H */
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
new file mode 100644 (file)
index 0000000..e6a9f6a
--- /dev/null
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include "glob.h"
+#include "oplock.h"
+#include "misc.h"
+#include <linux/sched/signal.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include "server.h"
+#include "smb_common.h"
+#include "smbstatus.h"
+#include "connection.h"
+#include "transport_ipc.h"
+#include "mgmt/user_session.h"
+#include "crypto_ctx.h"
+#include "auth.h"
+
+int ksmbd_debug_types;
+
+struct ksmbd_server_config server_conf;
+
+enum SERVER_CTRL_TYPE {
+       SERVER_CTRL_TYPE_INIT,
+       SERVER_CTRL_TYPE_RESET,
+};
+
+struct server_ctrl_struct {
+       int                     type;
+       struct work_struct      ctrl_work;
+};
+
+static DEFINE_MUTEX(ctrl_lock);
+
+static int ___server_conf_set(int idx, char *val)
+{
+       if (idx >= ARRAY_SIZE(server_conf.conf))
+               return -EINVAL;
+
+       if (!val || val[0] == 0x00)
+               return -EINVAL;
+
+       kfree(server_conf.conf[idx]);
+       server_conf.conf[idx] = kstrdup(val, GFP_KERNEL);
+       if (!server_conf.conf[idx])
+               return -ENOMEM;
+       return 0;
+}
+
+int ksmbd_set_netbios_name(char *v)
+{
+       return ___server_conf_set(SERVER_CONF_NETBIOS_NAME, v);
+}
+
+int ksmbd_set_server_string(char *v)
+{
+       return ___server_conf_set(SERVER_CONF_SERVER_STRING, v);
+}
+
+int ksmbd_set_work_group(char *v)
+{
+       return ___server_conf_set(SERVER_CONF_WORK_GROUP, v);
+}
+
+char *ksmbd_netbios_name(void)
+{
+       return server_conf.conf[SERVER_CONF_NETBIOS_NAME];
+}
+
+char *ksmbd_server_string(void)
+{
+       return server_conf.conf[SERVER_CONF_SERVER_STRING];
+}
+
+char *ksmbd_work_group(void)
+{
+       return server_conf.conf[SERVER_CONF_WORK_GROUP];
+}
+
+/**
+ * check_conn_state() - check state of server thread connection
+ * @work:     smb work containing server thread information
+ *
+ * Return:     0 on valid connection, otherwise 1 to reconnect
+ */
+static inline int check_conn_state(struct ksmbd_work *work)
+{
+       struct smb_hdr *rsp_hdr;
+
+       if (ksmbd_conn_exiting(work) || ksmbd_conn_need_reconnect(work)) {
+               rsp_hdr = work->response_buf;
+               rsp_hdr->Status.CifsError = STATUS_CONNECTION_DISCONNECTED;
+               return 1;
+       }
+       return 0;
+}
+
+#define SERVER_HANDLER_CONTINUE                0
+#define SERVER_HANDLER_ABORT           1
+
+static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn,
+                            u16 *cmd)
+{
+       struct smb_version_cmds *cmds;
+       u16 command;
+       int ret;
+
+       if (check_conn_state(work))
+               return SERVER_HANDLER_CONTINUE;
+
+       if (ksmbd_verify_smb_message(work))
+               return SERVER_HANDLER_ABORT;
+
+       command = conn->ops->get_cmd_val(work);
+       *cmd = command;
+
+andx_again:
+       if (command >= conn->max_cmds) {
+               conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+               return SERVER_HANDLER_CONTINUE;
+       }
+
+       cmds = &conn->cmds[command];
+       if (!cmds->proc) {
+               ksmbd_debug(SMB, "*** not implemented yet cmd = %x\n", command);
+               conn->ops->set_rsp_status(work, STATUS_NOT_IMPLEMENTED);
+               return SERVER_HANDLER_CONTINUE;
+       }
+
+       if (work->sess && conn->ops->is_sign_req(work, command)) {
+               ret = conn->ops->check_sign_req(work);
+               if (!ret) {
+                       conn->ops->set_rsp_status(work, STATUS_ACCESS_DENIED);
+                       return SERVER_HANDLER_CONTINUE;
+               }
+       }
+
+       ret = cmds->proc(work);
+
+       if (ret < 0)
+               ksmbd_debug(CONN, "Failed to process %u [%d]\n", command, ret);
+       /* AndX commands - chained request can return positive values */
+       else if (ret > 0) {
+               command = ret;
+               *cmd = command;
+               goto andx_again;
+       }
+
+       if (work->send_no_response)
+               return SERVER_HANDLER_ABORT;
+       return SERVER_HANDLER_CONTINUE;
+}
+
+static void __handle_ksmbd_work(struct ksmbd_work *work,
+                               struct ksmbd_conn *conn)
+{
+       u16 command = 0;
+       int rc;
+
+       if (conn->ops->allocate_rsp_buf(work))
+               return;
+
+       if (conn->ops->is_transform_hdr &&
+           conn->ops->is_transform_hdr(work->request_buf)) {
+               rc = conn->ops->decrypt_req(work);
+               if (rc < 0) {
+                       conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
+                       goto send;
+               }
+
+               work->encrypted = true;
+       }
+
+       rc = conn->ops->init_rsp_hdr(work);
+       if (rc) {
+               /* either uid or tid is not correct */
+               conn->ops->set_rsp_status(work, STATUS_INVALID_HANDLE);
+               goto send;
+       }
+
+       if (conn->ops->check_user_session) {
+               rc = conn->ops->check_user_session(work);
+               if (rc < 0) {
+                       command = conn->ops->get_cmd_val(work);
+                       conn->ops->set_rsp_status(work,
+                                       STATUS_USER_SESSION_DELETED);
+                       goto send;
+               } else if (rc > 0) {
+                       rc = conn->ops->get_ksmbd_tcon(work);
+                       if (rc < 0) {
+                               conn->ops->set_rsp_status(work,
+                                       STATUS_NETWORK_NAME_DELETED);
+                               goto send;
+                       }
+               }
+       }
+
+       do {
+               rc = __process_request(work, conn, &command);
+               if (rc == SERVER_HANDLER_ABORT)
+                       break;
+
+               /*
+                * Call smb2_set_rsp_credits() function to set number of credits
+                * granted in hdr of smb2 response.
+                */
+               if (conn->ops->set_rsp_credits) {
+                       spin_lock(&conn->credits_lock);
+                       rc = conn->ops->set_rsp_credits(work);
+                       spin_unlock(&conn->credits_lock);
+                       if (rc < 0) {
+                               conn->ops->set_rsp_status(work,
+                                       STATUS_INVALID_PARAMETER);
+                               goto send;
+                       }
+               }
+
+               if (work->sess &&
+                   (work->sess->sign || smb3_11_final_sess_setup_resp(work) ||
+                    conn->ops->is_sign_req(work, command)))
+                       conn->ops->set_sign_rsp(work);
+       } while (is_chained_smb2_message(work));
+
+       if (work->send_no_response)
+               return;
+
+send:
+       smb3_preauth_hash_rsp(work);
+       if (work->sess && work->sess->enc && work->encrypted &&
+           conn->ops->encrypt_resp) {
+               rc = conn->ops->encrypt_resp(work);
+               if (rc < 0) {
+                       conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
+                       goto send;
+               }
+       }
+
+       ksmbd_conn_write(work);
+}
+
+/**
+ * handle_ksmbd_work() - process pending smb work requests
+ * @wk:        smb work containing request command buffer
+ *
+ * called by kworker threads to process remaining smb work requests
+ */
+static void handle_ksmbd_work(struct work_struct *wk)
+{
+       struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+       struct ksmbd_conn *conn = work->conn;
+
+       atomic64_inc(&conn->stats.request_served);
+
+       __handle_ksmbd_work(work, conn);
+
+       ksmbd_conn_try_dequeue_request(work);
+       ksmbd_free_work_struct(work);
+       atomic_dec(&conn->r_count);
+}
+
+/**
+ * queue_ksmbd_work() - queue a smb request to worker thread queue
+ *             for processing smb command and sending response
+ * @conn:      connection instance
+ *
+ * read remaining data from socket, then create and submit work.
+ */
+static int queue_ksmbd_work(struct ksmbd_conn *conn)
+{
+       struct ksmbd_work *work;
+
+       work = ksmbd_alloc_work_struct();
+       if (!work) {
+               pr_err("allocation for work failed\n");
+               return -ENOMEM;
+       }
+
+       work->conn = conn;
+       work->request_buf = conn->request_buf;
+       conn->request_buf = NULL;
+
+       if (ksmbd_init_smb_server(work)) {
+               ksmbd_free_work_struct(work);
+               return -EINVAL;
+       }
+
+       ksmbd_conn_enqueue_request(work);
+       atomic_inc(&conn->r_count);
+       /* update activity on connection */
+       conn->last_active = jiffies;
+       INIT_WORK(&work->work, handle_ksmbd_work);
+       ksmbd_queue_work(work);
+       return 0;
+}
+
+static int ksmbd_server_process_request(struct ksmbd_conn *conn)
+{
+       return queue_ksmbd_work(conn);
+}
+
+static int ksmbd_server_terminate_conn(struct ksmbd_conn *conn)
+{
+       ksmbd_sessions_deregister(conn);
+       destroy_lease_table(conn);
+       return 0;
+}
+
+static void ksmbd_server_tcp_callbacks_init(void)
+{
+       struct ksmbd_conn_ops ops;
+
+       ops.process_fn = ksmbd_server_process_request;
+       ops.terminate_fn = ksmbd_server_terminate_conn;
+
+       ksmbd_conn_init_server_callbacks(&ops);
+}
+
+static void server_conf_free(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(server_conf.conf); i++) {
+               kfree(server_conf.conf[i]);
+               server_conf.conf[i] = NULL;
+       }
+}
+
+static int server_conf_init(void)
+{
+       WRITE_ONCE(server_conf.state, SERVER_STATE_STARTING_UP);
+       server_conf.enforced_signing = 0;
+       server_conf.min_protocol = ksmbd_min_protocol();
+       server_conf.max_protocol = ksmbd_max_protocol();
+       server_conf.auth_mechs = KSMBD_AUTH_NTLMSSP;
+#ifdef CONFIG_SMB_SERVER_KERBEROS5
+       server_conf.auth_mechs |= KSMBD_AUTH_KRB5 |
+                               KSMBD_AUTH_MSKRB5;
+#endif
+       return 0;
+}
+
+static void server_ctrl_handle_init(struct server_ctrl_struct *ctrl)
+{
+       int ret;
+
+       ret = ksmbd_conn_transport_init();
+       if (ret) {
+               server_queue_ctrl_reset_work();
+               return;
+       }
+
+       WRITE_ONCE(server_conf.state, SERVER_STATE_RUNNING);
+}
+
+static void server_ctrl_handle_reset(struct server_ctrl_struct *ctrl)
+{
+       ksmbd_ipc_soft_reset();
+       ksmbd_conn_transport_destroy();
+       server_conf_free();
+       server_conf_init();
+       WRITE_ONCE(server_conf.state, SERVER_STATE_STARTING_UP);
+}
+
+static void server_ctrl_handle_work(struct work_struct *work)
+{
+       struct server_ctrl_struct *ctrl;
+
+       ctrl = container_of(work, struct server_ctrl_struct, ctrl_work);
+
+       mutex_lock(&ctrl_lock);
+       switch (ctrl->type) {
+       case SERVER_CTRL_TYPE_INIT:
+               server_ctrl_handle_init(ctrl);
+               break;
+       case SERVER_CTRL_TYPE_RESET:
+               server_ctrl_handle_reset(ctrl);
+               break;
+       default:
+               pr_err("Unknown server work type: %d\n", ctrl->type);
+       }
+       mutex_unlock(&ctrl_lock);
+       kfree(ctrl);
+       module_put(THIS_MODULE);
+}
+
+static int __queue_ctrl_work(int type)
+{
+       struct server_ctrl_struct *ctrl;
+
+       ctrl = kmalloc(sizeof(struct server_ctrl_struct), GFP_KERNEL);
+       if (!ctrl)
+               return -ENOMEM;
+
+       __module_get(THIS_MODULE);
+       ctrl->type = type;
+       INIT_WORK(&ctrl->ctrl_work, server_ctrl_handle_work);
+       queue_work(system_long_wq, &ctrl->ctrl_work);
+       return 0;
+}
+
+int server_queue_ctrl_init_work(void)
+{
+       return __queue_ctrl_work(SERVER_CTRL_TYPE_INIT);
+}
+
+int server_queue_ctrl_reset_work(void)
+{
+       return __queue_ctrl_work(SERVER_CTRL_TYPE_RESET);
+}
+
+static ssize_t stats_show(struct class *class, struct class_attribute *attr,
+                         char *buf)
+{
+       /*
+        * Inc this each time you change stats output format,
+        * so user space will know what to do.
+        */
+       static int stats_version = 2;
+       static const char * const state[] = {
+               "startup",
+               "running",
+               "reset",
+               "shutdown"
+       };
+
+       ssize_t sz = scnprintf(buf, PAGE_SIZE, "%d %s %d %lu\n", stats_version,
+                              state[server_conf.state], server_conf.tcp_port,
+                              server_conf.ipc_last_active / HZ);
+       return sz;
+}
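/*
 * Hedged userspace sketch (not merged code): parses the "stats" attribute
 * produced by stats_show() above.  The sysfs path below is an assumption
 * derived from the ksmbd-control class registered further down; the line
 * format follows the "%d %s %d %lu" layout (version, state, TCP port, last
 * IPC activity in seconds).
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/ksmbd-control/stats", "r");
	int version, port;
	char state[16];
	unsigned long ipc_last_active;

	if (!f) {
		perror("ksmbd stats");
		return 1;
	}
	if (fscanf(f, "%d %15s %d %lu",
		   &version, state, &port, &ipc_last_active) == 4)
		printf("stats v%d: state=%s port=%d last_ipc=%lus\n",
		       version, state, port, ipc_last_active);
	fclose(f);
	return 0;
}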
+
+static ssize_t kill_server_store(struct class *class,
+                                struct class_attribute *attr, const char *buf,
+                                size_t len)
+{
+       if (!sysfs_streq(buf, "hard"))
+               return len;
+
+       pr_info("kill command received\n");
+       mutex_lock(&ctrl_lock);
+       WRITE_ONCE(server_conf.state, SERVER_STATE_RESETTING);
+       __module_get(THIS_MODULE);
+       server_ctrl_handle_reset(NULL);
+       module_put(THIS_MODULE);
+       mutex_unlock(&ctrl_lock);
+       return len;
+}
+
+static const char * const debug_type_strings[] = {"smb", "auth", "vfs",
+                                                 "oplock", "ipc", "conn",
+                                                 "rdma"};
+
+static ssize_t debug_show(struct class *class, struct class_attribute *attr,
+                         char *buf)
+{
+       ssize_t sz = 0;
+       int i, pos = 0;
+
+       for (i = 0; i < ARRAY_SIZE(debug_type_strings); i++) {
+               if ((ksmbd_debug_types >> i) & 1) {
+                       pos = scnprintf(buf + sz,
+                                       PAGE_SIZE - sz,
+                                       "[%s] ",
+                                       debug_type_strings[i]);
+               } else {
+                       pos = scnprintf(buf + sz,
+                                       PAGE_SIZE - sz,
+                                       "%s ",
+                                       debug_type_strings[i]);
+               }
+               sz += pos;
+       }
+       sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
+       return sz;
+}
+
+static ssize_t debug_store(struct class *class, struct class_attribute *attr,
+                          const char *buf, size_t len)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(debug_type_strings); i++) {
+               if (sysfs_streq(buf, "all")) {
+                       if (ksmbd_debug_types == KSMBD_DEBUG_ALL)
+                               ksmbd_debug_types = 0;
+                       else
+                               ksmbd_debug_types = KSMBD_DEBUG_ALL;
+                       break;
+               }
+
+               if (sysfs_streq(buf, debug_type_strings[i])) {
+                       if (ksmbd_debug_types & (1 << i))
+                               ksmbd_debug_types &= ~(1 << i);
+                       else
+                               ksmbd_debug_types |= (1 << i);
+                       break;
+               }
+       }
+
+       return len;
+}
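/*
 * Hedged usage sketch (not merged code): writing one of the
 * debug_type_strings, or "all", toggles the matching bit in
 * ksmbd_debug_types, as debug_store() above does.  The sysfs path is an
 * assumption derived from the ksmbd-control class registered below.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/ksmbd-control/debug", "w");

	if (!f) {
		perror("ksmbd debug");
		return 1;
	}
	fputs("oplock", f);	/* toggle the [oplock] debug class */
	fclose(f);
	return 0;
}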
+
+static CLASS_ATTR_RO(stats);
+static CLASS_ATTR_WO(kill_server);
+static CLASS_ATTR_RW(debug);
+
+static struct attribute *ksmbd_control_class_attrs[] = {
+       &class_attr_stats.attr,
+       &class_attr_kill_server.attr,
+       &class_attr_debug.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(ksmbd_control_class);
+
+static struct class ksmbd_control_class = {
+       .name           = "ksmbd-control",
+       .owner          = THIS_MODULE,
+       .class_groups   = ksmbd_control_class_groups,
+};
+
+static int ksmbd_server_shutdown(void)
+{
+       WRITE_ONCE(server_conf.state, SERVER_STATE_SHUTTING_DOWN);
+
+       class_unregister(&ksmbd_control_class);
+       ksmbd_workqueue_destroy();
+       ksmbd_ipc_release();
+       ksmbd_conn_transport_destroy();
+       ksmbd_crypto_destroy();
+       ksmbd_free_global_file_table();
+       destroy_lease_table(NULL);
+       ksmbd_work_pool_destroy();
+       ksmbd_exit_file_cache();
+       server_conf_free();
+       return 0;
+}
+
+static int __init ksmbd_server_init(void)
+{
+       int ret;
+
+       ret = class_register(&ksmbd_control_class);
+       if (ret) {
+               pr_err("Unable to register ksmbd-control class\n");
+               return ret;
+       }
+
+       ksmbd_server_tcp_callbacks_init();
+
+       ret = server_conf_init();
+       if (ret)
+               goto err_unregister;
+
+       ret = ksmbd_work_pool_init();
+       if (ret)
+               goto err_unregister;
+
+       ret = ksmbd_init_file_cache();
+       if (ret)
+               goto err_destroy_work_pools;
+
+       ret = ksmbd_ipc_init();
+       if (ret)
+               goto err_exit_file_cache;
+
+       ret = ksmbd_init_global_file_table();
+       if (ret)
+               goto err_ipc_release;
+
+       ret = ksmbd_inode_hash_init();
+       if (ret)
+               goto err_destroy_file_table;
+
+       ret = ksmbd_crypto_create();
+       if (ret)
+               goto err_release_inode_hash;
+
+       ret = ksmbd_workqueue_init();
+       if (ret)
+               goto err_crypto_destroy;
+       return 0;
+
+err_crypto_destroy:
+       ksmbd_crypto_destroy();
+err_release_inode_hash:
+       ksmbd_release_inode_hash();
+err_destroy_file_table:
+       ksmbd_free_global_file_table();
+err_ipc_release:
+       ksmbd_ipc_release();
+err_exit_file_cache:
+       ksmbd_exit_file_cache();
+err_destroy_work_pools:
+       ksmbd_work_pool_destroy();
+err_unregister:
+       class_unregister(&ksmbd_control_class);
+
+       return ret;
+}
+
+/**
+ * ksmbd_server_exit() - shut down the server and free memory at module exit
+ */
+static void __exit ksmbd_server_exit(void)
+{
+       ksmbd_server_shutdown();
+       ksmbd_release_inode_hash();
+}
+
+MODULE_AUTHOR("Namjae Jeon <linkinjeon@kernel.org>");
+MODULE_VERSION(KSMBD_VERSION);
+MODULE_DESCRIPTION("Linux kernel CIFS/SMB SERVER");
+MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: ecb");
+MODULE_SOFTDEP("pre: hmac");
+MODULE_SOFTDEP("pre: md4");
+MODULE_SOFTDEP("pre: md5");
+MODULE_SOFTDEP("pre: nls");
+MODULE_SOFTDEP("pre: aes");
+MODULE_SOFTDEP("pre: cmac");
+MODULE_SOFTDEP("pre: sha256");
+MODULE_SOFTDEP("pre: sha512");
+MODULE_SOFTDEP("pre: aead2");
+MODULE_SOFTDEP("pre: ccm");
+MODULE_SOFTDEP("pre: gcm");
+module_init(ksmbd_server_init)
+module_exit(ksmbd_server_exit)
diff --git a/fs/ksmbd/server.h b/fs/ksmbd/server.h
new file mode 100644 (file)
index 0000000..ac9d932
--- /dev/null
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __SERVER_H__
+#define __SERVER_H__
+
+#include "smbacl.h"
+
+/*
+ * Server state type
+ */
+enum {
+       SERVER_STATE_STARTING_UP,
+       SERVER_STATE_RUNNING,
+       SERVER_STATE_RESETTING,
+       SERVER_STATE_SHUTTING_DOWN,
+};
+
+/*
+ * Server global config string index
+ */
+enum {
+       SERVER_CONF_NETBIOS_NAME,
+       SERVER_CONF_SERVER_STRING,
+       SERVER_CONF_WORK_GROUP,
+};
+
+struct ksmbd_server_config {
+       unsigned int            flags;
+       unsigned int            state;
+       short                   signing;
+       short                   enforced_signing;
+       short                   min_protocol;
+       short                   max_protocol;
+       unsigned short          tcp_port;
+       unsigned short          ipc_timeout;
+       unsigned long           ipc_last_active;
+       unsigned long           deadtime;
+       unsigned int            share_fake_fscaps;
+       struct smb_sid          domain_sid;
+       unsigned int            auth_mechs;
+
+       char                    *conf[SERVER_CONF_WORK_GROUP + 1];
+};
+
+extern struct ksmbd_server_config server_conf;
+
+int ksmbd_set_netbios_name(char *v);
+int ksmbd_set_server_string(char *v);
+int ksmbd_set_work_group(char *v);
+
+char *ksmbd_netbios_name(void);
+char *ksmbd_server_string(void);
+char *ksmbd_work_group(void);
+
+static inline int ksmbd_server_running(void)
+{
+       return READ_ONCE(server_conf.state) == SERVER_STATE_RUNNING;
+}
+
+static inline int ksmbd_server_configurable(void)
+{
+       return READ_ONCE(server_conf.state) < SERVER_STATE_RESETTING;
+}
+
+int server_queue_ctrl_init_work(void);
+int server_queue_ctrl_reset_work(void);
+#endif /* __SERVER_H__ */
diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
new file mode 100644 (file)
index 0000000..9aa46bb
--- /dev/null
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include "glob.h"
+#include "nterr.h"
+#include "smb2pdu.h"
+#include "smb_common.h"
+#include "smbstatus.h"
+#include "mgmt/user_session.h"
+#include "connection.h"
+
+static int check_smb2_hdr(struct smb2_hdr *hdr)
+{
+       /*
+        * Make sure that this really is an SMB2 request and not a response.
+        */
+       if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
+               return 1;
+       return 0;
+}
+
+/*
+ *  The following table defines the expected "StructureSize" of SMB2 requests
+ *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
+ *
+ *  Note that commands are defined in smb2pdu.h in le16 but the array below is
+ *  indexed by command in host byte order
+ */
+static const __le16 smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
+       /* SMB2_NEGOTIATE */ cpu_to_le16(36),
+       /* SMB2_SESSION_SETUP */ cpu_to_le16(25),
+       /* SMB2_LOGOFF */ cpu_to_le16(4),
+       /* SMB2_TREE_CONNECT */ cpu_to_le16(9),
+       /* SMB2_TREE_DISCONNECT */ cpu_to_le16(4),
+       /* SMB2_CREATE */ cpu_to_le16(57),
+       /* SMB2_CLOSE */ cpu_to_le16(24),
+       /* SMB2_FLUSH */ cpu_to_le16(24),
+       /* SMB2_READ */ cpu_to_le16(49),
+       /* SMB2_WRITE */ cpu_to_le16(49),
+       /* SMB2_LOCK */ cpu_to_le16(48),
+       /* SMB2_IOCTL */ cpu_to_le16(57),
+       /* SMB2_CANCEL */ cpu_to_le16(4),
+       /* SMB2_ECHO */ cpu_to_le16(4),
+       /* SMB2_QUERY_DIRECTORY */ cpu_to_le16(33),
+       /* SMB2_CHANGE_NOTIFY */ cpu_to_le16(32),
+       /* SMB2_QUERY_INFO */ cpu_to_le16(41),
+       /* SMB2_SET_INFO */ cpu_to_le16(33),
+       /* use 44 for lease break */
+       /* SMB2_OPLOCK_BREAK */ cpu_to_le16(36)
+};
+
+/*
+ * The size of the variable area depends on the offset and length fields,
+ * which are located at different positions for the various SMB2 requests.
+ * SMB2 requests with no variable length info show an offset of zero.
+ */
+static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
+       /* SMB2_NEGOTIATE */ true,
+       /* SMB2_SESSION_SETUP */ true,
+       /* SMB2_LOGOFF */ false,
+       /* SMB2_TREE_CONNECT */ true,
+       /* SMB2_TREE_DISCONNECT */ false,
+       /* SMB2_CREATE */ true,
+       /* SMB2_CLOSE */ false,
+       /* SMB2_FLUSH */ false,
+       /* SMB2_READ */ true,
+       /* SMB2_WRITE */ true,
+       /* SMB2_LOCK */ true,
+       /* SMB2_IOCTL */ true,
+       /* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */
+       /* SMB2_ECHO */ false,
+       /* SMB2_QUERY_DIRECTORY */ true,
+       /* SMB2_CHANGE_NOTIFY */ false,
+       /* SMB2_QUERY_INFO */ true,
+       /* SMB2_SET_INFO */ true,
+       /* SMB2_OPLOCK_BREAK */ false
+};
+
+/*
+ * Returns the pointer to the beginning of the data area. The length of the
+ * data area and the offset to it (from the beginning of the SMB) are also
+ * returned.
+ */
+static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
+{
+       *off = 0;
+       *len = 0;
+
+       /* error requests do not have a data area */
+       if (hdr->Status && hdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
+           (((struct smb2_err_rsp *)hdr)->StructureSize) == SMB2_ERROR_STRUCTURE_SIZE2_LE)
+               return NULL;
+
+       /*
+        * Following commands have data areas so we have to get the location
+        * of the data buffer offset and data buffer length for the particular
+        * command.
+        */
+       switch (hdr->Command) {
+       case SMB2_SESSION_SETUP:
+               *off = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferOffset);
+               *len = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferLength);
+               break;
+       case SMB2_TREE_CONNECT:
+               *off = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset);
+               *len = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathLength);
+               break;
+       case SMB2_CREATE:
+       {
+               if (((struct smb2_create_req *)hdr)->CreateContextsLength) {
+                       *off = le32_to_cpu(((struct smb2_create_req *)
+                               hdr)->CreateContextsOffset);
+                       *len = le32_to_cpu(((struct smb2_create_req *)
+                               hdr)->CreateContextsLength);
+                       break;
+               }
+
+               *off = le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
+               *len = le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
+               break;
+       }
+       case SMB2_QUERY_INFO:
+               *off = le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset);
+               *len = le32_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferLength);
+               break;
+       case SMB2_SET_INFO:
+               *off = le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset);
+               *len = le32_to_cpu(((struct smb2_set_info_req *)hdr)->BufferLength);
+               break;
+       case SMB2_READ:
+               *off = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoOffset);
+               *len = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoLength);
+               break;
+       case SMB2_WRITE:
+               if (((struct smb2_write_req *)hdr)->DataOffset) {
+                       *off = le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset);
+                       *len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length);
+                       break;
+               }
+
+               *off = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoOffset);
+               *len = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoLength);
+               break;
+       case SMB2_QUERY_DIRECTORY:
+               *off = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset);
+               *len = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameLength);
+               break;
+       case SMB2_LOCK:
+       {
+               int lock_count;
+
+               /*
+                * The smb2_lock request fixed size of 48 already includes
+                * a single smb2_lock_element structure.
+                */
+               lock_count = le16_to_cpu(((struct smb2_lock_req *)hdr)->LockCount) - 1;
+               if (lock_count > 0) {
+                       *off = __SMB2_HEADER_STRUCTURE_SIZE + 48;
+                       *len = sizeof(struct smb2_lock_element) * lock_count;
+               }
+               break;
+       }
+       case SMB2_IOCTL:
+               *off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
+               *len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
+
+               break;
+       default:
+               ksmbd_debug(SMB, "no length check for command\n");
+               break;
+       }
+
+       /*
+        * Invalid length or offset probably means data area is invalid, but
+        * we have little choice but to ignore the data area in this case.
+        */
+       if (*off > 4096) {
+               ksmbd_debug(SMB, "offset %d too large, data area ignored\n",
+                           *off);
+               *len = 0;
+               *off = 0;
+       } else if (*off < 0) {
+               ksmbd_debug(SMB,
+                           "negative offset %d to data invalid ignore data area\n",
+                           *off);
+               *off = 0;
+               *len = 0;
+       } else if (*len < 0) {
+               ksmbd_debug(SMB,
+                           "negative data length %d invalid, data area ignored\n",
+                           *len);
+               *len = 0;
+       } else if (*len > 128 * 1024) {
+               ksmbd_debug(SMB, "data area larger than 128K: %d\n", *len);
+               *len = 0;
+       }
+
+       /* return pointer to beginning of data area, ie offset from SMB start */
+       if ((*off != 0) && (*len != 0))
+               return (char *)hdr + *off;
+       else
+               return NULL;
+}
+
+/*
+ * Calculate the size of the SMB message based on the fixed header
+ * portion, the number of word parameters and the data portion of the message.
+ */
+static unsigned int smb2_calc_size(void *buf)
+{
+       struct smb2_pdu *pdu = (struct smb2_pdu *)buf;
+       struct smb2_hdr *hdr = &pdu->hdr;
+       int offset; /* the offset from the beginning of SMB to data area */
+       int data_length; /* the length of the variable length data area */
+       /* Structure Size has already been checked to make sure it is 64 */
+       int len = le16_to_cpu(hdr->StructureSize);
+
+       /*
+        * StructureSize2, ie length of fixed parameter area has already
+        * been checked to make sure it is the correct length.
+        */
+       len += le16_to_cpu(pdu->StructureSize2);
+
+       if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false)
+               goto calc_size_exit;
+
+       smb2_get_data_area_len(&offset, &data_length, hdr);
+       ksmbd_debug(SMB, "SMB2 data length %d offset %d\n", data_length,
+                   offset);
+
+       if (data_length > 0) {
+               /*
+                * Check to make sure that the data area begins after the
+                * fixed area. Note that the last byte of the fixed area is
+                * part of the data area
+                * for some commands, typically those with odd StructureSize,
+                * so we must add one to the calculation.
+                */
+               if (offset + 1 < len)
+                       ksmbd_debug(SMB,
+                                   "data area offset %d overlaps SMB2 header %d\n",
+                                   offset + 1, len);
+               else
+                       len = offset + data_length;
+       }
+calc_size_exit:
+       ksmbd_debug(SMB, "SMB2 len %d\n", len);
+       return len;
+}
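/*
 * Illustrative arithmetic for the calculation above (a sketch, not merged
 * code): the PDU size starts from the fixed header StructureSize plus the
 * per-command StructureSize2, and is extended to the end of the variable
 * data area when one is present and does not overlap the fixed area.
 */
#include <stdio.h>

static unsigned int calc_smb2_size(unsigned int structure_size,	/* 64      */
				   unsigned int structure_size2,	/* per cmd */
				   unsigned int data_offset,
				   unsigned int data_length)
{
	unsigned int len = structure_size + structure_size2;

	if (data_length > 0 && data_offset + 1 >= len)
		len = data_offset + data_length;
	return len;
}

int main(void)
{
	/*
	 * Example: an SMB2_CREATE (StructureSize2 = 57) carrying a 32-byte
	 * name at offset 120 gives a 152-byte PDU.
	 */
	printf("%u\n", calc_smb2_size(64, 57, 120, 32));
	return 0;
}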
+
+static inline int smb2_query_info_req_len(struct smb2_query_info_req *h)
+{
+       return le32_to_cpu(h->InputBufferLength) +
+               le32_to_cpu(h->OutputBufferLength);
+}
+
+static inline int smb2_set_info_req_len(struct smb2_set_info_req *h)
+{
+       return le32_to_cpu(h->BufferLength);
+}
+
+static inline int smb2_read_req_len(struct smb2_read_req *h)
+{
+       return le32_to_cpu(h->Length);
+}
+
+static inline int smb2_write_req_len(struct smb2_write_req *h)
+{
+       return le32_to_cpu(h->Length);
+}
+
+static inline int smb2_query_dir_req_len(struct smb2_query_directory_req *h)
+{
+       return le32_to_cpu(h->OutputBufferLength);
+}
+
+static inline int smb2_ioctl_req_len(struct smb2_ioctl_req *h)
+{
+       return le32_to_cpu(h->InputCount) +
+               le32_to_cpu(h->OutputCount);
+}
+
+static inline int smb2_ioctl_resp_len(struct smb2_ioctl_req *h)
+{
+       return le32_to_cpu(h->MaxInputResponse) +
+               le32_to_cpu(h->MaxOutputResponse);
+}
+
+static int smb2_validate_credit_charge(struct smb2_hdr *hdr)
+{
+       int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len;
+       int credit_charge = le16_to_cpu(hdr->CreditCharge);
+       void *__hdr = hdr;
+
+       switch (hdr->Command) {
+       case SMB2_QUERY_INFO:
+               req_len = smb2_query_info_req_len(__hdr);
+               break;
+       case SMB2_SET_INFO:
+               req_len = smb2_set_info_req_len(__hdr);
+               break;
+       case SMB2_READ:
+               req_len = smb2_read_req_len(__hdr);
+               break;
+       case SMB2_WRITE:
+               req_len = smb2_write_req_len(__hdr);
+               break;
+       case SMB2_QUERY_DIRECTORY:
+               req_len = smb2_query_dir_req_len(__hdr);
+               break;
+       case SMB2_IOCTL:
+               req_len = smb2_ioctl_req_len(__hdr);
+               expect_resp_len = smb2_ioctl_resp_len(__hdr);
+               break;
+       default:
+               return 0;
+       }
+
+       credit_charge = max(1, credit_charge);
+       max_len = max(req_len, expect_resp_len);
+       calc_credit_num = DIV_ROUND_UP(max_len, SMB2_MAX_BUFFER_SIZE);
+
+       if (credit_charge < calc_credit_num) {
+               pr_err("Insufficient credit charge, given: %d, needed: %d\n",
+                      credit_charge, calc_credit_num);
+               return 1;
+       }
+
+       return 0;
+}
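/*
 * Hedged sketch of the credit-charge rule enforced above (not merged code):
 * a request must carry at least DIV_ROUND_UP(max(request length, expected
 * response length), SMB2_MAX_BUFFER_SIZE) credits.  The 64 KiB buffer size
 * below is an assumption made for the example.
 */
#include <stdio.h>

#define MAX_BUFFER_SIZE (64 * 1024)

static int credit_charge_ok(int credit_charge, int req_len, int resp_len)
{
	int max_len = req_len > resp_len ? req_len : resp_len;
	int needed = (max_len + MAX_BUFFER_SIZE - 1) / MAX_BUFFER_SIZE;

	if (credit_charge < 1)
		credit_charge = 1;
	return credit_charge >= needed;
}

int main(void)
{
	/* a 1 MiB read needs at least 16 credits */
	printf("%s\n", credit_charge_ok(16, 1024 * 1024, 0) ? "ok" : "short");
	printf("%s\n", credit_charge_ok(8, 1024 * 1024, 0) ? "ok" : "short");
	return 0;
}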
+
+int ksmbd_smb2_check_message(struct ksmbd_work *work)
+{
+       struct smb2_pdu *pdu = work->request_buf;
+       struct smb2_hdr *hdr = &pdu->hdr;
+       int command;
+       __u32 clc_len;  /* calculated length */
+       __u32 len = get_rfc1002_len(pdu);
+
+       if (work->next_smb2_rcv_hdr_off) {
+               pdu = ksmbd_req_buf_next(work);
+               hdr = &pdu->hdr;
+       }
+
+       if (le32_to_cpu(hdr->NextCommand) > 0) {
+               len = le32_to_cpu(hdr->NextCommand);
+       } else if (work->next_smb2_rcv_hdr_off) {
+               len -= work->next_smb2_rcv_hdr_off;
+               len = round_up(len, 8);
+       }
+
+       if (check_smb2_hdr(hdr))
+               return 1;
+
+       if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
+               ksmbd_debug(SMB, "Illegal structure size %u\n",
+                           le16_to_cpu(hdr->StructureSize));
+               return 1;
+       }
+
+       command = le16_to_cpu(hdr->Command);
+       if (command >= NUMBER_OF_SMB2_COMMANDS) {
+               ksmbd_debug(SMB, "Illegal SMB2 command %d\n", command);
+               return 1;
+       }
+
+       if (smb2_req_struct_sizes[command] != pdu->StructureSize2) {
+               if (command != SMB2_OPLOCK_BREAK_HE &&
+                   (hdr->Status == 0 || pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
+                       /* error packets have 9 byte structure size */
+                       ksmbd_debug(SMB,
+                                   "Illegal request size %u for command %d\n",
+                                   le16_to_cpu(pdu->StructureSize2), command);
+                       return 1;
+               } else if (command == SMB2_OPLOCK_BREAK_HE &&
+                          hdr->Status == 0 &&
+                          le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 &&
+                          le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) {
+                       /* special case for SMB2.1 lease break message */
+                       ksmbd_debug(SMB,
+                                   "Illegal request size %d for oplock break\n",
+                                   le16_to_cpu(pdu->StructureSize2));
+                       return 1;
+               }
+       }
+
+       if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
+           smb2_validate_credit_charge(hdr)) {
+               work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+               return 1;
+       }
+
+       clc_len = smb2_calc_size(hdr);
+       if (len != clc_len) {
+               /* server can return one byte more due to implied bcc[0] */
+               if (clc_len == len + 1)
+                       return 0;
+
+               /*
+                * Some Windows servers (win2016) will also pad the final
+                * PDU in a compound to 8 bytes.
+                */
+               if (ALIGN(clc_len, 8) == len)
+                       return 0;
+
+               /*
+                * Windows clients also pad up to 8 bytes when compounding.
+                * If the pad is longer than eight bytes, log the behavior
+                * (once), since it may indicate a problem, but allow it and
+                * continue since the frame is parseable.
+                */
+               if (clc_len < len) {
+                       ksmbd_debug(SMB,
+                                   "cli req padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
+                                   len, clc_len, command,
+                                   le64_to_cpu(hdr->MessageId));
+                       return 0;
+               }
+
+               if (command == SMB2_LOCK_HE && len == 88)
+                       return 0;
+
+               ksmbd_debug(SMB,
+                           "cli req too short, len %d not %d. cmd:%d mid:%llu\n",
+                           len, clc_len, command,
+                           le64_to_cpu(hdr->MessageId));
+
+               return 1;
+       }
+
+       return 0;
+}
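/*
 * Hedged sketch of the length tolerances applied above (not merged code): a
 * PDU is accepted when it matches the calculated size exactly, when the
 * calculated size is one byte larger (implied bcc[0]), when the calculated
 * size rounded up to 8 bytes matches, or when the PDU simply carries extra
 * client padding.  Anything shorter than the calculated size is rejected.
 */
#include <stdio.h>

static int smb2_len_acceptable(unsigned int len, unsigned int clc_len)
{
	if (len == clc_len)
		return 1;
	if (clc_len == len + 1)
		return 1;			/* implied bcc[0]           */
	if (((clc_len + 7) & ~7u) == len)
		return 1;			/* compound padded to 8     */
	if (clc_len < len)
		return 1;			/* extra padding, tolerated */
	return 0;				/* too short: reject        */
}

int main(void)
{
	printf("%d %d %d\n",
	       smb2_len_acceptable(88, 88),	/* exact match       */
	       smb2_len_acceptable(88, 85),	/* rounds up to 88   */
	       smb2_len_acceptable(80, 88));	/* request too short */
	return 0;
}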
+
+int smb2_negotiate_request(struct ksmbd_work *work)
+{
+       return ksmbd_smb_negotiate_common(work, SMB2_NEGOTIATE_HE);
+}
diff --git a/fs/ksmbd/smb2ops.c b/fs/ksmbd/smb2ops.c
new file mode 100644 (file)
index 0000000..1974738
--- /dev/null
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/slab.h>
+#include "glob.h"
+#include "smb2pdu.h"
+
+#include "auth.h"
+#include "connection.h"
+#include "smb_common.h"
+#include "server.h"
+
+static struct smb_version_values smb21_server_values = {
+       .version_string = SMB21_VERSION_STRING,
+       .protocol_id = SMB21_PROT_ID,
+       .capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
+       .max_read_size = SMB21_DEFAULT_IOSIZE,
+       .max_write_size = SMB21_DEFAULT_IOSIZE,
+       .max_trans_size = SMB21_DEFAULT_IOSIZE,
+       .large_lock_type = 0,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
+       .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+       .header_size = sizeof(struct smb2_hdr),
+       .max_header_size = MAX_SMB2_HDR_SIZE,
+       .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+       .lock_cmd = SMB2_LOCK,
+       .cap_unix = 0,
+       .cap_nt_find = SMB2_NT_FIND,
+       .cap_large_files = SMB2_LARGE_FILES,
+       .create_lease_size = sizeof(struct create_lease),
+       .create_durable_size = sizeof(struct create_durable_rsp),
+       .create_mxac_size = sizeof(struct create_mxac_rsp),
+       .create_disk_id_size = sizeof(struct create_disk_id_rsp),
+       .create_posix_size = sizeof(struct create_posix_rsp),
+};
+
+static struct smb_version_values smb30_server_values = {
+       .version_string = SMB30_VERSION_STRING,
+       .protocol_id = SMB30_PROT_ID,
+       .capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
+       .max_read_size = SMB3_DEFAULT_IOSIZE,
+       .max_write_size = SMB3_DEFAULT_IOSIZE,
+       .max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
+       .large_lock_type = 0,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
+       .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+       .header_size = sizeof(struct smb2_hdr),
+       .max_header_size = MAX_SMB2_HDR_SIZE,
+       .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+       .lock_cmd = SMB2_LOCK,
+       .cap_unix = 0,
+       .cap_nt_find = SMB2_NT_FIND,
+       .cap_large_files = SMB2_LARGE_FILES,
+       .create_lease_size = sizeof(struct create_lease_v2),
+       .create_durable_size = sizeof(struct create_durable_rsp),
+       .create_durable_v2_size = sizeof(struct create_durable_v2_rsp),
+       .create_mxac_size = sizeof(struct create_mxac_rsp),
+       .create_disk_id_size = sizeof(struct create_disk_id_rsp),
+       .create_posix_size = sizeof(struct create_posix_rsp),
+};
+
+static struct smb_version_values smb302_server_values = {
+       .version_string = SMB302_VERSION_STRING,
+       .protocol_id = SMB302_PROT_ID,
+       .capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
+       .max_read_size = SMB3_DEFAULT_IOSIZE,
+       .max_write_size = SMB3_DEFAULT_IOSIZE,
+       .max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
+       .large_lock_type = 0,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
+       .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+       .header_size = sizeof(struct smb2_hdr),
+       .max_header_size = MAX_SMB2_HDR_SIZE,
+       .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+       .lock_cmd = SMB2_LOCK,
+       .cap_unix = 0,
+       .cap_nt_find = SMB2_NT_FIND,
+       .cap_large_files = SMB2_LARGE_FILES,
+       .create_lease_size = sizeof(struct create_lease_v2),
+       .create_durable_size = sizeof(struct create_durable_rsp),
+       .create_durable_v2_size = sizeof(struct create_durable_v2_rsp),
+       .create_mxac_size = sizeof(struct create_mxac_rsp),
+       .create_disk_id_size = sizeof(struct create_disk_id_rsp),
+       .create_posix_size = sizeof(struct create_posix_rsp),
+};
+
+static struct smb_version_values smb311_server_values = {
+       .version_string = SMB311_VERSION_STRING,
+       .protocol_id = SMB311_PROT_ID,
+       .capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
+       .max_read_size = SMB3_DEFAULT_IOSIZE,
+       .max_write_size = SMB3_DEFAULT_IOSIZE,
+       .max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
+       .large_lock_type = 0,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
+       .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+       .header_size = sizeof(struct smb2_hdr),
+       .max_header_size = MAX_SMB2_HDR_SIZE,
+       .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+       .lock_cmd = SMB2_LOCK,
+       .cap_unix = 0,
+       .cap_nt_find = SMB2_NT_FIND,
+       .cap_large_files = SMB2_LARGE_FILES,
+       .create_lease_size = sizeof(struct create_lease_v2),
+       .create_durable_size = sizeof(struct create_durable_rsp),
+       .create_durable_v2_size = sizeof(struct create_durable_v2_rsp),
+       .create_mxac_size = sizeof(struct create_mxac_rsp),
+       .create_disk_id_size = sizeof(struct create_disk_id_rsp),
+       .create_posix_size = sizeof(struct create_posix_rsp),
+};
+
+static struct smb_version_ops smb2_0_server_ops = {
+       .get_cmd_val            =       get_smb2_cmd_val,
+       .init_rsp_hdr           =       init_smb2_rsp_hdr,
+       .set_rsp_status         =       set_smb2_rsp_status,
+       .allocate_rsp_buf       =       smb2_allocate_rsp_buf,
+       .set_rsp_credits        =       smb2_set_rsp_credits,
+       .check_user_session     =       smb2_check_user_session,
+       .get_ksmbd_tcon         =       smb2_get_ksmbd_tcon,
+       .is_sign_req            =       smb2_is_sign_req,
+       .check_sign_req         =       smb2_check_sign_req,
+       .set_sign_rsp           =       smb2_set_sign_rsp
+};
+
+static struct smb_version_ops smb3_0_server_ops = {
+       .get_cmd_val            =       get_smb2_cmd_val,
+       .init_rsp_hdr           =       init_smb2_rsp_hdr,
+       .set_rsp_status         =       set_smb2_rsp_status,
+       .allocate_rsp_buf       =       smb2_allocate_rsp_buf,
+       .set_rsp_credits        =       smb2_set_rsp_credits,
+       .check_user_session     =       smb2_check_user_session,
+       .get_ksmbd_tcon         =       smb2_get_ksmbd_tcon,
+       .is_sign_req            =       smb2_is_sign_req,
+       .check_sign_req         =       smb3_check_sign_req,
+       .set_sign_rsp           =       smb3_set_sign_rsp,
+       .generate_signingkey    =       ksmbd_gen_smb30_signingkey,
+       .generate_encryptionkey =       ksmbd_gen_smb30_encryptionkey,
+       .is_transform_hdr       =       smb3_is_transform_hdr,
+       .decrypt_req            =       smb3_decrypt_req,
+       .encrypt_resp           =       smb3_encrypt_resp
+};
+
+static struct smb_version_ops smb3_11_server_ops = {
+       .get_cmd_val            =       get_smb2_cmd_val,
+       .init_rsp_hdr           =       init_smb2_rsp_hdr,
+       .set_rsp_status         =       set_smb2_rsp_status,
+       .allocate_rsp_buf       =       smb2_allocate_rsp_buf,
+       .set_rsp_credits        =       smb2_set_rsp_credits,
+       .check_user_session     =       smb2_check_user_session,
+       .get_ksmbd_tcon         =       smb2_get_ksmbd_tcon,
+       .is_sign_req            =       smb2_is_sign_req,
+       .check_sign_req         =       smb3_check_sign_req,
+       .set_sign_rsp           =       smb3_set_sign_rsp,
+       .generate_signingkey    =       ksmbd_gen_smb311_signingkey,
+       .generate_encryptionkey =       ksmbd_gen_smb311_encryptionkey,
+       .is_transform_hdr       =       smb3_is_transform_hdr,
+       .decrypt_req            =       smb3_decrypt_req,
+       .encrypt_resp           =       smb3_encrypt_resp
+};
+
+static struct smb_version_cmds smb2_0_server_cmds[NUMBER_OF_SMB2_COMMANDS] = {
+       [SMB2_NEGOTIATE_HE]     =       { .proc = smb2_negotiate_request, },
+       [SMB2_SESSION_SETUP_HE] =       { .proc = smb2_sess_setup, },
+       [SMB2_TREE_CONNECT_HE]  =       { .proc = smb2_tree_connect,},
+       [SMB2_TREE_DISCONNECT_HE]  =    { .proc = smb2_tree_disconnect,},
+       [SMB2_LOGOFF_HE]        =       { .proc = smb2_session_logoff,},
+       [SMB2_CREATE_HE]        =       { .proc = smb2_open},
+       [SMB2_QUERY_INFO_HE]    =       { .proc = smb2_query_info},
+       [SMB2_QUERY_DIRECTORY_HE] =     { .proc = smb2_query_dir},
+       [SMB2_CLOSE_HE]         =       { .proc = smb2_close},
+       [SMB2_ECHO_HE]          =       { .proc = smb2_echo},
+       [SMB2_SET_INFO_HE]      =       { .proc = smb2_set_info},
+       [SMB2_READ_HE]          =       { .proc = smb2_read},
+       [SMB2_WRITE_HE]         =       { .proc = smb2_write},
+       [SMB2_FLUSH_HE]         =       { .proc = smb2_flush},
+       [SMB2_CANCEL_HE]        =       { .proc = smb2_cancel},
+       [SMB2_LOCK_HE]          =       { .proc = smb2_lock},
+       [SMB2_IOCTL_HE]         =       { .proc = smb2_ioctl},
+       [SMB2_OPLOCK_BREAK_HE]  =       { .proc = smb2_oplock_break},
+       [SMB2_CHANGE_NOTIFY_HE] =       { .proc = smb2_notify},
+};
+
+int init_smb2_0_server(struct ksmbd_conn *conn)
+{
+       return -EOPNOTSUPP;
+}
+
+/**
+ * init_smb2_1_server() - initialize a smb server connection with smb2.1
+ *                     command dispatcher
+ * @conn:      connection instance
+ */
+void init_smb2_1_server(struct ksmbd_conn *conn)
+{
+       conn->vals = &smb21_server_values;
+       conn->ops = &smb2_0_server_ops;
+       conn->cmds = smb2_0_server_cmds;
+       conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
+       conn->max_credits = SMB2_MAX_CREDITS;
+       conn->signing_algorithm = SIGNING_ALG_HMAC_SHA256;
+
+       if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+}
+
+/**
+ * init_smb3_0_server() - initialize a smb server connection with smb3.0
+ *                     command dispatcher
+ * @conn:      connection instance
+ */
+void init_smb3_0_server(struct ksmbd_conn *conn)
+{
+       conn->vals = &smb30_server_values;
+       conn->ops = &smb3_0_server_ops;
+       conn->cmds = smb2_0_server_cmds;
+       conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
+       conn->max_credits = SMB2_MAX_CREDITS;
+       conn->signing_algorithm = SIGNING_ALG_AES_CMAC;
+
+       if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+
+       if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION &&
+           conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+
+       if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
+}
+
+/**
+ * init_smb3_02_server() - initialize a smb server connection with smb3.02
+ *                     command dispatcher
+ * @conn:      connection instance
+ */
+void init_smb3_02_server(struct ksmbd_conn *conn)
+{
+       conn->vals = &smb302_server_values;
+       conn->ops = &smb3_0_server_ops;
+       conn->cmds = smb2_0_server_cmds;
+       conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
+       conn->max_credits = SMB2_MAX_CREDITS;
+       conn->signing_algorithm = SIGNING_ALG_AES_CMAC;
+
+       if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+
+       if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION &&
+           conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+
+       if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
+}
+
+/**
+ * init_smb3_11_server() - initialize a smb server connection with smb3.11
+ *                     command dispatcher
+ * @conn:      connection instance
+ */
+int init_smb3_11_server(struct ksmbd_conn *conn)
+{
+       conn->vals = &smb311_server_values;
+       conn->ops = &smb3_11_server_ops;
+       conn->cmds = smb2_0_server_cmds;
+       conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
+       conn->max_credits = SMB2_MAX_CREDITS;
+       conn->signing_algorithm = SIGNING_ALG_AES_CMAC;
+
+       if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+
+       if (conn->cipher_type)
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+
+       if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
+
+       INIT_LIST_HEAD(&conn->preauth_sess_table);
+       return 0;
+}
+
+void init_smb2_max_read_size(unsigned int sz)
+{
+       smb21_server_values.max_read_size = sz;
+       smb30_server_values.max_read_size = sz;
+       smb302_server_values.max_read_size = sz;
+       smb311_server_values.max_read_size = sz;
+}
+
+void init_smb2_max_write_size(unsigned int sz)
+{
+       smb21_server_values.max_write_size = sz;
+       smb30_server_values.max_write_size = sz;
+       smb302_server_values.max_write_size = sz;
+       smb311_server_values.max_write_size = sz;
+}
+
+void init_smb2_max_trans_size(unsigned int sz)
+{
+       smb21_server_values.max_trans_size = sz;
+       smb30_server_values.max_trans_size = sz;
+       smb302_server_values.max_trans_size = sz;
+       smb311_server_values.max_trans_size = sz;
+}
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
new file mode 100644 (file)
index 0000000..d329ea4
--- /dev/null
@@ -0,0 +1,8373 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <linux/syscalls.h>
+#include <linux/namei.h>
+#include <linux/statfs.h>
+#include <linux/ethtool.h>
+#include <linux/falloc.h>
+
+#include "glob.h"
+#include "smb2pdu.h"
+#include "smbfsctl.h"
+#include "oplock.h"
+#include "smbacl.h"
+
+#include "auth.h"
+#include "asn1.h"
+#include "connection.h"
+#include "transport_ipc.h"
+#include "transport_rdma.h"
+#include "vfs.h"
+#include "vfs_cache.h"
+#include "misc.h"
+
+#include "server.h"
+#include "smb_common.h"
+#include "smbstatus.h"
+#include "ksmbd_work.h"
+#include "mgmt/user_config.h"
+#include "mgmt/share_config.h"
+#include "mgmt/tree_connect.h"
+#include "mgmt/user_session.h"
+#include "mgmt/ksmbd_ida.h"
+#include "ndr.h"
+
+static void __wbuf(struct ksmbd_work *work, void **req, void **rsp)
+{
+       if (work->next_smb2_rcv_hdr_off) {
+               *req = ksmbd_req_buf_next(work);
+               *rsp = ksmbd_resp_buf_next(work);
+       } else {
+               *req = work->request_buf;
+               *rsp = work->response_buf;
+       }
+}
+
+#define WORK_BUFFERS(w, rq, rs)        __wbuf((w), (void **)&(rq), (void **)&(rs))
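+
+/*
+ * Sketch of how the helpers above are meant to be used by the command
+ * handlers in this file: a handler declares its request/response types and
+ * lets the macro pick either the compound-chain buffers or the plain work
+ * buffers, e.g.
+ *
+ *     struct smb2_read_req *req;
+ *     struct smb2_read_rsp *rsp;
+ *
+ *     WORK_BUFFERS(work, req, rsp);
+ */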
+
+/**
+ * check_session_id() - check for valid session id in smb header
+ * @conn:      connection instance
+ * @id:                session id from smb header
+ *
+ * Return:      true if valid session id, otherwise false
+ */
+static inline bool check_session_id(struct ksmbd_conn *conn, u64 id)
+{
+       struct ksmbd_session *sess;
+
+       if (id == 0 || id == -1)
+               return false;
+
+       sess = ksmbd_session_lookup_all(conn, id);
+       if (sess)
+               return true;
+       pr_err("Invalid user session id: %llu\n", id);
+       return false;
+}
+
+struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn *conn)
+{
+       struct channel *chann;
+
+       list_for_each_entry(chann, &sess->ksmbd_chann_list, chann_list) {
+               if (chann->conn == conn)
+                       return chann;
+       }
+
+       return NULL;
+}
+
+/**
+ * smb2_get_ksmbd_tcon() - get tree connection information using a tree id.
+ * @work:      smb work
+ *
+ * Return:     0 if there is a matching tree connection or these are
+ *             skippable commands, otherwise error
+ */
+int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
+{
+       struct smb2_hdr *req_hdr = work->request_buf;
+       int tree_id;
+
+       work->tcon = NULL;
+       if (work->conn->ops->get_cmd_val(work) == SMB2_TREE_CONNECT_HE ||
+           work->conn->ops->get_cmd_val(work) ==  SMB2_CANCEL_HE ||
+           work->conn->ops->get_cmd_val(work) ==  SMB2_LOGOFF_HE) {
+               ksmbd_debug(SMB, "skip to check tree connect request\n");
+               return 0;
+       }
+
+       if (xa_empty(&work->sess->tree_conns)) {
+               ksmbd_debug(SMB, "NO tree connected\n");
+               return -ENOENT;
+       }
+
+       tree_id = le32_to_cpu(req_hdr->Id.SyncId.TreeId);
+       work->tcon = ksmbd_tree_conn_lookup(work->sess, tree_id);
+       if (!work->tcon) {
+               pr_err("Invalid tid %d\n", tree_id);
+               return -EINVAL;
+       }
+
+       return 1;
+}
+
+/**
+ * smb2_set_err_rsp() - set error response code on smb response
+ * @work:      smb work containing response buffer
+ */
+void smb2_set_err_rsp(struct ksmbd_work *work)
+{
+       struct smb2_err_rsp *err_rsp;
+
+       if (work->next_smb2_rcv_hdr_off)
+               err_rsp = ksmbd_resp_buf_next(work);
+       else
+               err_rsp = work->response_buf;
+
+       if (err_rsp->hdr.Status != STATUS_STOPPED_ON_SYMLINK) {
+               err_rsp->StructureSize = SMB2_ERROR_STRUCTURE_SIZE2_LE;
+               err_rsp->ErrorContextCount = 0;
+               err_rsp->Reserved = 0;
+               err_rsp->ByteCount = 0;
+               err_rsp->ErrorData[0] = 0;
+               inc_rfc1001_len(work->response_buf, SMB2_ERROR_STRUCTURE_SIZE2);
+       }
+}
+
+/**
+ * is_smb2_neg_cmd() - is it smb2 negotiation command
+ * @work:      smb work containing smb header
+ *
+ * Return:      true if smb2 negotiation command, otherwise false
+ */
+bool is_smb2_neg_cmd(struct ksmbd_work *work)
+{
+       struct smb2_hdr *hdr = work->request_buf;
+
+       /* is it SMB2 header ? */
+       if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
+               return false;
+
+       /* make sure it is request not response message */
+       if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
+               return false;
+
+       if (hdr->Command != SMB2_NEGOTIATE)
+               return false;
+
+       return true;
+}
+
+/**
+ * is_smb2_rsp() - is it smb2 response
+ * @work:      smb work containing smb response buffer
+ *
+ * Return:      true if smb2 response, otherwise false
+ */
+bool is_smb2_rsp(struct ksmbd_work *work)
+{
+       struct smb2_hdr *hdr = work->response_buf;
+
+       /* is it SMB2 header ? */
+       if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
+               return false;
+
+       /* make sure it is response not request message */
+       if (!(hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR))
+               return false;
+
+       return true;
+}
+
+/**
+ * get_smb2_cmd_val() - get smb command code from smb header
+ * @work:      smb work containing smb request buffer
+ *
+ * Return:      smb2 request command value
+ */
+u16 get_smb2_cmd_val(struct ksmbd_work *work)
+{
+       struct smb2_hdr *rcv_hdr;
+
+       if (work->next_smb2_rcv_hdr_off)
+               rcv_hdr = ksmbd_req_buf_next(work);
+       else
+               rcv_hdr = work->request_buf;
+       return le16_to_cpu(rcv_hdr->Command);
+}
+
+/**
+ * set_smb2_rsp_status() - set error response code on smb2 header
+ * @work:      smb work containing response buffer
+ * @err:       error response code
+ */
+void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err)
+{
+       struct smb2_hdr *rsp_hdr;
+
+       if (work->next_smb2_rcv_hdr_off)
+               rsp_hdr = ksmbd_resp_buf_next(work);
+       else
+               rsp_hdr = work->response_buf;
+       rsp_hdr->Status = err;
+       smb2_set_err_rsp(work);
+}
+
+/**
+ * init_smb2_neg_rsp() - initialize smb2 response for negotiate command
+ * @work:      smb work containing smb request buffer
+ *
+ * The smb2 negotiate response is sent in reply to an smb1 negotiate command
+ * for dialect auto-negotiation.
+ */
+int init_smb2_neg_rsp(struct ksmbd_work *work)
+{
+       struct smb2_hdr *rsp_hdr;
+       struct smb2_negotiate_rsp *rsp;
+       struct ksmbd_conn *conn = work->conn;
+
+       if (conn->need_neg == false)
+               return -EINVAL;
+       if (!(conn->dialect >= SMB20_PROT_ID &&
+             conn->dialect <= SMB311_PROT_ID))
+               return -EINVAL;
+
+       rsp_hdr = work->response_buf;
+
+       memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+
+       rsp_hdr->smb2_buf_length =
+               cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
+
+       rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
+       rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+       rsp_hdr->CreditRequest = cpu_to_le16(2);
+       rsp_hdr->Command = SMB2_NEGOTIATE;
+       rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
+       rsp_hdr->NextCommand = 0;
+       rsp_hdr->MessageId = 0;
+       rsp_hdr->Id.SyncId.ProcessId = 0;
+       rsp_hdr->Id.SyncId.TreeId = 0;
+       rsp_hdr->SessionId = 0;
+       memset(rsp_hdr->Signature, 0, 16);
+
+       rsp = work->response_buf;
+
+       WARN_ON(ksmbd_conn_good(work));
+
+       rsp->StructureSize = cpu_to_le16(65);
+       ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
+       rsp->DialectRevision = cpu_to_le16(conn->dialect);
+       /* Not setting conn guid rsp->ServerGUID, as it is
+        * not used by the client for identifying the connection
+        */
+       rsp->Capabilities = cpu_to_le32(conn->vals->capabilities);
+       /* Default Max Message Size till SMB2.0, 64K*/
+       rsp->MaxTransactSize = cpu_to_le32(conn->vals->max_trans_size);
+       rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
+       rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);
+
+       rsp->SystemTime = cpu_to_le64(ksmbd_systime());
+       rsp->ServerStartTime = 0;
+
+       rsp->SecurityBufferOffset = cpu_to_le16(128);
+       rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
+       ksmbd_copy_gss_neg_header(((char *)(&rsp->hdr) +
+               sizeof(rsp->hdr.smb2_buf_length)) +
+               le16_to_cpu(rsp->SecurityBufferOffset));
+       inc_rfc1001_len(rsp, sizeof(struct smb2_negotiate_rsp) -
+               sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
+               AUTH_GSS_LENGTH);
+       rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
+       if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY)
+               rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
+       conn->use_spnego = true;
+
+       ksmbd_conn_set_need_negotiate(work);
+       return 0;
+}
+
+static int smb2_consume_credit_charge(struct ksmbd_work *work,
+                                     unsigned short credit_charge)
+{
+       struct ksmbd_conn *conn = work->conn;
+       unsigned int rsp_credits = 1;
+
+       if (!conn->total_credits)
+               return 0;
+
+       if (credit_charge > 0)
+               rsp_credits = credit_charge;
+
+       conn->total_credits -= rsp_credits;
+       return rsp_credits;
+}
+
+/**
+ * smb2_set_rsp_credits() - set number of credits in response buffer
+ * @work:      smb work containing smb response buffer
+ */
+int smb2_set_rsp_credits(struct ksmbd_work *work)
+{
+       struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
+       struct smb2_hdr *hdr = ksmbd_resp_buf_next(work);
+       struct ksmbd_conn *conn = work->conn;
+       unsigned short credits_requested = le16_to_cpu(req_hdr->CreditRequest);
+       unsigned short credit_charge = 1, credits_granted = 0;
+       unsigned short aux_max, aux_credits, min_credits;
+       int rsp_credit_charge;
+
+       if (hdr->Command == SMB2_CANCEL)
+               goto out;
+
+       /* get default minimum credits by shifting maximum credits by 4 */
+       min_credits = conn->max_credits >> 4;
+
+       if (conn->total_credits >= conn->max_credits) {
+               pr_err("Total credits overflow: %d\n", conn->total_credits);
+               conn->total_credits = min_credits;
+       }
+
+       rsp_credit_charge =
+               smb2_consume_credit_charge(work, le16_to_cpu(req_hdr->CreditCharge));
+       if (rsp_credit_charge < 0)
+               return -EINVAL;
+
+       hdr->CreditCharge = cpu_to_le16(rsp_credit_charge);
+
+       if (credits_requested > 0) {
+               aux_credits = credits_requested - 1;
+               aux_max = 32;
+               if (hdr->Command == SMB2_NEGOTIATE)
+                       aux_max = 0;
+               aux_credits = (aux_credits < aux_max) ? aux_credits : aux_max;
+               credits_granted = aux_credits + credit_charge;
+
+               /* if the credits granted to this client would exceed the
+                * default minimum credits, cap the grant at that limit.
+                */
+               if ((conn->total_credits + credits_granted) > min_credits)
+                       credits_granted = min_credits - conn->total_credits;
+               /*
+                * TODO: Need to adjust the CreditRequest value according to
+                * the current CPU load
+                */
+       } else if (conn->total_credits == 0) {
+               credits_granted = 1;
+       }
+
+       conn->total_credits += credits_granted;
+       work->credits_granted += credits_granted;
+
+       if (!req_hdr->NextCommand) {
+               /* Update CreditRequest in last request */
+               hdr->CreditRequest = cpu_to_le16(work->credits_granted);
+       }
+out:
+       ksmbd_debug(SMB,
+                   "credits: requested[%d] granted[%d] total_granted[%d]\n",
+                   credits_requested, credits_granted,
+                   conn->total_credits);
+       return 0;
+}
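+
+/*
+ * Worked example of the credit clamping above, using illustrative numbers:
+ * with max_credits = 8192 the minimum pool is min_credits = 8192 >> 4 = 512.
+ * A non-NEGOTIATE request asking for 100 credits gets aux_credits = 99,
+ * capped to aux_max = 32, so credits_granted = 32 + 1 = 33, unless
+ * total_credits + 33 would exceed 512, in which case only the remaining
+ * headroom is granted.
+ */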
+
+/**
+ * init_chained_smb2_rsp() - initialize smb2 chained response
+ * @work:      smb work containing smb response buffer
+ */
+static void init_chained_smb2_rsp(struct ksmbd_work *work)
+{
+       struct smb2_hdr *req = ksmbd_req_buf_next(work);
+       struct smb2_hdr *rsp = ksmbd_resp_buf_next(work);
+       struct smb2_hdr *rsp_hdr;
+       struct smb2_hdr *rcv_hdr;
+       int next_hdr_offset = 0;
+       int len, new_len;
+
+       /* Len of this response = updated RFC len - offset of previous cmd
+        * in the compound rsp
+        */
+
+       /* Store the current local FID, which may be needed by a subsequent
+        * command in the compound request
+        */
+       if (req->Command == SMB2_CREATE && rsp->Status == STATUS_SUCCESS) {
+               work->compound_fid =
+                       le64_to_cpu(((struct smb2_create_rsp *)rsp)->
+                               VolatileFileId);
+               work->compound_pfid =
+                       le64_to_cpu(((struct smb2_create_rsp *)rsp)->
+                               PersistentFileId);
+               work->compound_sid = le64_to_cpu(rsp->SessionId);
+       }
+
+       len = get_rfc1002_len(work->response_buf) - work->next_smb2_rsp_hdr_off;
+       next_hdr_offset = le32_to_cpu(req->NextCommand);
+
+       new_len = ALIGN(len, 8);
+       inc_rfc1001_len(work->response_buf, ((sizeof(struct smb2_hdr) - 4)
+                       + new_len - len));
+       rsp->NextCommand = cpu_to_le32(new_len);
+
+       work->next_smb2_rcv_hdr_off += next_hdr_offset;
+       work->next_smb2_rsp_hdr_off += new_len;
+       ksmbd_debug(SMB,
+                   "Compound req new_len = %d rcv off = %d rsp off = %d\n",
+                   new_len, work->next_smb2_rcv_hdr_off,
+                   work->next_smb2_rsp_hdr_off);
+
+       rsp_hdr = ksmbd_resp_buf_next(work);
+       rcv_hdr = ksmbd_req_buf_next(work);
+
+       if (!(rcv_hdr->Flags & SMB2_FLAGS_RELATED_OPERATIONS)) {
+               ksmbd_debug(SMB, "related flag should be set\n");
+               work->compound_fid = KSMBD_NO_FID;
+               work->compound_pfid = KSMBD_NO_FID;
+       }
+       memset((char *)rsp_hdr + 4, 0, sizeof(struct smb2_hdr) + 2);
+       rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
+       rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+       rsp_hdr->Command = rcv_hdr->Command;
+
+       /*
+        * Message is response. We don't grant oplock yet.
+        */
+       rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR |
+                               SMB2_FLAGS_RELATED_OPERATIONS);
+       rsp_hdr->NextCommand = 0;
+       rsp_hdr->MessageId = rcv_hdr->MessageId;
+       rsp_hdr->Id.SyncId.ProcessId = rcv_hdr->Id.SyncId.ProcessId;
+       rsp_hdr->Id.SyncId.TreeId = rcv_hdr->Id.SyncId.TreeId;
+       rsp_hdr->SessionId = rcv_hdr->SessionId;
+       memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16);
+}
+
+/**
+ * is_chained_smb2_message() - check for chained command
+ * @work:      smb work containing smb request buffer
+ *
+ * Return:      true if chained request, otherwise false
+ */
+bool is_chained_smb2_message(struct ksmbd_work *work)
+{
+       struct smb2_hdr *hdr = work->request_buf;
+       unsigned int len;
+
+       if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
+               return false;
+
+       hdr = ksmbd_req_buf_next(work);
+       if (le32_to_cpu(hdr->NextCommand) > 0) {
+               ksmbd_debug(SMB, "got SMB2 chained command\n");
+               init_chained_smb2_rsp(work);
+               return true;
+       } else if (work->next_smb2_rcv_hdr_off) {
+               /*
+                * This is the last request in the chained command;
+                * align the response to an 8-byte boundary
+                */
+               len = ALIGN(get_rfc1002_len(work->response_buf), 8);
+               len = len - get_rfc1002_len(work->response_buf);
+               if (len) {
+                       ksmbd_debug(SMB, "padding len %u\n", len);
+                       inc_rfc1001_len(work->response_buf, len);
+                       if (work->aux_payload_sz)
+                               work->aux_payload_sz += len;
+               }
+       }
+       return false;
+}
+
+/**
+ * init_smb2_rsp_hdr() - initialize smb2 response
+ * @work:      smb work containing smb request buffer
+ *
+ * Return:      0
+ */
+int init_smb2_rsp_hdr(struct ksmbd_work *work)
+{
+       struct smb2_hdr *rsp_hdr = work->response_buf;
+       struct smb2_hdr *rcv_hdr = work->request_buf;
+       struct ksmbd_conn *conn = work->conn;
+
+       memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+       rsp_hdr->smb2_buf_length =
+               cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
+       rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
+       rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+       rsp_hdr->Command = rcv_hdr->Command;
+
+       /*
+        * Message is response. We don't grant oplock yet.
+        */
+       rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
+       rsp_hdr->NextCommand = 0;
+       rsp_hdr->MessageId = rcv_hdr->MessageId;
+       rsp_hdr->Id.SyncId.ProcessId = rcv_hdr->Id.SyncId.ProcessId;
+       rsp_hdr->Id.SyncId.TreeId = rcv_hdr->Id.SyncId.TreeId;
+       rsp_hdr->SessionId = rcv_hdr->SessionId;
+       memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16);
+
+       work->syncronous = true;
+       if (work->async_id) {
+               ksmbd_release_id(&conn->async_ida, work->async_id);
+               work->async_id = 0;
+       }
+
+       return 0;
+}
+
+/**
+ * smb2_allocate_rsp_buf() - allocate smb2 response buffer
+ * @work:      smb work containing smb request buffer
+ *
+ * Return:      0 on success, otherwise -ENOMEM
+ */
+int smb2_allocate_rsp_buf(struct ksmbd_work *work)
+{
+       struct smb2_hdr *hdr = work->request_buf;
+       size_t small_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+       size_t large_sz = work->conn->vals->max_trans_size + MAX_SMB2_HDR_SIZE;
+       size_t sz = small_sz;
+       int cmd = le16_to_cpu(hdr->Command);
+
+       if (cmd == SMB2_IOCTL_HE || cmd == SMB2_QUERY_DIRECTORY_HE)
+               sz = large_sz;
+
+       if (cmd == SMB2_QUERY_INFO_HE) {
+               struct smb2_query_info_req *req;
+
+               req = work->request_buf;
+               if (req->InfoType == SMB2_O_INFO_FILE &&
+                   (req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
+                    req->FileInfoClass == FILE_ALL_INFORMATION))
+                       sz = large_sz;
+       }
+
+       /* allocate large response buf for chained commands */
+       if (le32_to_cpu(hdr->NextCommand) > 0)
+               sz = large_sz;
+
+       work->response_buf = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
+       if (!work->response_buf)
+               return -ENOMEM;
+
+       work->response_sz = sz;
+       return 0;
+}
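+
+/*
+ * A sketch of the sizing policy above: most commands get a small
+ * MAX_CIFS_SMALL_BUFFER_SIZE response buffer, while IOCTL, QUERY_DIRECTORY,
+ * EA/all-information QUERY_INFO and compounded requests are given
+ * max_trans_size + MAX_SMB2_HDR_SIZE up front, since their responses may
+ * approach the negotiated transaction size.
+ */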
+
+/**
+ * smb2_check_user_session() - check for valid session for a user
+ * @work:      smb work containing smb request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb2_check_user_session(struct ksmbd_work *work)
+{
+       struct smb2_hdr *req_hdr = work->request_buf;
+       struct ksmbd_conn *conn = work->conn;
+       unsigned int cmd = conn->ops->get_cmd_val(work);
+       unsigned long long sess_id;
+
+       work->sess = NULL;
+       /*
+        * The SMB2_ECHO, SMB2_NEGOTIATE and SMB2_SESSION_SETUP commands do
+        * not require a session id, so there is no need to validate the user
+        * session for these commands.
+        */
+       if (cmd == SMB2_ECHO_HE || cmd == SMB2_NEGOTIATE_HE ||
+           cmd == SMB2_SESSION_SETUP_HE)
+               return 0;
+
+       if (!ksmbd_conn_good(work))
+               return -EINVAL;
+
+       sess_id = le64_to_cpu(req_hdr->SessionId);
+       /* Check for validity of user session */
+       work->sess = ksmbd_session_lookup_all(conn, sess_id);
+       if (work->sess)
+               return 1;
+       ksmbd_debug(SMB, "Invalid user session, Uid %llu\n", sess_id);
+       return -EINVAL;
+}
+
+static void destroy_previous_session(struct ksmbd_user *user, u64 id)
+{
+       struct ksmbd_session *prev_sess = ksmbd_session_lookup_slowpath(id);
+       struct ksmbd_user *prev_user;
+
+       if (!prev_sess)
+               return;
+
+       prev_user = prev_sess->user;
+
+       if (!prev_user ||
+           strcmp(user->name, prev_user->name) ||
+           user->passkey_sz != prev_user->passkey_sz ||
+           memcmp(user->passkey, prev_user->passkey, user->passkey_sz)) {
+               put_session(prev_sess);
+               return;
+       }
+
+       put_session(prev_sess);
+       ksmbd_session_destroy(prev_sess);
+}
+
+/**
+ * smb2_get_name() - get filename string from the on-the-wire smb format
+ * @share:     ksmbd_share_config pointer
+ * @src:       source buffer
+ * @maxlen:    maximum length of the source string
+ * @local_nls: nls_table pointer
+ *
+ * Return:      converted filename on success, otherwise error ptr
+ */
+static char *
+smb2_get_name(struct ksmbd_share_config *share, const char *src,
+             const int maxlen, struct nls_table *local_nls)
+{
+       char *name, *unixname;
+
+       name = smb_strndup_from_utf16(src, maxlen, 1, local_nls);
+       if (IS_ERR(name)) {
+               pr_err("failed to get name %ld\n", PTR_ERR(name));
+               return name;
+       }
+
+       /* change it to absolute unix name */
+       ksmbd_conv_path_to_unix(name);
+       ksmbd_strip_last_slash(name);
+
+       unixname = convert_to_unix_name(share, name);
+       kfree(name);
+       if (!unixname) {
+               pr_err("can not convert absolute name\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       ksmbd_debug(SMB, "absolute name = %s\n", unixname);
+       return unixname;
+}
+
+int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
+{
+       struct smb2_hdr *rsp_hdr;
+       struct ksmbd_conn *conn = work->conn;
+       int id;
+
+       rsp_hdr = work->response_buf;
+       rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
+
+       id = ksmbd_acquire_async_msg_id(&conn->async_ida);
+       if (id < 0) {
+               pr_err("Failed to alloc async message id\n");
+               return id;
+       }
+       work->syncronous = false;
+       work->async_id = id;
+       rsp_hdr->Id.AsyncId = cpu_to_le64(id);
+
+       ksmbd_debug(SMB,
+                   "Send interim Response to inform async request id : %d\n",
+                   work->async_id);
+
+       work->cancel_fn = fn;
+       work->cancel_argv = arg;
+
+       if (list_empty(&work->async_request_entry)) {
+               spin_lock(&conn->request_lock);
+               list_add_tail(&work->async_request_entry, &conn->async_requests);
+               spin_unlock(&conn->request_lock);
+       }
+
+       return 0;
+}
+
+void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
+{
+       struct smb2_hdr *rsp_hdr;
+
+       rsp_hdr = work->response_buf;
+       smb2_set_err_rsp(work);
+       rsp_hdr->Status = status;
+
+       work->multiRsp = 1;
+       ksmbd_conn_write(work);
+       rsp_hdr->Status = 0;
+       work->multiRsp = 0;
+}
+
+static __le32 smb2_get_reparse_tag_special_file(umode_t mode)
+{
+       if (S_ISDIR(mode) || S_ISREG(mode))
+               return 0;
+
+       if (S_ISLNK(mode))
+               return IO_REPARSE_TAG_LX_SYMLINK_LE;
+       else if (S_ISFIFO(mode))
+               return IO_REPARSE_TAG_LX_FIFO_LE;
+       else if (S_ISSOCK(mode))
+               return IO_REPARSE_TAG_AF_UNIX_LE;
+       else if (S_ISCHR(mode))
+               return IO_REPARSE_TAG_LX_CHR_LE;
+       else if (S_ISBLK(mode))
+               return IO_REPARSE_TAG_LX_BLK_LE;
+
+       return 0;
+}
+
+/**
+ * smb2_get_dos_mode() - get file mode in dos format from unix mode
+ * @stat:      kstat containing file mode
+ * @attribute: attribute flags
+ *
+ * Return:      converted dos mode
+ */
+static int smb2_get_dos_mode(struct kstat *stat, int attribute)
+{
+       int attr = 0;
+
+       if (S_ISDIR(stat->mode)) {
+               attr = ATTR_DIRECTORY |
+                       (attribute & (ATTR_HIDDEN | ATTR_SYSTEM));
+       } else {
+               attr = (attribute & 0x00005137) | ATTR_ARCHIVE;
+               attr &= ~(ATTR_DIRECTORY);
+               if (S_ISREG(stat->mode) && (server_conf.share_fake_fscaps &
+                               FILE_SUPPORTS_SPARSE_FILES))
+                       attr |= ATTR_SPARSE;
+
+               if (smb2_get_reparse_tag_special_file(stat->mode))
+                       attr |= ATTR_REPARSE;
+       }
+
+       return attr;
+}
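+
+/*
+ * For illustration: a directory maps to ATTR_DIRECTORY plus any requested
+ * HIDDEN/SYSTEM bits, a regular file on a share faking
+ * FILE_SUPPORTS_SPARSE_FILES maps to ATTR_ARCHIVE | ATTR_SPARSE, and
+ * special files (fifo, socket, symlink, char/block device) additionally
+ * pick up ATTR_REPARSE via smb2_get_reparse_tag_special_file().
+ */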
+
+static void build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt,
+                              __le16 hash_id)
+{
+       pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
+       pneg_ctxt->DataLength = cpu_to_le16(38);
+       pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
+       pneg_ctxt->Reserved = cpu_to_le32(0);
+       pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
+       get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
+       pneg_ctxt->HashAlgorithms = hash_id;
+}
+
+static void build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt,
+                              __le16 cipher_type)
+{
+       pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
+       pneg_ctxt->DataLength = cpu_to_le16(4);
+       pneg_ctxt->Reserved = cpu_to_le32(0);
+       pneg_ctxt->CipherCount = cpu_to_le16(1);
+       pneg_ctxt->Ciphers[0] = cipher_type;
+}
+
+static void build_compression_ctxt(struct smb2_compression_ctx *pneg_ctxt,
+                                  __le16 comp_algo)
+{
+       pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
+       pneg_ctxt->DataLength =
+               cpu_to_le16(sizeof(struct smb2_compression_ctx)
+                       - sizeof(struct smb2_neg_context));
+       pneg_ctxt->Reserved = cpu_to_le32(0);
+       pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(1);
+       pneg_ctxt->Reserved1 = cpu_to_le32(0);
+       pneg_ctxt->CompressionAlgorithms[0] = comp_algo;
+}
+
+static void build_sign_cap_ctxt(struct smb2_signing_capabilities *pneg_ctxt,
+                               __le16 sign_algo)
+{
+       pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
+       pneg_ctxt->DataLength =
+               cpu_to_le16((sizeof(struct smb2_signing_capabilities) + 2)
+                       - sizeof(struct smb2_neg_context));
+       pneg_ctxt->Reserved = cpu_to_le32(0);
+       pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(1);
+       pneg_ctxt->SigningAlgorithms[0] = sign_algo;
+}
+
+static void build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
+{
+       pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
+       pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
+       /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
+       pneg_ctxt->Name[0] = 0x93;
+       pneg_ctxt->Name[1] = 0xAD;
+       pneg_ctxt->Name[2] = 0x25;
+       pneg_ctxt->Name[3] = 0x50;
+       pneg_ctxt->Name[4] = 0x9C;
+       pneg_ctxt->Name[5] = 0xB4;
+       pneg_ctxt->Name[6] = 0x11;
+       pneg_ctxt->Name[7] = 0xE7;
+       pneg_ctxt->Name[8] = 0xB4;
+       pneg_ctxt->Name[9] = 0x23;
+       pneg_ctxt->Name[10] = 0x83;
+       pneg_ctxt->Name[11] = 0xDE;
+       pneg_ctxt->Name[12] = 0x96;
+       pneg_ctxt->Name[13] = 0x8B;
+       pneg_ctxt->Name[14] = 0xCD;
+       pneg_ctxt->Name[15] = 0x7C;
+}
+
+static void assemble_neg_contexts(struct ksmbd_conn *conn,
+                                 struct smb2_negotiate_rsp *rsp)
+{
+       /* +4 is to account for the RFC1001 len field */
+       char *pneg_ctxt = (char *)rsp +
+                       le32_to_cpu(rsp->NegotiateContextOffset) + 4;
+       int neg_ctxt_cnt = 1;
+       int ctxt_size;
+
+       ksmbd_debug(SMB,
+                   "assemble SMB2_PREAUTH_INTEGRITY_CAPABILITIES context\n");
+       build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt,
+                          conn->preauth_info->Preauth_HashId);
+       rsp->NegotiateContextCount = cpu_to_le16(neg_ctxt_cnt);
+       inc_rfc1001_len(rsp, AUTH_GSS_PADDING);
+       ctxt_size = sizeof(struct smb2_preauth_neg_context);
+       /* Round to 8 byte boundary */
+       pneg_ctxt += round_up(sizeof(struct smb2_preauth_neg_context), 8);
+
+       if (conn->cipher_type) {
+               ctxt_size = round_up(ctxt_size, 8);
+               ksmbd_debug(SMB,
+                           "assemble SMB2_ENCRYPTION_CAPABILITIES context\n");
+               build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt,
+                                  conn->cipher_type);
+               rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
+               ctxt_size += sizeof(struct smb2_encryption_neg_context) + 2;
+               /* Round to 8 byte boundary */
+               pneg_ctxt +=
+                       round_up(sizeof(struct smb2_encryption_neg_context) + 2,
+                                8);
+       }
+
+       if (conn->compress_algorithm) {
+               ctxt_size = round_up(ctxt_size, 8);
+               ksmbd_debug(SMB,
+                           "assemble SMB2_COMPRESSION_CAPABILITIES context\n");
+               /* Temporarily set to SMB3_COMPRESS_NONE */
+               build_compression_ctxt((struct smb2_compression_ctx *)pneg_ctxt,
+                                      conn->compress_algorithm);
+               rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
+               ctxt_size += sizeof(struct smb2_compression_ctx) + 2;
+               /* Round to 8 byte boundary */
+               pneg_ctxt += round_up(sizeof(struct smb2_compression_ctx) + 2,
+                                     8);
+       }
+
+       if (conn->posix_ext_supported) {
+               ctxt_size = round_up(ctxt_size, 8);
+               ksmbd_debug(SMB,
+                           "assemble SMB2_POSIX_EXTENSIONS_AVAILABLE context\n");
+               build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
+               rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
+               ctxt_size += sizeof(struct smb2_posix_neg_context);
+               /* Round to 8 byte boundary */
+               pneg_ctxt += round_up(sizeof(struct smb2_posix_neg_context), 8);
+       }
+
+       if (conn->signing_negotiated) {
+               ctxt_size = round_up(ctxt_size, 8);
+               ksmbd_debug(SMB,
+                           "assemble SMB2_SIGNING_CAPABILITIES context\n");
+               build_sign_cap_ctxt((struct smb2_signing_capabilities *)pneg_ctxt,
+                                   conn->signing_algorithm);
+               rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
+               ctxt_size += sizeof(struct smb2_signing_capabilities) + 2;
+       }
+
+       inc_rfc1001_len(rsp, ctxt_size);
+}
+
+static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
+                                 struct smb2_preauth_neg_context *pneg_ctxt)
+{
+       __le32 err = STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
+
+       if (pneg_ctxt->HashAlgorithms == SMB2_PREAUTH_INTEGRITY_SHA512) {
+               conn->preauth_info->Preauth_HashId =
+                       SMB2_PREAUTH_INTEGRITY_SHA512;
+               err = STATUS_SUCCESS;
+       }
+
+       return err;
+}
+
+static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
+                               struct smb2_encryption_neg_context *pneg_ctxt,
+                               int len_of_ctxts)
+{
+       int cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount);
+       int i, cphs_size = cph_cnt * sizeof(__le16);
+
+       conn->cipher_type = 0;
+
+       if (sizeof(struct smb2_encryption_neg_context) + cphs_size >
+           len_of_ctxts) {
+               pr_err("Invalid cipher count(%d)\n", cph_cnt);
+               return;
+       }
+
+       if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION))
+               return;
+
+       for (i = 0; i < cph_cnt; i++) {
+               if (pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES128_GCM ||
+                   pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES128_CCM ||
+                   pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES256_CCM ||
+                   pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES256_GCM) {
+                       ksmbd_debug(SMB, "Cipher ID = 0x%x\n",
+                                   pneg_ctxt->Ciphers[i]);
+                       conn->cipher_type = pneg_ctxt->Ciphers[i];
+                       break;
+               }
+       }
+}
+
+static void decode_compress_ctxt(struct ksmbd_conn *conn,
+                                struct smb2_compression_ctx *pneg_ctxt)
+{
+       conn->compress_algorithm = SMB3_COMPRESS_NONE;
+}
+
+static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
+                                struct smb2_signing_capabilities *pneg_ctxt,
+                                int len_of_ctxts)
+{
+       int sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount);
+       int i, sign_alos_size = sign_algo_cnt * sizeof(__le16);
+
+       conn->signing_negotiated = false;
+
+       if (sizeof(struct smb2_signing_capabilities) + sign_alos_size >
+           len_of_ctxts) {
+               pr_err("Invalid signing algorithm count(%d)\n", sign_algo_cnt);
+               return;
+       }
+
+       for (i = 0; i < sign_algo_cnt; i++) {
+               if (pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_HMAC_SHA256 ||
+                   pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_AES_CMAC) {
+                       ksmbd_debug(SMB, "Signing Algorithm ID = 0x%x\n",
+                                   pneg_ctxt->SigningAlgorithms[i]);
+                       conn->signing_negotiated = true;
+                       conn->signing_algorithm =
+                               pneg_ctxt->SigningAlgorithms[i];
+                       break;
+               }
+       }
+}
+
+static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
+                                     struct smb2_negotiate_req *req)
+{
+       /* +4 is to account for the RFC1001 len field */
+       struct smb2_neg_context *pctx = (struct smb2_neg_context *)((char *)req + 4);
+       int i = 0, len_of_ctxts;
+       int offset = le32_to_cpu(req->NegotiateContextOffset);
+       int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount);
+       int len_of_smb = be32_to_cpu(req->hdr.smb2_buf_length);
+       __le32 status = STATUS_INVALID_PARAMETER;
+
+       ksmbd_debug(SMB, "decoding %d negotiate contexts\n", neg_ctxt_cnt);
+       if (len_of_smb <= offset) {
+               ksmbd_debug(SMB, "Invalid response: negotiate context offset\n");
+               return status;
+       }
+
+       len_of_ctxts = len_of_smb - offset;
+
+       while (i++ < neg_ctxt_cnt) {
+               int clen;
+
+               /* check that offset is not beyond end of SMB */
+               if (len_of_ctxts == 0)
+                       break;
+
+               if (len_of_ctxts < sizeof(struct smb2_neg_context))
+                       break;
+
+               pctx = (struct smb2_neg_context *)((char *)pctx + offset);
+               clen = le16_to_cpu(pctx->DataLength);
+               if (clen + sizeof(struct smb2_neg_context) > len_of_ctxts)
+                       break;
+
+               if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) {
+                       ksmbd_debug(SMB,
+                                   "deassemble SMB2_PREAUTH_INTEGRITY_CAPABILITIES context\n");
+                       if (conn->preauth_info->Preauth_HashId)
+                               break;
+
+                       status = decode_preauth_ctxt(conn,
+                                                    (struct smb2_preauth_neg_context *)pctx);
+                       if (status != STATUS_SUCCESS)
+                               break;
+               } else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) {
+                       ksmbd_debug(SMB,
+                                   "deassemble SMB2_ENCRYPTION_CAPABILITIES context\n");
+                       if (conn->cipher_type)
+                               break;
+
+                       decode_encrypt_ctxt(conn,
+                                           (struct smb2_encryption_neg_context *)pctx,
+                                           len_of_ctxts);
+               } else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES) {
+                       ksmbd_debug(SMB,
+                                   "deassemble SMB2_COMPRESSION_CAPABILITIES context\n");
+                       if (conn->compress_algorithm)
+                               break;
+
+                       decode_compress_ctxt(conn,
+                                            (struct smb2_compression_ctx *)pctx);
+               } else if (pctx->ContextType == SMB2_NETNAME_NEGOTIATE_CONTEXT_ID) {
+                       ksmbd_debug(SMB,
+                                   "deassemble SMB2_NETNAME_NEGOTIATE_CONTEXT_ID context\n");
+               } else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE) {
+                       ksmbd_debug(SMB,
+                                   "deassemble SMB2_POSIX_EXTENSIONS_AVAILABLE context\n");
+                       conn->posix_ext_supported = true;
+               } else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES) {
+                       ksmbd_debug(SMB,
+                                   "deassemble SMB2_SIGNING_CAPABILITIES context\n");
+                       decode_sign_cap_ctxt(conn,
+                                            (struct smb2_signing_capabilities *)pctx,
+                                            len_of_ctxts);
+               }
+
+               /* offsets must be 8 byte aligned */
+               clen = (clen + 7) & ~0x7;
+               offset = clen + sizeof(struct smb2_neg_context);
+               len_of_ctxts -= clen + sizeof(struct smb2_neg_context);
+       }
+       return status;
+}
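+
+/*
+ * Example of how the context walk above advances, using illustrative sizes:
+ * a preauth context with DataLength = 38 is rounded up to clen = 40, so the
+ * next context is expected 40 + sizeof(struct smb2_neg_context) bytes
+ * further on, and len_of_ctxts shrinks by the same amount.
+ */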
+
+/**
+ * smb2_handle_negotiate() - handler for smb2 negotiate command
+ * @work:      smb work containing smb request buffer
+ *
+ * Return:      0
+ */
+int smb2_handle_negotiate(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_negotiate_req *req = work->request_buf;
+       struct smb2_negotiate_rsp *rsp = work->response_buf;
+       int rc = 0;
+       __le32 status;
+
+       ksmbd_debug(SMB, "Received negotiate request\n");
+       conn->need_neg = false;
+       if (ksmbd_conn_good(work)) {
+               pr_err("conn->tcp_status is already in CifsGood State\n");
+               work->send_no_response = 1;
+               return rc;
+       }
+
+       if (req->DialectCount == 0) {
+               pr_err("malformed packet\n");
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               rc = -EINVAL;
+               goto err_out;
+       }
+
+       conn->cli_cap = le32_to_cpu(req->Capabilities);
+       switch (conn->dialect) {
+       case SMB311_PROT_ID:
+               conn->preauth_info =
+                       kzalloc(sizeof(struct preauth_integrity_info),
+                               GFP_KERNEL);
+               if (!conn->preauth_info) {
+                       rc = -ENOMEM;
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+                       goto err_out;
+               }
+
+               status = deassemble_neg_contexts(conn, req);
+               if (status != STATUS_SUCCESS) {
+                       pr_err("deassemble_neg_contexts error(0x%x)\n",
+                              status);
+                       rsp->hdr.Status = status;
+                       rc = -EINVAL;
+                       goto err_out;
+               }
+
+               rc = init_smb3_11_server(conn);
+               if (rc < 0) {
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+                       goto err_out;
+               }
+
+               ksmbd_gen_preauth_integrity_hash(conn,
+                                                work->request_buf,
+                                                conn->preauth_info->Preauth_HashValue);
+               rsp->NegotiateContextOffset =
+                               cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
+               assemble_neg_contexts(conn, rsp);
+               break;
+       case SMB302_PROT_ID:
+               init_smb3_02_server(conn);
+               break;
+       case SMB30_PROT_ID:
+               init_smb3_0_server(conn);
+               break;
+       case SMB21_PROT_ID:
+               init_smb2_1_server(conn);
+               break;
+       case SMB20_PROT_ID:
+               rc = init_smb2_0_server(conn);
+               if (rc) {
+                       rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+                       goto err_out;
+               }
+               break;
+       case SMB2X_PROT_ID:
+       case BAD_PROT_ID:
+       default:
+               ksmbd_debug(SMB, "Server dialect :0x%x not supported\n",
+                           conn->dialect);
+               rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+               rc = -EINVAL;
+               goto err_out;
+       }
+       rsp->Capabilities = cpu_to_le32(conn->vals->capabilities);
+
+       /* For stats */
+       conn->connection_type = conn->dialect;
+
+       rsp->MaxTransactSize = cpu_to_le32(conn->vals->max_trans_size);
+       rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
+       rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);
+
+       if (conn->dialect > SMB20_PROT_ID) {
+               memcpy(conn->ClientGUID, req->ClientGUID,
+                      SMB2_CLIENT_GUID_SIZE);
+               conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
+       }
+
+       rsp->StructureSize = cpu_to_le16(65);
+       rsp->DialectRevision = cpu_to_le16(conn->dialect);
+       /* Not setting conn guid rsp->ServerGUID, as it is
+        * not used by the client for identifying the server
+        */
+       memset(rsp->ServerGUID, 0, SMB2_CLIENT_GUID_SIZE);
+
+       rsp->SystemTime = cpu_to_le64(ksmbd_systime());
+       rsp->ServerStartTime = 0;
+       ksmbd_debug(SMB, "negotiate context offset %d, count %d\n",
+                   le32_to_cpu(rsp->NegotiateContextOffset),
+                   le16_to_cpu(rsp->NegotiateContextCount));
+
+       rsp->SecurityBufferOffset = cpu_to_le16(128);
+       rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
+       ksmbd_copy_gss_neg_header(((char *)(&rsp->hdr) +
+                                 sizeof(rsp->hdr.smb2_buf_length)) +
+                                  le16_to_cpu(rsp->SecurityBufferOffset));
+       inc_rfc1001_len(rsp, sizeof(struct smb2_negotiate_rsp) -
+                       sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
+                        AUTH_GSS_LENGTH);
+       rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
+       conn->use_spnego = true;
+
+       if ((server_conf.signing == KSMBD_CONFIG_OPT_AUTO ||
+            server_conf.signing == KSMBD_CONFIG_OPT_DISABLED) &&
+           req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED_LE)
+               conn->sign = true;
+       else if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY) {
+               server_conf.enforced_signing = true;
+               rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
+               conn->sign = true;
+       }
+
+       conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
+       ksmbd_conn_set_need_negotiate(work);
+
+err_out:
+       if (rc < 0)
+               smb2_set_err_rsp(work);
+
+       return rc;
+}
+
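+/*
+ * Duplicate the connection-wide preauth integrity hash into the session so
+ * that later session setup rounds can keep updating a per-session copy.
+ */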
+static int alloc_preauth_hash(struct ksmbd_session *sess,
+                             struct ksmbd_conn *conn)
+{
+       if (sess->Preauth_HashValue)
+               return 0;
+
+       sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue,
+                                         PREAUTH_HASHVALUE_SIZE, GFP_KERNEL);
+       if (!sess->Preauth_HashValue)
+               return -ENOMEM;
+
+       return 0;
+}
+
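+/*
+ * Update the preauth integrity hash with the current session setup request.
+ * Only SMB 3.1.1 uses preauth integrity; for a binding request the hash is
+ * tracked in a preauth_session on the binding connection, otherwise in the
+ * session itself.
+ */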
+static int generate_preauth_hash(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct ksmbd_session *sess = work->sess;
+       u8 *preauth_hash;
+
+       if (conn->dialect != SMB311_PROT_ID)
+               return 0;
+
+       if (conn->binding) {
+               struct preauth_session *preauth_sess;
+
+               preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id);
+               if (!preauth_sess) {
+                       preauth_sess = ksmbd_preauth_session_alloc(conn, sess->id);
+                       if (!preauth_sess)
+                               return -ENOMEM;
+               }
+
+               preauth_hash = preauth_sess->Preauth_HashValue;
+       } else {
+               if (!sess->Preauth_HashValue)
+                       if (alloc_preauth_hash(sess, conn))
+                               return -ENOMEM;
+               preauth_hash = sess->Preauth_HashValue;
+       }
+
+       ksmbd_gen_preauth_integrity_hash(conn, work->request_buf, preauth_hash);
+       return 0;
+}
+
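+/*
+ * Try to parse the security buffer as a SPNEGO negTokenInit or negTokenTarg.
+ * If neither parses, fall back to raw NTLMSSP and stop using SPNEGO.
+ */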
+static int decode_negotiation_token(struct ksmbd_work *work,
+                                   struct negotiate_message *negblob)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_sess_setup_req *req;
+       int sz;
+
+       if (!conn->use_spnego)
+               return -EINVAL;
+
+       req = work->request_buf;
+       sz = le16_to_cpu(req->SecurityBufferLength);
+
+       if (ksmbd_decode_negTokenInit((char *)negblob, sz, conn)) {
+               if (ksmbd_decode_negTokenTarg((char *)negblob, sz, conn)) {
+                       conn->auth_mechs |= KSMBD_AUTH_NTLMSSP;
+                       conn->preferred_auth_mech = KSMBD_AUTH_NTLMSSP;
+                       conn->use_spnego = false;
+               }
+       }
+       return 0;
+}
+
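+/*
+ * NTLMSSP NEGOTIATE phase: decode the client's negotiate blob and place a
+ * CHALLENGE blob in the response, wrapped in SPNEGO when SPNEGO is in use.
+ */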
+static int ntlm_negotiate(struct ksmbd_work *work,
+                         struct negotiate_message *negblob)
+{
+       struct smb2_sess_setup_req *req = work->request_buf;
+       struct smb2_sess_setup_rsp *rsp = work->response_buf;
+       struct challenge_message *chgblob;
+       unsigned char *spnego_blob = NULL;
+       u16 spnego_blob_len;
+       char *neg_blob;
+       int sz, rc;
+
+       ksmbd_debug(SMB, "negotiate phase\n");
+       sz = le16_to_cpu(req->SecurityBufferLength);
+       rc = ksmbd_decode_ntlmssp_neg_blob(negblob, sz, work->sess);
+       if (rc)
+               return rc;
+
+       sz = le16_to_cpu(rsp->SecurityBufferOffset);
+       chgblob =
+               (struct challenge_message *)((char *)&rsp->hdr.ProtocolId + sz);
+       memset(chgblob, 0, sizeof(struct challenge_message));
+
+       if (!work->conn->use_spnego) {
+               sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->sess);
+               if (sz < 0)
+                       return -ENOMEM;
+
+               rsp->SecurityBufferLength = cpu_to_le16(sz);
+               return 0;
+       }
+
+       sz = sizeof(struct challenge_message);
+       sz += (strlen(ksmbd_netbios_name()) * 2 + 1 + 4) * 6;
+
+       neg_blob = kzalloc(sz, GFP_KERNEL);
+       if (!neg_blob)
+               return -ENOMEM;
+
+       chgblob = (struct challenge_message *)neg_blob;
+       sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->sess);
+       if (sz < 0) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       rc = build_spnego_ntlmssp_neg_blob(&spnego_blob, &spnego_blob_len,
+                                          neg_blob, sz);
+       if (rc) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       sz = le16_to_cpu(rsp->SecurityBufferOffset);
+       memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
+       rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
+
+out:
+       kfree(spnego_blob);
+       kfree(neg_blob);
+       return rc;
+}
+
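+/*
+ * Return the NTLMSSP AUTHENTICATE message, either from the decoded SPNEGO
+ * mechToken or directly from the request's security buffer.
+ */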
+static struct authenticate_message *user_authblob(struct ksmbd_conn *conn,
+                                                 struct smb2_sess_setup_req *req)
+{
+       int sz;
+
+       if (conn->use_spnego && conn->mechToken)
+               return (struct authenticate_message *)conn->mechToken;
+
+       sz = le16_to_cpu(req->SecurityBufferOffset);
+       return (struct authenticate_message *)((char *)&req->hdr.ProtocolId
+                                              + sz);
+}
+
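+/*
+ * Extract the user name from the AUTHENTICATE blob and look up the matching
+ * ksmbd user account.
+ */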
+static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
+                                      struct smb2_sess_setup_req *req)
+{
+       struct authenticate_message *authblob;
+       struct ksmbd_user *user;
+       char *name;
+       int sz;
+
+       authblob = user_authblob(conn, req);
+       sz = le32_to_cpu(authblob->UserName.BufferOffset);
+       name = smb_strndup_from_utf16((const char *)authblob + sz,
+                                     le16_to_cpu(authblob->UserName.Length),
+                                     true,
+                                     conn->local_nls);
+       if (IS_ERR(name)) {
+               pr_err("cannot allocate memory\n");
+               return NULL;
+       }
+
+       ksmbd_debug(SMB, "session setup request for user %s\n", name);
+       user = ksmbd_login_user(name);
+       kfree(name);
+       return user;
+}
+
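+/*
+ * NTLMSSP AUTHENTICATE phase: verify the client credentials, set up a guest
+ * or authenticated session, and derive the signing/encryption keys.
+ */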
+static int ntlm_authenticate(struct ksmbd_work *work)
+{
+       struct smb2_sess_setup_req *req = work->request_buf;
+       struct smb2_sess_setup_rsp *rsp = work->response_buf;
+       struct ksmbd_conn *conn = work->conn;
+       struct ksmbd_session *sess = work->sess;
+       struct channel *chann = NULL;
+       struct ksmbd_user *user;
+       u64 prev_id;
+       int sz, rc;
+
+       ksmbd_debug(SMB, "authenticate phase\n");
+       if (conn->use_spnego) {
+               unsigned char *spnego_blob;
+               u16 spnego_blob_len;
+
+               rc = build_spnego_ntlmssp_auth_blob(&spnego_blob,
+                                                   &spnego_blob_len,
+                                                   0);
+               if (rc)
+                       return -ENOMEM;
+
+               sz = le16_to_cpu(rsp->SecurityBufferOffset);
+               memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
+               rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
+               kfree(spnego_blob);
+               inc_rfc1001_len(rsp, spnego_blob_len - 1);
+       }
+
+       user = session_user(conn, req);
+       if (!user) {
+               ksmbd_debug(SMB, "Unknown user name or an error\n");
+               return -EPERM;
+       }
+
+       /* Check for previous session */
+       prev_id = le64_to_cpu(req->PreviousSessionId);
+       if (prev_id && prev_id != sess->id)
+               destroy_previous_session(user, prev_id);
+
+       if (sess->state == SMB2_SESSION_VALID) {
+               /*
+                * Reuse the session if an anonymous user tries to
+                * connect on reauthentication.
+                */
+               if (ksmbd_anonymous_user(user)) {
+                       ksmbd_free_user(user);
+                       return 0;
+               }
+               ksmbd_free_user(sess->user);
+       }
+
+       sess->user = user;
+       if (user_guest(sess->user)) {
+               if (conn->sign) {
+                       ksmbd_debug(SMB, "Guest login not allowed when signing enabled\n");
+                       return -EPERM;
+               }
+
+               rsp->SessionFlags = SMB2_SESSION_FLAG_IS_GUEST_LE;
+       } else {
+               struct authenticate_message *authblob;
+
+               authblob = user_authblob(conn, req);
+               sz = le16_to_cpu(req->SecurityBufferLength);
+               rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, sess);
+               if (rc) {
+                       set_user_flag(sess->user, KSMBD_USER_FLAG_BAD_PASSWORD);
+                       ksmbd_debug(SMB, "authentication failed\n");
+                       return -EPERM;
+               }
+
+               /*
+                * If the session state is SMB2_SESSION_VALID, we can
+                * assume this is reauthentication. The user/password
+                * has already been verified, so return here.
+                */
+               if (sess->state == SMB2_SESSION_VALID) {
+                       if (conn->binding)
+                               goto binding_session;
+                       return 0;
+               }
+
+               if ((conn->sign || server_conf.enforced_signing) ||
+                   (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
+                       sess->sign = true;
+
+               if (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION &&
+                   conn->ops->generate_encryptionkey &&
+                   !(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
+                       rc = conn->ops->generate_encryptionkey(sess);
+                       if (rc) {
+                               ksmbd_debug(SMB,
+                                           "SMB3 encryption key generation failed\n");
+                               return -EINVAL;
+                       }
+                       sess->enc = true;
+                       rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
+                       /*
+                        * signing is disabled if encryption is enabled
+                        * on this session
+                        */
+                       sess->sign = false;
+               }
+       }
+
+binding_session:
+       if (conn->dialect >= SMB30_PROT_ID) {
+               chann = lookup_chann_list(sess, conn);
+               if (!chann) {
+                       chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+                       if (!chann)
+                               return -ENOMEM;
+
+                       chann->conn = conn;
+                       INIT_LIST_HEAD(&chann->chann_list);
+                       list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+               }
+       }
+
+       if (conn->ops->generate_signingkey) {
+               rc = conn->ops->generate_signingkey(sess, conn);
+               if (rc) {
+                       ksmbd_debug(SMB, "SMB3 signing key generation failed\n");
+                       return -EINVAL;
+               }
+       }
+
+       if (conn->dialect > SMB20_PROT_ID) {
+               if (!ksmbd_conn_lookup_dialect(conn)) {
+                       pr_err("failed to verify the dialect\n");
+                       return -ENOENT;
+               }
+       }
+       return 0;
+}
+
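+/*
+ * Kerberos authentication: validate the client token via
+ * ksmbd_krb5_authenticate() and derive session keys as in the NTLMSSP path.
+ * Stubbed out when CONFIG_SMB_SERVER_KERBEROS5 is not set.
+ */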
+#ifdef CONFIG_SMB_SERVER_KERBEROS5
+static int krb5_authenticate(struct ksmbd_work *work)
+{
+       struct smb2_sess_setup_req *req = work->request_buf;
+       struct smb2_sess_setup_rsp *rsp = work->response_buf;
+       struct ksmbd_conn *conn = work->conn;
+       struct ksmbd_session *sess = work->sess;
+       char *in_blob, *out_blob;
+       struct channel *chann = NULL;
+       u64 prev_sess_id;
+       int in_len, out_len;
+       int retval;
+
+       in_blob = (char *)&req->hdr.ProtocolId +
+               le16_to_cpu(req->SecurityBufferOffset);
+       in_len = le16_to_cpu(req->SecurityBufferLength);
+       out_blob = (char *)&rsp->hdr.ProtocolId +
+               le16_to_cpu(rsp->SecurityBufferOffset);
+       out_len = work->response_sz -
+               offsetof(struct smb2_hdr, smb2_buf_length) -
+               le16_to_cpu(rsp->SecurityBufferOffset);
+
+       /* Check previous session */
+       prev_sess_id = le64_to_cpu(req->PreviousSessionId);
+       if (prev_sess_id && prev_sess_id != sess->id)
+               destroy_previous_session(sess->user, prev_sess_id);
+
+       if (sess->state == SMB2_SESSION_VALID)
+               ksmbd_free_user(sess->user);
+
+       retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
+                                        out_blob, &out_len);
+       if (retval) {
+               ksmbd_debug(SMB, "krb5 authentication failed\n");
+               return -EINVAL;
+       }
+       rsp->SecurityBufferLength = cpu_to_le16(out_len);
+       inc_rfc1001_len(rsp, out_len - 1);
+
+       if ((conn->sign || server_conf.enforced_signing) ||
+           (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
+               sess->sign = true;
+
+       if ((conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) &&
+           conn->ops->generate_encryptionkey) {
+               retval = conn->ops->generate_encryptionkey(sess);
+               if (retval) {
+                       ksmbd_debug(SMB,
+                                   "SMB3 encryption key generation failed\n");
+                       return -EINVAL;
+               }
+               sess->enc = true;
+               rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
+               sess->sign = false;
+       }
+
+       if (conn->dialect >= SMB30_PROT_ID) {
+               chann = lookup_chann_list(sess, conn);
+               if (!chann) {
+                       chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+                       if (!chann)
+                               return -ENOMEM;
+
+                       chann->conn = conn;
+                       INIT_LIST_HEAD(&chann->chann_list);
+                       list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+               }
+       }
+
+       if (conn->ops->generate_signingkey) {
+               retval = conn->ops->generate_signingkey(sess, conn);
+               if (retval) {
+                       ksmbd_debug(SMB, "SMB3 signing key generation failed\n");
+                       return -EINVAL;
+               }
+       }
+
+       if (conn->dialect > SMB20_PROT_ID) {
+               if (!ksmbd_conn_lookup_dialect(conn)) {
+                       pr_err("failed to verify the dialect\n");
+                       return -ENOENT;
+               }
+       }
+       return 0;
+}
+#else
+static int krb5_authenticate(struct ksmbd_work *work)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
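+/**
+ * smb2_sess_setup() - handler for smb2 session setup command
+ * @work:      smb work containing smb request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */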
+int smb2_sess_setup(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_sess_setup_req *req = work->request_buf;
+       struct smb2_sess_setup_rsp *rsp = work->response_buf;
+       struct ksmbd_session *sess;
+       struct negotiate_message *negblob;
+       int rc = 0;
+
+       ksmbd_debug(SMB, "Received request for session setup\n");
+
+       rsp->StructureSize = cpu_to_le16(9);
+       rsp->SessionFlags = 0;
+       rsp->SecurityBufferOffset = cpu_to_le16(72);
+       rsp->SecurityBufferLength = 0;
+       inc_rfc1001_len(rsp, 9);
+
+       if (!req->hdr.SessionId) {
+               sess = ksmbd_smb2_session_create();
+               if (!sess) {
+                       rc = -ENOMEM;
+                       goto out_err;
+               }
+               rsp->hdr.SessionId = cpu_to_le64(sess->id);
+               ksmbd_session_register(conn, sess);
+       } else if (conn->dialect >= SMB30_PROT_ID &&
+                  (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+                  req->Flags & SMB2_SESSION_REQ_FLAG_BINDING) {
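+               /*
+                * SMB 3.0+ session binding: attach this connection to an
+                * existing session from another connection (multichannel).
+                */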
+               u64 sess_id = le64_to_cpu(req->hdr.SessionId);
+
+               sess = ksmbd_session_lookup_slowpath(sess_id);
+               if (!sess) {
+                       rc = -ENOENT;
+                       goto out_err;
+               }
+
+               if (conn->dialect != sess->conn->dialect) {
+                       rc = -EINVAL;
+                       goto out_err;
+               }
+
+               if (!(req->hdr.Flags & SMB2_FLAGS_SIGNED)) {
+                       rc = -EINVAL;
+                       goto out_err;
+               }
+
+               if (strncmp(conn->ClientGUID, sess->conn->ClientGUID,
+                           SMB2_CLIENT_GUID_SIZE)) {
+                       rc = -ENOENT;
+                       goto out_err;
+               }
+
+               if (sess->state == SMB2_SESSION_IN_PROGRESS) {
+                       rc = -EACCES;
+                       goto out_err;
+               }
+
+               if (sess->state == SMB2_SESSION_EXPIRED) {
+                       rc = -EFAULT;
+                       goto out_err;
+               }
+
+               if (ksmbd_session_lookup(conn, sess_id)) {
+                       rc = -EACCES;
+                       goto out_err;
+               }
+
+               conn->binding = true;
+       } else if ((conn->dialect < SMB30_PROT_ID ||
+                   server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+                  (req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
+               sess = NULL;
+               rc = -EACCES;
+               goto out_err;
+       } else {
+               sess = ksmbd_session_lookup(conn,
+                                           le64_to_cpu(req->hdr.SessionId));
+               if (!sess) {
+                       rc = -ENOENT;
+                       goto out_err;
+               }
+       }
+       work->sess = sess;
+
+       if (sess->state == SMB2_SESSION_EXPIRED)
+               sess->state = SMB2_SESSION_IN_PROGRESS;
+
+       negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
+                       le16_to_cpu(req->SecurityBufferOffset));
+
+       if (decode_negotiation_token(work, negblob) == 0) {
+               if (conn->mechToken)
+                       negblob = (struct negotiate_message *)conn->mechToken;
+       }
+
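+       /* Only proceed when client and server share an authentication mechanism. */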
+       if (server_conf.auth_mechs & conn->auth_mechs) {
+               rc = generate_preauth_hash(work);
+               if (rc)
+                       goto out_err;
+
+               if (conn->preferred_auth_mech &
+                               (KSMBD_AUTH_KRB5 | KSMBD_AUTH_MSKRB5)) {
+                       rc = krb5_authenticate(work);
+                       if (rc) {
+                               rc = -EINVAL;
+                               goto out_err;
+                       }
+
+                       ksmbd_conn_set_good(work);
+                       sess->state = SMB2_SESSION_VALID;
+                       kfree(sess->Preauth_HashValue);
+                       sess->Preauth_HashValue = NULL;
+               } else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
+                       if (negblob->MessageType == NtLmNegotiate) {
+                               rc = ntlm_negotiate(work, negblob);
+                               if (rc)
+                                       goto out_err;
+                               rsp->hdr.Status =
+                                       STATUS_MORE_PROCESSING_REQUIRED;
+                               /*
+                                * Note: the total size is decremented by 1
+                                * here as an adjustment for a zero-size blob
+                                */
+                               inc_rfc1001_len(rsp, le16_to_cpu(rsp->SecurityBufferLength) - 1);
+
+                       } else if (negblob->MessageType == NtLmAuthenticate) {
+                               rc = ntlm_authenticate(work);
+                               if (rc)
+                                       goto out_err;
+
+                               ksmbd_conn_set_good(work);
+                               sess->state = SMB2_SESSION_VALID;
+                               if (conn->binding) {
+                                       struct preauth_session *preauth_sess;
+
+                                       preauth_sess =
+                                               ksmbd_preauth_session_lookup(conn, sess->id);
+                                       if (preauth_sess) {
+                                               list_del(&preauth_sess->preauth_entry);
+                                               kfree(preauth_sess);
+                                       }
+                               }
+                               kfree(sess->Preauth_HashValue);
+                               sess->Preauth_HashValue = NULL;
+                       }
+               } else {
+                       /* TODO: need one more negotiation */
+                       pr_err("Preferred authentication mechanism is not supported\n");
+                       rc = -EINVAL;
+               }
+       } else {
+               pr_err("No supported authentication mechanism\n");
+               rc = -EINVAL;
+       }
+
+out_err:
+       if (rc == -EINVAL)
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+       else if (rc == -ENOENT)
+               rsp->hdr.Status = STATUS_USER_SESSION_DELETED;
+       else if (rc == -EACCES)
+               rsp->hdr.Status = STATUS_REQUEST_NOT_ACCEPTED;
+       else if (rc == -EFAULT)
+               rsp->hdr.Status = STATUS_NETWORK_SESSION_EXPIRED;
+       else if (rc == -ENOMEM)
+               rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+       else if (rc)
+               rsp->hdr.Status = STATUS_LOGON_FAILURE;
+
+       if (conn->use_spnego && conn->mechToken) {
+               kfree(conn->mechToken);
+               conn->mechToken = NULL;
+       }
+
+       if (rc < 0 && sess) {
+               ksmbd_session_destroy(sess);
+               work->sess = NULL;
+       }
+
+       return rc;
+}
+
+/**
+ * smb2_tree_connect() - handler for smb2 tree connect command
+ * @work:      smb work containing smb request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb2_tree_connect(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_tree_connect_req *req = work->request_buf;
+       struct smb2_tree_connect_rsp *rsp = work->response_buf;
+       struct ksmbd_session *sess = work->sess;
+       char *treename = NULL, *name = NULL;
+       struct ksmbd_tree_conn_status status;
+       struct ksmbd_share_config *share;
+       int rc = -EINVAL;
+
+       treename = smb_strndup_from_utf16(req->Buffer,
+                                         le16_to_cpu(req->PathLength), true,
+                                         conn->local_nls);
+       if (IS_ERR(treename)) {
+               pr_err("treename is invalid\n");
+               status.ret = KSMBD_TREE_CONN_STATUS_ERROR;
+               goto out_err1;
+       }
+
+       name = ksmbd_extract_sharename(treename);
+       if (IS_ERR(name)) {
+               status.ret = KSMBD_TREE_CONN_STATUS_ERROR;
+               goto out_err1;
+       }
+
+       ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n",
+                   name, treename);
+
+       status = ksmbd_tree_conn_connect(sess, name);
+       if (status.ret == KSMBD_TREE_CONN_STATUS_OK)
+               rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id);
+       else
+               goto out_err1;
+
+       share = status.tree_conn->share_conf;
+       if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+               ksmbd_debug(SMB, "IPC share path request\n");
+               rsp->ShareType = SMB2_SHARE_TYPE_PIPE;
+               rsp->MaximalAccess = FILE_READ_DATA_LE | FILE_READ_EA_LE |
+                       FILE_EXECUTE_LE | FILE_READ_ATTRIBUTES_LE |
+                       FILE_DELETE_LE | FILE_READ_CONTROL_LE |
+                       FILE_WRITE_DAC_LE | FILE_WRITE_OWNER_LE |
+                       FILE_SYNCHRONIZE_LE;
+       } else {
+               rsp->ShareType = SMB2_SHARE_TYPE_DISK;
+               rsp->MaximalAccess = FILE_READ_DATA_LE | FILE_READ_EA_LE |
+                       FILE_EXECUTE_LE | FILE_READ_ATTRIBUTES_LE;
+               if (test_tree_conn_flag(status.tree_conn,
+                                       KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+                       rsp->MaximalAccess |= FILE_WRITE_DATA_LE |
+                               FILE_APPEND_DATA_LE | FILE_WRITE_EA_LE |
+                               FILE_DELETE_LE | FILE_WRITE_ATTRIBUTES_LE |
+                               FILE_DELETE_CHILD_LE | FILE_READ_CONTROL_LE |
+                               FILE_WRITE_DAC_LE | FILE_WRITE_OWNER_LE |
+                               FILE_SYNCHRONIZE_LE;
+               }
+       }
+
+       status.tree_conn->maximal_access = le32_to_cpu(rsp->MaximalAccess);
+       if (conn->posix_ext_supported)
+               status.tree_conn->posix_extensions = true;
+
+out_err1:
+       rsp->StructureSize = cpu_to_le16(16);
+       rsp->Capabilities = 0;
+       rsp->Reserved = 0;
+       /* default manual caching */
+       rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
+       inc_rfc1001_len(rsp, 16);
+
+       if (!IS_ERR(treename))
+               kfree(treename);
+       if (!IS_ERR(name))
+               kfree(name);
+
+       switch (status.ret) {
+       case KSMBD_TREE_CONN_STATUS_OK:
+               rsp->hdr.Status = STATUS_SUCCESS;
+               rc = 0;
+               break;
+       case KSMBD_TREE_CONN_STATUS_NO_SHARE:
+               rsp->hdr.Status = STATUS_BAD_NETWORK_PATH;
+               break;
+       case -ENOMEM:
+       case KSMBD_TREE_CONN_STATUS_NOMEM:
+               rsp->hdr.Status = STATUS_NO_MEMORY;
+               break;
+       case KSMBD_TREE_CONN_STATUS_ERROR:
+       case KSMBD_TREE_CONN_STATUS_TOO_MANY_CONNS:
+       case KSMBD_TREE_CONN_STATUS_TOO_MANY_SESSIONS:
+               rsp->hdr.Status = STATUS_ACCESS_DENIED;
+               break;
+       case -EINVAL:
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               break;
+       default:
+               rsp->hdr.Status = STATUS_ACCESS_DENIED;
+       }
+
+       return rc;
+}
+
+/**
+ * smb2_create_open_flags() - convert smb open flags to unix open flags
+ * @file_present:      is file already present
+ * @access:            file access flags
+ * @disposition:       file disposition flags
+ * @may_flags:         set with MAY_ flags
+ *
+ * Return:      file open flags
+ */
+static int smb2_create_open_flags(bool file_present, __le32 access,
+                                 __le32 disposition,
+                                 int *may_flags)
+{
+       int oflags = O_NONBLOCK | O_LARGEFILE;
+
+       if (access & FILE_READ_DESIRED_ACCESS_LE &&
+           access & FILE_WRITE_DESIRE_ACCESS_LE) {
+               oflags |= O_RDWR;
+               *may_flags = MAY_OPEN | MAY_READ | MAY_WRITE;
+       } else if (access & FILE_WRITE_DESIRE_ACCESS_LE) {
+               oflags |= O_WRONLY;
+               *may_flags = MAY_OPEN | MAY_WRITE;
+       } else {
+               oflags |= O_RDONLY;
+               *may_flags = MAY_OPEN | MAY_READ;
+       }
+
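+       /* An attributes-only open (just FILE_READ_ATTRIBUTES) maps to O_PATH. */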
+       if (access == FILE_READ_ATTRIBUTES_LE)
+               oflags |= O_PATH;
+
+       if (file_present) {
+               switch (disposition & FILE_CREATE_MASK_LE) {
+               case FILE_OPEN_LE:
+               case FILE_CREATE_LE:
+                       break;
+               case FILE_SUPERSEDE_LE:
+               case FILE_OVERWRITE_LE:
+               case FILE_OVERWRITE_IF_LE:
+                       oflags |= O_TRUNC;
+                       break;
+               default:
+                       break;
+               }
+       } else {
+               switch (disposition & FILE_CREATE_MASK_LE) {
+               case FILE_SUPERSEDE_LE:
+               case FILE_CREATE_LE:
+               case FILE_OPEN_IF_LE:
+               case FILE_OVERWRITE_IF_LE:
+                       oflags |= O_CREAT;
+                       break;
+               case FILE_OPEN_LE:
+               case FILE_OVERWRITE_LE:
+                       oflags &= ~O_CREAT;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return oflags;
+}
+
+/**
+ * smb2_tree_disconnect() - handler for smb2 tree disconnect command
+ * @work:      smb work containing request buffer
+ *
+ * Return:      0
+ */
+int smb2_tree_disconnect(struct ksmbd_work *work)
+{
+       struct smb2_tree_disconnect_rsp *rsp = work->response_buf;
+       struct ksmbd_session *sess = work->sess;
+       struct ksmbd_tree_connect *tcon = work->tcon;
+
+       rsp->StructureSize = cpu_to_le16(4);
+       inc_rfc1001_len(rsp, 4);
+
+       ksmbd_debug(SMB, "request\n");
+
+       if (!tcon) {
+               struct smb2_tree_disconnect_req *req = work->request_buf;
+
+               ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+               rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+               smb2_set_err_rsp(work);
+               return 0;
+       }
+
+       ksmbd_close_tree_conn_fds(work);
+       ksmbd_tree_conn_disconnect(sess, tcon);
+       return 0;
+}
+
+/**
+ * smb2_session_logoff() - handler for session log off request
+ * @work:      smb work containing request buffer
+ *
+ * Return:      0
+ */
+int smb2_session_logoff(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_logoff_rsp *rsp = work->response_buf;
+       struct ksmbd_session *sess = work->sess;
+
+       rsp->StructureSize = cpu_to_le16(4);
+       inc_rfc1001_len(rsp, 4);
+
+       ksmbd_debug(SMB, "request\n");
+
+       /* Got a valid session, set connection state */
+       WARN_ON(sess->conn != conn);
+
+       /* setting CifsExiting here may race with start_tcp_sess */
+       ksmbd_conn_set_need_reconnect(work);
+       ksmbd_close_session_fds(work);
+       ksmbd_conn_wait_idle(conn);
+
+       if (ksmbd_tree_conn_session_logoff(sess)) {
+               struct smb2_logoff_req *req = work->request_buf;
+
+               ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+               rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+               smb2_set_err_rsp(work);
+               return 0;
+       }
+
+       ksmbd_destroy_file_table(&sess->file_table);
+       sess->state = SMB2_SESSION_EXPIRED;
+
+       ksmbd_free_user(sess->user);
+       sess->user = NULL;
+
+       /* let start_tcp_sess free connection info now */
+       ksmbd_conn_set_need_negotiate(work);
+       return 0;
+}
+
+/**
+ * create_smb2_pipe() - create IPC pipe
+ * @work:      smb work containing request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+static noinline int create_smb2_pipe(struct ksmbd_work *work)
+{
+       struct smb2_create_rsp *rsp = work->response_buf;
+       struct smb2_create_req *req = work->request_buf;
+       int id;
+       int err;
+       char *name;
+
+       name = smb_strndup_from_utf16(req->Buffer, le16_to_cpu(req->NameLength),
+                                     1, work->conn->local_nls);
+       if (IS_ERR(name)) {
+               rsp->hdr.Status = STATUS_NO_MEMORY;
+               err = PTR_ERR(name);
+               goto out;
+       }
+
+       id = ksmbd_session_rpc_open(work->sess, name);
+       if (id < 0) {
+               pr_err("Unable to open RPC pipe: %d\n", id);
+               err = id;
+               goto out;
+       }
+
+       rsp->hdr.Status = STATUS_SUCCESS;
+       rsp->StructureSize = cpu_to_le16(89);
+       rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+       rsp->Reserved = 0;
+       rsp->CreateAction = cpu_to_le32(FILE_OPENED);
+
+       rsp->CreationTime = cpu_to_le64(0);
+       rsp->LastAccessTime = cpu_to_le64(0);
+       rsp->ChangeTime = cpu_to_le64(0);
+       rsp->AllocationSize = cpu_to_le64(0);
+       rsp->EndofFile = cpu_to_le64(0);
+       rsp->FileAttributes = ATTR_NORMAL_LE;
+       rsp->Reserved2 = 0;
+       rsp->VolatileFileId = cpu_to_le64(id);
+       rsp->PersistentFileId = 0;
+       rsp->CreateContextsOffset = 0;
+       rsp->CreateContextsLength = 0;
+
+       inc_rfc1001_len(rsp, 88); /* StructureSize - 1 */
+       kfree(name);
+       return 0;
+
+out:
+       switch (err) {
+       case -EINVAL:
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               break;
+       case -ENOSPC:
+       case -ENOMEM:
+               rsp->hdr.Status = STATUS_NO_MEMORY;
+               break;
+       }
+
+       if (!IS_ERR(name))
+               kfree(name);
+
+       smb2_set_err_rsp(work);
+       return err;
+}
+
+/**
+ * smb2_set_ea() - handler for setting extended attributes using set
+ *             info command
+ * @eabuf:     set info command buffer
+ * @path:      dentry path on which to set the EAs
+ *
+ * Return:     0 on success, otherwise error
+ */
+static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path)
+{
+       struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+       char *attr_name = NULL, *value;
+       int rc = 0;
+       int next = 0;
+
+       attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL);
+       if (!attr_name)
+               return -ENOMEM;
+
+       do {
+               if (!eabuf->EaNameLength)
+                       goto next;
+
+               ksmbd_debug(SMB,
+                           "name : <%s>, name_len : %u, value_len : %u, next : %u\n",
+                           eabuf->name, eabuf->EaNameLength,
+                           le16_to_cpu(eabuf->EaValueLength),
+                           le32_to_cpu(eabuf->NextEntryOffset));
+
+               if (eabuf->EaNameLength >
+                   (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) {
+                       rc = -EINVAL;
+                       break;
+               }
+
+               memcpy(attr_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
+               memcpy(&attr_name[XATTR_USER_PREFIX_LEN], eabuf->name,
+                      eabuf->EaNameLength);
+               attr_name[XATTR_USER_PREFIX_LEN + eabuf->EaNameLength] = '\0';
+               value = (char *)&eabuf->name + eabuf->EaNameLength + 1;
+
+               if (!eabuf->EaValueLength) {
+                       rc = ksmbd_vfs_casexattr_len(user_ns,
+                                                    path->dentry,
+                                                    attr_name,
+                                                    XATTR_USER_PREFIX_LEN +
+                                                    eabuf->EaNameLength);
+
+                       /* delete the EA only when it exists */
+                       if (rc > 0) {
+                               rc = ksmbd_vfs_remove_xattr(user_ns,
+                                                           path->dentry,
+                                                           attr_name);
+
+                               if (rc < 0) {
+                                       ksmbd_debug(SMB,
+                                                   "remove xattr failed(%d)\n",
+                                                   rc);
+                                       break;
+                               }
+                       }
+
+                       /* if the EA doesn't exist, just do nothing. */
+                       rc = 0;
+               } else {
+                       rc = ksmbd_vfs_setxattr(user_ns,
+                                               path->dentry, attr_name, value,
+                                               le16_to_cpu(eabuf->EaValueLength), 0);
+                       if (rc < 0) {
+                               ksmbd_debug(SMB,
+                                           "ksmbd_vfs_setxattr failed (%d)\n",
+                                           rc);
+                               break;
+                       }
+               }
+
+next:
+               next = le32_to_cpu(eabuf->NextEntryOffset);
+               eabuf = (struct smb2_ea_info *)((char *)eabuf + next);
+       } while (next != 0);
+
+       kfree(attr_name);
+       return rc;
+}
+
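+/*
+ * Record an alternate data stream name for the file as an xattr, creating
+ * the stream xattr when the create disposition allows new streams.
+ */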
+static noinline int smb2_set_stream_name_xattr(struct path *path,
+                                              struct ksmbd_file *fp,
+                                              char *stream_name, int s_type)
+{
+       struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+       size_t xattr_stream_size;
+       char *xattr_stream_name;
+       int rc;
+
+       rc = ksmbd_vfs_xattr_stream_name(stream_name,
+                                        &xattr_stream_name,
+                                        &xattr_stream_size,
+                                        s_type);
+       if (rc)
+               return rc;
+
+       fp->stream.name = xattr_stream_name;
+       fp->stream.size = xattr_stream_size;
+
+       /* Check if the stream prefix already exists in the xattr space */
+       rc = ksmbd_vfs_casexattr_len(user_ns,
+                                    path->dentry,
+                                    xattr_stream_name,
+                                    xattr_stream_size);
+       if (rc >= 0)
+               return 0;
+
+       if (fp->cdoption == FILE_OPEN_LE) {
+               ksmbd_debug(SMB, "XATTR stream name lookup failed: %d\n", rc);
+               return -EBADF;
+       }
+
+       rc = ksmbd_vfs_setxattr(user_ns, path->dentry,
+                               xattr_stream_name, NULL, 0, 0);
+       if (rc < 0)
+               pr_err("Failed to store XATTR stream name: %d\n", rc);
+       return 0;
+}
+
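+/*
+ * Remove the SMB-related user xattrs (DOS attribute and stream entries)
+ * from a file, e.g. when an existing file is truncated on create.
+ */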
+static int smb2_remove_smb_xattrs(struct path *path)
+{
+       struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+       char *name, *xattr_list = NULL;
+       ssize_t xattr_list_len;
+       int err = 0;
+
+       xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
+       if (xattr_list_len < 0) {
+               goto out;
+       } else if (!xattr_list_len) {
+               ksmbd_debug(SMB, "empty xattr in the file\n");
+               goto out;
+       }
+
+       for (name = xattr_list; name - xattr_list < xattr_list_len;
+                       name += strlen(name) + 1) {
+               ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
+
+               if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+                   strncmp(&name[XATTR_USER_PREFIX_LEN], DOS_ATTRIBUTE_PREFIX,
+                           DOS_ATTRIBUTE_PREFIX_LEN) &&
+                   strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX, STREAM_PREFIX_LEN))
+                       continue;
+
+               err = ksmbd_vfs_remove_xattr(user_ns, path->dentry, name);
+               if (err)
+                       ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
+       }
+out:
+       kvfree(xattr_list);
+       return err;
+}
+
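+/*
+ * Truncate an existing file on an overwrite/supersede open and drop the
+ * SMB xattrs describing its previous contents.
+ */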
+static int smb2_create_truncate(struct path *path)
+{
+       int rc = vfs_truncate(path, 0);
+
+       if (rc) {
+               pr_err("vfs_truncate failed, rc %d\n", rc);
+               return rc;
+       }
+
+       rc = smb2_remove_smb_xattrs(path);
+       if (rc == -EOPNOTSUPP)
+               rc = 0;
+       if (rc)
+               ksmbd_debug(SMB,
+                           "smb2_remove_smb_xattrs failed, rc %d\n",
+                           rc);
+       return rc;
+}
+
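+/*
+ * Store the initial DOS attributes and creation time of a newly created
+ * file in its xattr, if the share keeps DOS attributes.
+ */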
+static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, struct path *path,
+                           struct ksmbd_file *fp)
+{
+       struct xattr_dos_attrib da = {0};
+       int rc;
+
+       if (!test_share_config_flag(tcon->share_conf,
+                                   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS))
+               return;
+
+       da.version = 4;
+       da.attr = le32_to_cpu(fp->f_ci->m_fattr);
+       da.itime = da.create_time = fp->create_time;
+       da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
+               XATTR_DOSINFO_ITIME;
+
+       rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_user_ns(path->mnt),
+                                           path->dentry, &da);
+       if (rc)
+               ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
+}
+
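+/*
+ * Refresh the cached file attributes and timestamps from the DOS attribute
+ * xattr of an existing file, when the share stores them.
+ */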
+static void smb2_update_xattrs(struct ksmbd_tree_connect *tcon,
+                              struct path *path, struct ksmbd_file *fp)
+{
+       struct xattr_dos_attrib da;
+       int rc;
+
+       fp->f_ci->m_fattr &= ~(ATTR_HIDDEN_LE | ATTR_SYSTEM_LE);
+
+       /* get FileAttributes from XATTR_NAME_DOS_ATTRIBUTE */
+       if (!test_share_config_flag(tcon->share_conf,
+                                   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS))
+               return;
+
+       rc = ksmbd_vfs_get_dos_attrib_xattr(mnt_user_ns(path->mnt),
+                                           path->dentry, &da);
+       if (rc > 0) {
+               fp->f_ci->m_fattr = cpu_to_le32(da.attr);
+               fp->create_time = da.create_time;
+               fp->itime = da.itime;
+       }
+}
+
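+/*
+ * Create the missing file or directory with the share's configured mode
+ * bits and look up its path.
+ */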
+static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name,
+                     int open_flags, umode_t posix_mode, bool is_dir)
+{
+       struct ksmbd_tree_connect *tcon = work->tcon;
+       struct ksmbd_share_config *share = tcon->share_conf;
+       umode_t mode;
+       int rc;
+
+       if (!(open_flags & O_CREAT))
+               return -EBADF;
+
+       ksmbd_debug(SMB, "file does not exist, so creating\n");
+       if (is_dir) {
+               ksmbd_debug(SMB, "creating directory\n");
+
+               mode = share_config_directory_mode(share, posix_mode);
+               rc = ksmbd_vfs_mkdir(work, name, mode);
+               if (rc)
+                       return rc;
+       } else {
+               ksmbd_debug(SMB, "creating regular file\n");
+
+               mode = share_config_create_mode(share, posix_mode);
+               rc = ksmbd_vfs_create(work, name, mode);
+               if (rc)
+                       return rc;
+       }
+
+       rc = ksmbd_vfs_kern_path(name, 0, path, 0);
+       if (rc) {
+               pr_err("cannot get linux path (%s), err = %d\n",
+                      name, rc);
+               return rc;
+       }
+       return 0;
+}
+
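+/*
+ * Apply a security descriptor supplied through an SMB2_CREATE_SD_BUFFER
+ * create context to the file being created.
+ */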
+static int smb2_create_sd_buffer(struct ksmbd_work *work,
+                                struct smb2_create_req *req,
+                                struct path *path)
+{
+       struct create_context *context;
+       struct create_sd_buf_req *sd_buf;
+
+       if (!req->CreateContextsOffset)
+               return -ENOENT;
+
+       /* Parse SD BUFFER create contexts */
+       context = smb2_find_context_vals(req, SMB2_CREATE_SD_BUFFER);
+       if (!context)
+               return -ENOENT;
+       else if (IS_ERR(context))
+               return PTR_ERR(context);
+
+       ksmbd_debug(SMB,
+                   "Set ACLs using SMB2_CREATE_SD_BUFFER context\n");
+       sd_buf = (struct create_sd_buf_req *)context;
+       return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
+                           le32_to_cpu(sd_buf->ccontext.DataLength), true);
+}
+
+static void ksmbd_acls_fattr(struct smb_fattr *fattr, struct inode *inode)
+{
+       fattr->cf_uid = inode->i_uid;
+       fattr->cf_gid = inode->i_gid;
+       fattr->cf_mode = inode->i_mode;
+       fattr->cf_acls = NULL;
+       fattr->cf_dacls = NULL;
+
+       if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
+               fattr->cf_acls = get_acl(inode, ACL_TYPE_ACCESS);
+               if (S_ISDIR(inode->i_mode))
+                       fattr->cf_dacls = get_acl(inode, ACL_TYPE_DEFAULT);
+       }
+}
+
+/**
+ * smb2_open() - handler for smb file open request
+ * @work:      smb work containing request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb2_open(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct ksmbd_session *sess = work->sess;
+       struct ksmbd_tree_connect *tcon = work->tcon;
+       struct smb2_create_req *req;
+       struct smb2_create_rsp *rsp, *rsp_org;
+       struct path path;
+       struct ksmbd_share_config *share = tcon->share_conf;
+       struct ksmbd_file *fp = NULL;
+       struct file *filp = NULL;
+       struct user_namespace *user_ns = NULL;
+       struct kstat stat;
+       struct create_context *context;
+       struct lease_ctx_info *lc = NULL;
+       struct create_ea_buf_req *ea_buf = NULL;
+       struct oplock_info *opinfo;
+       __le32 *next_ptr = NULL;
+       int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;
+       int rc = 0, len = 0;
+       int contxt_cnt = 0, query_disk_id = 0;
+       int maximal_access_ctxt = 0, posix_ctxt = 0;
+       int s_type = 0;
+       int next_off = 0;
+       char *name = NULL;
+       char *stream_name = NULL;
+       bool file_present = false, created = false, already_permitted = false;
+       int share_ret, need_truncate = 0;
+       u64 time;
+       umode_t posix_mode = 0;
+       __le32 daccess, maximal_access = 0;
+
+       rsp_org = work->response_buf;
+       WORK_BUFFERS(work, req, rsp);
+
+       if (req->hdr.NextCommand && !work->next_smb2_rcv_hdr_off &&
+           (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS)) {
+               ksmbd_debug(SMB, "invalid flag in chained command\n");
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               smb2_set_err_rsp(work);
+               return -EINVAL;
+       }
+
+       if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+               ksmbd_debug(SMB, "IPC pipe create request\n");
+               return create_smb2_pipe(work);
+       }
+
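+       /*
+        * Resolve the target name from the request; an empty name means the
+        * share root itself is being opened.
+        */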
+       if (req->NameLength) {
+               if ((req->CreateOptions & FILE_DIRECTORY_FILE_LE) &&
+                   *(char *)req->Buffer == '\\') {
+                       pr_err("directory name with a leading slash is not allowed\n");
+                       rc = -EINVAL;
+                       goto err_out1;
+               }
+
+               name = smb2_get_name(share,
+                                    req->Buffer,
+                                    le16_to_cpu(req->NameLength),
+                                    work->conn->local_nls);
+               if (IS_ERR(name)) {
+                       rc = PTR_ERR(name);
+                       if (rc != -ENOMEM)
+                               rc = -ENOENT;
+                       name = NULL;
+                       goto err_out1;
+               }
+
+               ksmbd_debug(SMB, "converted name = %s\n", name);
+               if (strchr(name, ':')) {
+                       if (!test_share_config_flag(work->tcon->share_conf,
+                                                   KSMBD_SHARE_FLAG_STREAMS)) {
+                               rc = -EBADF;
+                               goto err_out1;
+                       }
+                       rc = parse_stream_name(name, &stream_name, &s_type);
+                       if (rc < 0)
+                               goto err_out1;
+               }
+
+               rc = ksmbd_validate_filename(name);
+               if (rc < 0)
+                       goto err_out1;
+
+               if (ksmbd_share_veto_filename(share, name)) {
+                       rc = -ENOENT;
+                       ksmbd_debug(SMB, "Reject open(), vetoed file: %s\n",
+                                   name);
+                       goto err_out1;
+               }
+       } else {
+               len = strlen(share->path);
+               ksmbd_debug(SMB, "share path len %d\n", len);
+               name = kmalloc(len + 1, GFP_KERNEL);
+               if (!name) {
+                       rsp->hdr.Status = STATUS_NO_MEMORY;
+                       rc = -ENOMEM;
+                       goto err_out1;
+               }
+
+               memcpy(name, share->path, len);
+               *(name + len) = '\0';
+       }
+
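+       /*
+        * Parse the requested oplock/lease state, then validate the create
+        * parameters before touching the filesystem.
+        */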
+       req_op_level = req->RequestedOplockLevel;
+       if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
+               lc = parse_lease_state(req);
+
+       if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE_LE)) {
+               pr_err("Invalid impersonationlevel : 0x%x\n",
+                      le32_to_cpu(req->ImpersonationLevel));
+               rc = -EIO;
+               rsp->hdr.Status = STATUS_BAD_IMPERSONATION_LEVEL;
+               goto err_out1;
+       }
+
+       if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK)) {
+               pr_err("Invalid create options : 0x%x\n",
+                      le32_to_cpu(req->CreateOptions));
+               rc = -EINVAL;
+               goto err_out1;
+       } else {
+               if (req->CreateOptions & FILE_SEQUENTIAL_ONLY_LE &&
+                   req->CreateOptions & FILE_RANDOM_ACCESS_LE)
+                       req->CreateOptions = ~(FILE_SEQUENTIAL_ONLY_LE);
+
+               if (req->CreateOptions &
+                   (FILE_OPEN_BY_FILE_ID_LE | CREATE_TREE_CONNECTION |
+                    FILE_RESERVE_OPFILTER_LE)) {
+                       rc = -EOPNOTSUPP;
+                       goto err_out1;
+               }
+
+               if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
+                       if (req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE) {
+                               rc = -EINVAL;
+                               goto err_out1;
+                       } else if (req->CreateOptions & FILE_NO_COMPRESSION_LE) {
+                               req->CreateOptions = ~(FILE_NO_COMPRESSION_LE);
+                       }
+               }
+       }
+
+       if (le32_to_cpu(req->CreateDisposition) >
+           le32_to_cpu(FILE_OVERWRITE_IF_LE)) {
+               pr_err("Invalid create disposition : 0x%x\n",
+                      le32_to_cpu(req->CreateDisposition));
+               rc = -EINVAL;
+               goto err_out1;
+       }
+
+       if (!(req->DesiredAccess & DESIRED_ACCESS_MASK)) {
+               pr_err("Invalid desired access : 0x%x\n",
+                      le32_to_cpu(req->DesiredAccess));
+               rc = -EACCES;
+               goto err_out1;
+       }
+
+       if (req->FileAttributes && !(req->FileAttributes & ATTR_MASK_LE)) {
+               pr_err("Invalid file attribute : 0x%x\n",
+                      le32_to_cpu(req->FileAttributes));
+               rc = -EINVAL;
+               goto err_out1;
+       }
+
+       if (req->CreateContextsOffset) {
+               /* Parse non-durable handle create contexts */
+               context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER);
+               if (IS_ERR(context)) {
+                       rc = PTR_ERR(context);
+                       goto err_out1;
+               } else if (context) {
+                       ea_buf = (struct create_ea_buf_req *)context;
+                       if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {
+                               rsp->hdr.Status = STATUS_ACCESS_DENIED;
+                               rc = -EACCES;
+                               goto err_out1;
+                       }
+               }
+
+               context = smb2_find_context_vals(req,
+                                                SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST);
+               if (IS_ERR(context)) {
+                       rc = PTR_ERR(context);
+                       goto err_out1;
+               } else if (context) {
+                       ksmbd_debug(SMB,
+                                   "get query maximal access context\n");
+                       maximal_access_ctxt = 1;
+               }
+
+               context = smb2_find_context_vals(req,
+                                                SMB2_CREATE_TIMEWARP_REQUEST);
+               if (IS_ERR(context)) {
+                       rc = PTR_ERR(context);
+                       goto err_out1;
+               } else if (context) {
+                       ksmbd_debug(SMB, "get timewarp context\n");
+                       rc = -EBADF;
+                       goto err_out1;
+               }
+
+               if (tcon->posix_extensions) {
+                       context = smb2_find_context_vals(req,
+                                                        SMB2_CREATE_TAG_POSIX);
+                       if (IS_ERR(context)) {
+                               rc = PTR_ERR(context);
+                               goto err_out1;
+                       } else if (context) {
+                               struct create_posix *posix =
+                                       (struct create_posix *)context;
+                               ksmbd_debug(SMB, "get posix context\n");
+
+                               posix_mode = le32_to_cpu(posix->Mode);
+                               posix_ctxt = 1;
+                       }
+               }
+       }
+
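+       /* Run the VFS operations below under the authenticated user's credentials. */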
+       if (ksmbd_override_fsids(work)) {
+               rc = -ENOMEM;
+               goto err_out1;
+       }
+
+       if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
+               /*
+                * On a delete request, look up the entry itself instead
+                * of following symlinks.
+                */
+               rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
+               if (!rc) {
+                       /*
+                        * If the file exists with the dispositions checked
+                        * below, return an access denied error.
+                        */
+                       if (req->CreateDisposition == FILE_OVERWRITE_IF_LE ||
+                           req->CreateDisposition == FILE_OPEN_IF_LE) {
+                               rc = -EACCES;
+                               path_put(&path);
+                               goto err_out;
+                       }
+
+                       if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+                               ksmbd_debug(SMB,
+                                           "User does not have write permission\n");
+                               rc = -EACCES;
+                               path_put(&path);
+                               goto err_out;
+                       }
+               }
+       } else {
+               if (test_share_config_flag(work->tcon->share_conf,
+                                          KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS)) {
+                       /*
+                        * Use LOOKUP_FOLLOW so that symlinks are followed
+                        * during path lookup.
+                        */
+                       rc = ksmbd_vfs_kern_path(name, LOOKUP_FOLLOW, &path, 1);
+                       if (rc) { /* Case for a broken link? */
+                               rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
+                       }
+               } else {
+                       rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
+                       if (!rc && d_is_symlink(path.dentry)) {
+                               rc = -EACCES;
+                               path_put(&path);
+                               goto err_out;
+                       }
+               }
+       }
+
+       if (rc) {
+               if (rc == -EACCES) {
+                       ksmbd_debug(SMB,
+                                   "User does not have right permission\n");
+                       goto err_out;
+               }
+               ksmbd_debug(SMB, "can not get linux path for %s, rc = %d\n",
+                           name, rc);
+               rc = 0;
+       } else {
+               file_present = true;
+               user_ns = mnt_user_ns(path.mnt);
+               generic_fillattr(user_ns, d_inode(path.dentry), &stat);
+       }
+       if (stream_name) {
+               if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
+                       if (s_type == DATA_STREAM) {
+                               rc = -EIO;
+                               rsp->hdr.Status = STATUS_NOT_A_DIRECTORY;
+                       }
+               } else {
+                       if (S_ISDIR(stat.mode) && s_type == DATA_STREAM) {
+                               rc = -EIO;
+                               rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY;
+                       }
+               }
+
+               if (req->CreateOptions & FILE_DIRECTORY_FILE_LE &&
+                   req->FileAttributes & ATTR_NORMAL_LE) {
+                       rsp->hdr.Status = STATUS_NOT_A_DIRECTORY;
+                       rc = -EIO;
+               }
+
+               if (rc < 0)
+                       goto err_out;
+       }
+
+       if (file_present && req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE &&
+           S_ISDIR(stat.mode) && !(req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) {
+               ksmbd_debug(SMB, "open() argument is a directory: %s, %x\n",
+                           name, req->CreateOptions);
+               rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY;
+               rc = -EIO;
+               goto err_out;
+       }
+
+       if (file_present && (req->CreateOptions & FILE_DIRECTORY_FILE_LE) &&
+           !(req->CreateDisposition == FILE_CREATE_LE) &&
+           !S_ISDIR(stat.mode)) {
+               rsp->hdr.Status = STATUS_NOT_A_DIRECTORY;
+               rc = -EIO;
+               goto err_out;
+       }
+
+       if (!stream_name && file_present &&
+           req->CreateDisposition == FILE_CREATE_LE) {
+               rc = -EEXIST;
+               goto err_out;
+       }
+
+       daccess = smb_map_generic_desired_access(req->DesiredAccess);
+
+       if (file_present && !(req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) {
+               rc = smb_check_perm_dacl(conn, &path, &daccess,
+                                        sess->user->uid);
+               if (rc)
+                       goto err_out;
+       }
+
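+       /*
+        * If the client asked for maximal access, grant everything on a file
+        * that does not exist yet; otherwise query the widest access the VFS
+        * will allow on the existing file.
+        */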
+       if (daccess & FILE_MAXIMAL_ACCESS_LE) {
+               if (!file_present) {
+                       daccess = cpu_to_le32(GENERIC_ALL_FLAGS);
+               } else {
+                       rc = ksmbd_vfs_query_maximal_access(user_ns,
+                                                           path.dentry,
+                                                           &daccess);
+                       if (rc)
+                               goto err_out;
+                       already_permitted = true;
+               }
+               maximal_access = daccess;
+       }
+
+       open_flags = smb2_create_open_flags(file_present, daccess,
+                                           req->CreateDisposition,
+                                           &may_flags);
+
+       if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+               if (open_flags & O_CREAT) {
+                       ksmbd_debug(SMB,
+                                   "User does not have write permission\n");
+                       rc = -EACCES;
+                       goto err_out;
+               }
+       }
+
+       /* Create the file if it is not present */
+       if (!file_present) {
+               rc = smb2_creat(work, &path, name, open_flags, posix_mode,
+                               req->CreateOptions & FILE_DIRECTORY_FILE_LE);
+               if (rc) {
+                       if (rc == -ENOENT) {
+                               rc = -EIO;
+                               rsp->hdr.Status = STATUS_OBJECT_PATH_NOT_FOUND;
+                       }
+                       goto err_out;
+               }
+
+               created = true;
+               user_ns = mnt_user_ns(path.mnt);
+               if (ea_buf) {
+                       rc = smb2_set_ea(&ea_buf->ea, &path);
+                       if (rc == -EOPNOTSUPP)
+                               rc = 0;
+                       else if (rc)
+                               goto err_out;
+               }
+       } else if (!already_permitted) {
+               /* FILE_READ_ATTRIBUTES is allowed without inode_permission()
+                * because execute (search) permission on the parent directory
+                * has already been granted.
+                */
+               if (daccess & ~(FILE_READ_ATTRIBUTES_LE | FILE_READ_CONTROL_LE)) {
+                       rc = inode_permission(user_ns,
+                                             d_inode(path.dentry),
+                                             may_flags);
+                       if (rc)
+                               goto err_out;
+
+                       if ((daccess & FILE_DELETE_LE) ||
+                           (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) {
+                               rc = ksmbd_vfs_may_delete(user_ns,
+                                                         path.dentry);
+                               if (rc)
+                                       goto err_out;
+                       }
+               }
+       }
+
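+       /* Refuse the open if the parent directory is pending delete. */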
+       rc = ksmbd_query_inode_status(d_inode(path.dentry->d_parent));
+       if (rc == KSMBD_INODE_STATUS_PENDING_DELETE) {
+               rc = -EBUSY;
+               goto err_out;
+       }
+
+       rc = 0;
+       filp = dentry_open(&path, open_flags, current_cred());
+       if (IS_ERR(filp)) {
+               rc = PTR_ERR(filp);
+               pr_err("dentry open failed, rc %d\n", rc);
+               goto err_out;
+       }
+
+       if (file_present) {
+               if (!(open_flags & O_TRUNC))
+                       file_info = FILE_OPENED;
+               else
+                       file_info = FILE_OVERWRITTEN;
+
+               if ((req->CreateDisposition & FILE_CREATE_MASK_LE) ==
+                   FILE_SUPERSEDE_LE)
+                       file_info = FILE_SUPERSEDED;
+       } else if (open_flags & O_CREAT) {
+               file_info = FILE_CREATED;
+       }
+
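+       /* Apply any access-pattern hints from the create options to the open file. */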
+       ksmbd_vfs_set_fadvise(filp, req->CreateOptions);
+
+       /* Obtain Volatile-ID */
+       fp = ksmbd_open_fd(work, filp);
+       if (IS_ERR(fp)) {
+               fput(filp);
+               rc = PTR_ERR(fp);
+               fp = NULL;
+               goto err_out;
+       }
+
+       /* Get Persistent-ID */
+       ksmbd_open_durable_fd(fp);
+       if (!has_file_id(fp->persistent_id)) {
+               rc = -ENOMEM;
+               goto err_out;
+       }
+
+       fp->filename = name;
+       fp->cdoption = req->CreateDisposition;
+       fp->daccess = daccess;
+       fp->saccess = req->ShareAccess;
+       fp->coption = req->CreateOptions;
+
+       /* Set default windows and posix acls if creating new file */
+       if (created) {
+               int posix_acl_rc;
+               struct inode *inode = d_inode(path.dentry);
+
+               posix_acl_rc = ksmbd_vfs_inherit_posix_acl(user_ns,
+                                                          inode,
+                                                          d_inode(path.dentry->d_parent));
+               if (posix_acl_rc)
+                       ksmbd_debug(SMB, "inherit posix acl failed : %d\n", posix_acl_rc);
+
+               if (test_share_config_flag(work->tcon->share_conf,
+                                          KSMBD_SHARE_FLAG_ACL_XATTR)) {
+                       rc = smb_inherit_dacl(conn, &path, sess->user->uid,
+                                             sess->user->gid);
+               }
+
+               if (rc) {
+                       rc = smb2_create_sd_buffer(work, req, &path);
+                       if (rc) {
+                               if (posix_acl_rc)
+                                       ksmbd_vfs_set_init_posix_acl(user_ns,
+                                                                    inode);
+
+                               if (test_share_config_flag(work->tcon->share_conf,
+                                                          KSMBD_SHARE_FLAG_ACL_XATTR)) {
+                                       struct smb_fattr fattr;
+                                       struct smb_ntsd *pntsd;
+                                       int pntsd_size, ace_num = 0;
+
+                                       ksmbd_acls_fattr(&fattr, inode);
+                                       if (fattr.cf_acls)
+                                               ace_num = fattr.cf_acls->a_count;
+                                       if (fattr.cf_dacls)
+                                               ace_num += fattr.cf_dacls->a_count;
+
+                                       pntsd = kmalloc(sizeof(struct smb_ntsd) +
+                                                       sizeof(struct smb_sid) * 3 +
+                                                       sizeof(struct smb_acl) +
+                                                       sizeof(struct smb_ace) * ace_num * 2,
+                                                       GFP_KERNEL);
+                                       if (!pntsd) {
+                                               rc = -ENOMEM;
+                                               goto err_out;
+                                       }
+
+                                       rc = build_sec_desc(user_ns,
+                                                           pntsd, NULL,
+                                                           OWNER_SECINFO |
+                                                           GROUP_SECINFO |
+                                                           DACL_SECINFO,
+                                                           &pntsd_size, &fattr);
+                                       posix_acl_release(fattr.cf_acls);
+                                       posix_acl_release(fattr.cf_dacls);
+
+                                       rc = ksmbd_vfs_set_sd_xattr(conn,
+                                                                   user_ns,
+                                                                   path.dentry,
+                                                                   pntsd,
+                                                                   pntsd_size);
+                                       kfree(pntsd);
+                                       if (rc)
+                                               pr_err("failed to store ntacl in xattr : %d\n",
+                                                      rc);
+                               }
+                       }
+               }
+               rc = 0;
+       }
+
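+       /* A named stream is recorded as an xattr on the base file. */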
+       if (stream_name) {
+               rc = smb2_set_stream_name_xattr(&path,
+                                               fp,
+                                               stream_name,
+                                               s_type);
+               if (rc)
+                       goto err_out;
+               file_info = FILE_CREATED;
+       }
+
+       fp->attrib_only = !(req->DesiredAccess & ~(FILE_READ_ATTRIBUTES_LE |
+                       FILE_WRITE_ATTRIBUTES_LE | FILE_SYNCHRONIZE_LE));
+       if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
+           !fp->attrib_only && !stream_name) {
+               smb_break_all_oplock(work, fp);
+               need_truncate = 1;
+       }
+
+       /* fp should be searchable through ksmbd_inode.m_fp_list
+        * after daccess, saccess, attrib_only, and stream are
+        * initialized.
+        */
+       write_lock(&fp->f_ci->m_lock);
+       list_add(&fp->node, &fp->f_ci->m_fp_list);
+       write_unlock(&fp->f_ci->m_lock);
+
+       rc = ksmbd_vfs_getattr(&path, &stat);
+       if (rc) {
+               generic_fillattr(user_ns, d_inode(path.dentry), &stat);
+               rc = 0;
+       }
+
+       /* Check delete pending among previous fp before oplock break */
+       if (ksmbd_inode_pending_delete(fp)) {
+               rc = -EBUSY;
+               goto err_out;
+       }
+
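+       /*
+        * Check for share-mode conflicts with existing opens, then grant an
+        * oplock or lease if the share and connection support it.
+        */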
+       share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
+       if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) ||
+           (req_op_level == SMB2_OPLOCK_LEVEL_LEASE &&
+            !(conn->vals->capabilities & SMB2_GLOBAL_CAP_LEASING))) {
+               if (share_ret < 0 && !S_ISDIR(file_inode(fp->filp)->i_mode)) {
+                       rc = share_ret;
+                       goto err_out;
+               }
+       } else {
+               if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) {
+                       req_op_level = smb2_map_lease_to_oplock(lc->req_state);
+                       ksmbd_debug(SMB,
+                                   "lease req for(%s) req oplock state 0x%x, lease state 0x%x\n",
+                                   name, req_op_level, lc->req_state);
+                       rc = find_same_lease_key(sess, fp->f_ci, lc);
+                       if (rc)
+                               goto err_out;
+               } else if (open_flags == O_RDONLY &&
+                          (req_op_level == SMB2_OPLOCK_LEVEL_BATCH ||
+                           req_op_level == SMB2_OPLOCK_LEVEL_EXCLUSIVE))
+                       req_op_level = SMB2_OPLOCK_LEVEL_II;
+
+               rc = smb_grant_oplock(work, req_op_level,
+                                     fp->persistent_id, fp,
+                                     le32_to_cpu(req->hdr.Id.SyncId.TreeId),
+                                     lc, share_ret);
+               if (rc < 0)
+                       goto err_out;
+       }
+
+       if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)
+               ksmbd_fd_set_delete_on_close(fp, file_info);
+
+       if (need_truncate) {
+               rc = smb2_create_truncate(&path);
+               if (rc)
+                       goto err_out;
+       }
+
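+       /*
+        * Handle optional create contexts: honour an allocation size request
+        * with fallocate() and note a query-on-disk-id request so the
+        * response can carry the on-disk file id.
+        */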
+       if (req->CreateContextsOffset) {
+               struct create_alloc_size_req *az_req;
+
+               az_req = (struct create_alloc_size_req *)smb2_find_context_vals(req,
+                                       SMB2_CREATE_ALLOCATION_SIZE);
+               if (IS_ERR(az_req)) {
+                       rc = PTR_ERR(az_req);
+                       goto err_out;
+               } else if (az_req) {
+                       loff_t alloc_size = le64_to_cpu(az_req->AllocationSize);
+                       int err;
+
+                       ksmbd_debug(SMB,
+                                   "requested smb2 create allocation size : %llu\n",
+                                   alloc_size);
+                       smb_break_all_levII_oplock(work, fp, 1);
+                       err = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
+                                           alloc_size);
+                       if (err < 0)
+                               ksmbd_debug(SMB,
+                                           "vfs_fallocate failed : %d\n",
+                                           err);
+               }
+
+               context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID);
+               if (IS_ERR(context)) {
+                       rc = PTR_ERR(context);
+                       goto err_out;
+               } else if (context) {
+                       ksmbd_debug(SMB, "get query on disk id context\n");
+                       query_disk_id = 1;
+               }
+       }
+
+       if (stat.result_mask & STATX_BTIME)
+               fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
+       else
+               fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
+       if (req->FileAttributes || fp->f_ci->m_fattr == 0)
+               fp->f_ci->m_fattr =
+                       cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes)));
+
+       if (!created)
+               smb2_update_xattrs(tcon, &path, fp);
+       else
+               smb2_new_xattrs(tcon, &path, fp);
+
+       memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
+
+       generic_fillattr(user_ns, file_inode(fp->filp),
+                        &stat);
+
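+       /* Fill in the fixed part of the SMB2 CREATE response. */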
+       rsp->StructureSize = cpu_to_le16(89);
+       rcu_read_lock();
+       opinfo = rcu_dereference(fp->f_opinfo);
+       rsp->OplockLevel = opinfo != NULL ? opinfo->level : 0;
+       rcu_read_unlock();
+       rsp->Reserved = 0;
+       rsp->CreateAction = cpu_to_le32(file_info);
+       rsp->CreationTime = cpu_to_le64(fp->create_time);
+       time = ksmbd_UnixTimeToNT(stat.atime);
+       rsp->LastAccessTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(stat.mtime);
+       rsp->LastWriteTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(stat.ctime);
+       rsp->ChangeTime = cpu_to_le64(time);
+       rsp->AllocationSize = S_ISDIR(stat.mode) ? 0 :
+               cpu_to_le64(stat.blocks << 9);
+       rsp->EndofFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+       rsp->FileAttributes = fp->f_ci->m_fattr;
+
+       rsp->Reserved2 = 0;
+
+       rsp->PersistentFileId = cpu_to_le64(fp->persistent_id);
+       rsp->VolatileFileId = cpu_to_le64(fp->volatile_id);
+
+       rsp->CreateContextsOffset = 0;
+       rsp->CreateContextsLength = 0;
+       inc_rfc1001_len(rsp_org, 88); /* StructureSize - 1 */
+
+       /* If a lease was requested, send a lease context in the response */
+       if (opinfo && opinfo->is_lease) {
+               struct create_context *lease_ccontext;
+
+               ksmbd_debug(SMB, "lease granted on(%s) lease state 0x%x\n",
+                           name, opinfo->o_lease->state);
+               rsp->OplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
+
+               lease_ccontext = (struct create_context *)rsp->Buffer;
+               contxt_cnt++;
+               create_lease_buf(rsp->Buffer, opinfo->o_lease);
+               le32_add_cpu(&rsp->CreateContextsLength,
+                            conn->vals->create_lease_size);
+               inc_rfc1001_len(rsp_org, conn->vals->create_lease_size);
+               next_ptr = &lease_ccontext->Next;
+               next_off = conn->vals->create_lease_size;
+       }
+
+       if (maximal_access_ctxt) {
+               struct create_context *mxac_ccontext;
+
+               if (maximal_access == 0)
+                       ksmbd_vfs_query_maximal_access(user_ns,
+                                                      path.dentry,
+                                                      &maximal_access);
+               mxac_ccontext = (struct create_context *)(rsp->Buffer +
+                               le32_to_cpu(rsp->CreateContextsLength));
+               contxt_cnt++;
+               create_mxac_rsp_buf(rsp->Buffer +
+                               le32_to_cpu(rsp->CreateContextsLength),
+                               le32_to_cpu(maximal_access));
+               le32_add_cpu(&rsp->CreateContextsLength,
+                            conn->vals->create_mxac_size);
+               inc_rfc1001_len(rsp_org, conn->vals->create_mxac_size);
+               if (next_ptr)
+                       *next_ptr = cpu_to_le32(next_off);
+               next_ptr = &mxac_ccontext->Next;
+               next_off = conn->vals->create_mxac_size;
+       }
+
+       if (query_disk_id) {
+               struct create_context *disk_id_ccontext;
+
+               disk_id_ccontext = (struct create_context *)(rsp->Buffer +
+                               le32_to_cpu(rsp->CreateContextsLength));
+               contxt_cnt++;
+               create_disk_id_rsp_buf(rsp->Buffer +
+                               le32_to_cpu(rsp->CreateContextsLength),
+                               stat.ino, tcon->id);
+               le32_add_cpu(&rsp->CreateContextsLength,
+                            conn->vals->create_disk_id_size);
+               inc_rfc1001_len(rsp_org, conn->vals->create_disk_id_size);
+               if (next_ptr)
+                       *next_ptr = cpu_to_le32(next_off);
+               next_ptr = &disk_id_ccontext->Next;
+               next_off = conn->vals->create_disk_id_size;
+       }
+
+       if (posix_ctxt) {
+               contxt_cnt++;
+               create_posix_rsp_buf(rsp->Buffer +
+                               le32_to_cpu(rsp->CreateContextsLength),
+                               fp);
+               le32_add_cpu(&rsp->CreateContextsLength,
+                            conn->vals->create_posix_size);
+               inc_rfc1001_len(rsp_org, conn->vals->create_posix_size);
+               if (next_ptr)
+                       *next_ptr = cpu_to_le32(next_off);
+       }
+
+       if (contxt_cnt > 0) {
+               rsp->CreateContextsOffset =
+                       cpu_to_le32(offsetof(struct smb2_create_rsp, Buffer)
+                       - 4);
+       }
+
+err_out:
+       if (file_present || created)
+               path_put(&path);
+       ksmbd_revert_fsids(work);
+err_out1:
+       if (rc) {
+               if (rc == -EINVAL)
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               else if (rc == -EOPNOTSUPP)
+                       rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+               else if (rc == -EACCES || rc == -ESTALE)
+                       rsp->hdr.Status = STATUS_ACCESS_DENIED;
+               else if (rc == -ENOENT)
+                       rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID;
+               else if (rc == -EPERM)
+                       rsp->hdr.Status = STATUS_SHARING_VIOLATION;
+               else if (rc == -EBUSY)
+                       rsp->hdr.Status = STATUS_DELETE_PENDING;
+               else if (rc == -EBADF)
+                       rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
+               else if (rc == -ENOEXEC)
+                       rsp->hdr.Status = STATUS_DUPLICATE_OBJECTID;
+               else if (rc == -ENXIO)
+                       rsp->hdr.Status = STATUS_NO_SUCH_DEVICE;
+               else if (rc == -EEXIST)
+                       rsp->hdr.Status = STATUS_OBJECT_NAME_COLLISION;
+               else if (rc == -EMFILE)
+                       rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+               if (!rsp->hdr.Status)
+                       rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+
+               if (!fp || !fp->filename)
+                       kfree(name);
+               if (fp)
+                       ksmbd_fd_put(work, fp);
+               smb2_set_err_rsp(work);
+               ksmbd_debug(SMB, "Error response: %x\n", rsp->hdr.Status);
+       }
+
+       kfree(lc);
+
+       return 0;
+}
+
+static int readdir_info_level_struct_sz(int info_level)
+{
+       switch (info_level) {
+       case FILE_FULL_DIRECTORY_INFORMATION:
+               return sizeof(struct file_full_directory_info);
+       case FILE_BOTH_DIRECTORY_INFORMATION:
+               return sizeof(struct file_both_directory_info);
+       case FILE_DIRECTORY_INFORMATION:
+               return sizeof(struct file_directory_info);
+       case FILE_NAMES_INFORMATION:
+               return sizeof(struct file_names_info);
+       case FILEID_FULL_DIRECTORY_INFORMATION:
+               return sizeof(struct file_id_full_dir_info);
+       case FILEID_BOTH_DIRECTORY_INFORMATION:
+               return sizeof(struct file_id_both_directory_info);
+       case SMB_FIND_FILE_POSIX_INFO:
+               return sizeof(struct smb2_posix_info);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
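+/*
+ * Read the name of the next reserved entry from the response buffer for the
+ * given info level and advance the read pointer to the following entry.
+ */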
+static int dentry_name(struct ksmbd_dir_info *d_info, int info_level)
+{
+       switch (info_level) {
+       case FILE_FULL_DIRECTORY_INFORMATION:
+       {
+               struct file_full_directory_info *ffdinfo;
+
+               ffdinfo = (struct file_full_directory_info *)d_info->rptr;
+               d_info->rptr += le32_to_cpu(ffdinfo->NextEntryOffset);
+               d_info->name = ffdinfo->FileName;
+               d_info->name_len = le32_to_cpu(ffdinfo->FileNameLength);
+               return 0;
+       }
+       case FILE_BOTH_DIRECTORY_INFORMATION:
+       {
+               struct file_both_directory_info *fbdinfo;
+
+               fbdinfo = (struct file_both_directory_info *)d_info->rptr;
+               d_info->rptr += le32_to_cpu(fbdinfo->NextEntryOffset);
+               d_info->name = fbdinfo->FileName;
+               d_info->name_len = le32_to_cpu(fbdinfo->FileNameLength);
+               return 0;
+       }
+       case FILE_DIRECTORY_INFORMATION:
+       {
+               struct file_directory_info *fdinfo;
+
+               fdinfo = (struct file_directory_info *)d_info->rptr;
+               d_info->rptr += le32_to_cpu(fdinfo->NextEntryOffset);
+               d_info->name = fdinfo->FileName;
+               d_info->name_len = le32_to_cpu(fdinfo->FileNameLength);
+               return 0;
+       }
+       case FILE_NAMES_INFORMATION:
+       {
+               struct file_names_info *fninfo;
+
+               fninfo = (struct file_names_info *)d_info->rptr;
+               d_info->rptr += le32_to_cpu(fninfo->NextEntryOffset);
+               d_info->name = fninfo->FileName;
+               d_info->name_len = le32_to_cpu(fninfo->FileNameLength);
+               return 0;
+       }
+       case FILEID_FULL_DIRECTORY_INFORMATION:
+       {
+               struct file_id_full_dir_info *dinfo;
+
+               dinfo = (struct file_id_full_dir_info *)d_info->rptr;
+               d_info->rptr += le32_to_cpu(dinfo->NextEntryOffset);
+               d_info->name = dinfo->FileName;
+               d_info->name_len = le32_to_cpu(dinfo->FileNameLength);
+               return 0;
+       }
+       case FILEID_BOTH_DIRECTORY_INFORMATION:
+       {
+               struct file_id_both_directory_info *fibdinfo;
+
+               fibdinfo = (struct file_id_both_directory_info *)d_info->rptr;
+               d_info->rptr += le32_to_cpu(fibdinfo->NextEntryOffset);
+               d_info->name = fibdinfo->FileName;
+               d_info->name_len = le32_to_cpu(fibdinfo->FileNameLength);
+               return 0;
+       }
+       case SMB_FIND_FILE_POSIX_INFO:
+       {
+               struct smb2_posix_info *posix_info;
+
+               posix_info = (struct smb2_posix_info *)d_info->rptr;
+               d_info->rptr += le32_to_cpu(posix_info->NextEntryOffset);
+               d_info->name = posix_info->name;
+               d_info->name_len = le32_to_cpu(posix_info->name_len);
+               return 0;
+       }
+       default:
+               return -EINVAL;
+       }
+}
+
+/**
+ * smb2_populate_readdir_entry() - encode directory entry in smb2 response
+ * buffer
+ * @conn:      connection instance
+ * @info_level:        smb information level
+ * @d_info:    structure containing variables for the query dir operation
+ * @user_ns:   user namespace
+ * @ksmbd_kstat:       ksmbd wrapper of dirent stat information
+ *
+ * If the directory has many entries, a single FIND_FIRST cannot return them
+ * all; FIND_NEXT may be called multiple times to read the remaining entries.
+ *
+ * Return:     0 on success, otherwise error
+ */
+static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
+                                      struct ksmbd_dir_info *d_info,
+                                      struct user_namespace *user_ns,
+                                      struct ksmbd_kstat *ksmbd_kstat)
+{
+       int next_entry_offset = 0;
+       char *conv_name;
+       int conv_len;
+       void *kstat;
+       int struct_sz, rc = 0;
+
+       conv_name = ksmbd_convert_dir_info_name(d_info,
+                                               conn->local_nls,
+                                               &conv_len);
+       if (!conv_name)
+               return -ENOMEM;
+
+       /* Somehow the name has only terminating NULL bytes */
+       if (conv_len < 0) {
+               rc = -EINVAL;
+               goto free_conv_name;
+       }
+
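+       /*
+        * Account for the variable-length name and pad the entry out to
+        * KSMBD_DIR_INFO_ALIGNMENT.
+        */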
+       struct_sz = readdir_info_level_struct_sz(info_level);
+       next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
+                                 KSMBD_DIR_INFO_ALIGNMENT);
+
+       if (next_entry_offset > d_info->out_buf_len) {
+               d_info->out_buf_len = 0;
+               rc = -ENOSPC;
+               goto free_conv_name;
+       }
+
+       kstat = d_info->wptr;
+       if (info_level != FILE_NAMES_INFORMATION)
+               kstat = ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+
+       switch (info_level) {
+       case FILE_FULL_DIRECTORY_INFORMATION:
+       {
+               struct file_full_directory_info *ffdinfo;
+
+               ffdinfo = (struct file_full_directory_info *)kstat;
+               ffdinfo->FileNameLength = cpu_to_le32(conv_len);
+               ffdinfo->EaSize =
+                       smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
+               if (ffdinfo->EaSize)
+                       ffdinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+               if (d_info->hide_dot_file && d_info->name[0] == '.')
+                       ffdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+               memcpy(ffdinfo->FileName, conv_name, conv_len);
+               ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case FILE_BOTH_DIRECTORY_INFORMATION:
+       {
+               struct file_both_directory_info *fbdinfo;
+
+               fbdinfo = (struct file_both_directory_info *)kstat;
+               fbdinfo->FileNameLength = cpu_to_le32(conv_len);
+               fbdinfo->EaSize =
+                       smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
+               if (fbdinfo->EaSize)
+                       fbdinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+               fbdinfo->ShortNameLength = 0;
+               fbdinfo->Reserved = 0;
+               if (d_info->hide_dot_file && d_info->name[0] == '.')
+                       fbdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+               memcpy(fbdinfo->FileName, conv_name, conv_len);
+               fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case FILE_DIRECTORY_INFORMATION:
+       {
+               struct file_directory_info *fdinfo;
+
+               fdinfo = (struct file_directory_info *)kstat;
+               fdinfo->FileNameLength = cpu_to_le32(conv_len);
+               if (d_info->hide_dot_file && d_info->name[0] == '.')
+                       fdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+               memcpy(fdinfo->FileName, conv_name, conv_len);
+               fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case FILE_NAMES_INFORMATION:
+       {
+               struct file_names_info *fninfo;
+
+               fninfo = (struct file_names_info *)kstat;
+               fninfo->FileNameLength = cpu_to_le32(conv_len);
+               memcpy(fninfo->FileName, conv_name, conv_len);
+               fninfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case FILEID_FULL_DIRECTORY_INFORMATION:
+       {
+               struct file_id_full_dir_info *dinfo;
+
+               dinfo = (struct file_id_full_dir_info *)kstat;
+               dinfo->FileNameLength = cpu_to_le32(conv_len);
+               dinfo->EaSize =
+                       smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
+               if (dinfo->EaSize)
+                       dinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+               dinfo->Reserved = 0;
+               dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+               if (d_info->hide_dot_file && d_info->name[0] == '.')
+                       dinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+               memcpy(dinfo->FileName, conv_name, conv_len);
+               dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case FILEID_BOTH_DIRECTORY_INFORMATION:
+       {
+               struct file_id_both_directory_info *fibdinfo;
+
+               fibdinfo = (struct file_id_both_directory_info *)kstat;
+               fibdinfo->FileNameLength = cpu_to_le32(conv_len);
+               fibdinfo->EaSize =
+                       smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
+               if (fibdinfo->EaSize)
+                       fibdinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+               fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+               fibdinfo->ShortNameLength = 0;
+               fibdinfo->Reserved = 0;
+               fibdinfo->Reserved2 = cpu_to_le16(0);
+               if (d_info->hide_dot_file && d_info->name[0] == '.')
+                       fibdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+               memcpy(fibdinfo->FileName, conv_name, conv_len);
+               fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case SMB_FIND_FILE_POSIX_INFO:
+       {
+               struct smb2_posix_info *posix_info;
+               u64 time;
+
+               posix_info = (struct smb2_posix_info *)kstat;
+               posix_info->Ignored = 0;
+               posix_info->CreationTime = cpu_to_le64(ksmbd_kstat->create_time);
+               time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime);
+               posix_info->ChangeTime = cpu_to_le64(time);
+               time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->atime);
+               posix_info->LastAccessTime = cpu_to_le64(time);
+               time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->mtime);
+               posix_info->LastWriteTime = cpu_to_le64(time);
+               posix_info->EndOfFile = cpu_to_le64(ksmbd_kstat->kstat->size);
+               posix_info->AllocationSize = cpu_to_le64(ksmbd_kstat->kstat->blocks << 9);
+               posix_info->DeviceId = cpu_to_le32(ksmbd_kstat->kstat->rdev);
+               posix_info->HardLinks = cpu_to_le32(ksmbd_kstat->kstat->nlink);
+               posix_info->Mode = cpu_to_le32(ksmbd_kstat->kstat->mode);
+               posix_info->Inode = cpu_to_le64(ksmbd_kstat->kstat->ino);
+               posix_info->DosAttributes =
+                       S_ISDIR(ksmbd_kstat->kstat->mode) ? ATTR_DIRECTORY_LE : ATTR_ARCHIVE_LE;
+               if (d_info->hide_dot_file && d_info->name[0] == '.')
+                       posix_info->DosAttributes |= ATTR_HIDDEN_LE;
+               id_to_sid(from_kuid(user_ns, ksmbd_kstat->kstat->uid),
+                         SIDNFS_USER, (struct smb_sid *)&posix_info->SidBuffer[0]);
+               id_to_sid(from_kgid(user_ns, ksmbd_kstat->kstat->gid),
+                         SIDNFS_GROUP, (struct smb_sid *)&posix_info->SidBuffer[20]);
+               memcpy(posix_info->name, conv_name, conv_len);
+               posix_info->name_len = cpu_to_le32(conv_len);
+               posix_info->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+
+       } /* switch (info_level) */
+
+       d_info->last_entry_offset = d_info->data_count;
+       d_info->data_count += next_entry_offset;
+       d_info->out_buf_len -= next_entry_offset;
+       d_info->wptr += next_entry_offset;
+
+       ksmbd_debug(SMB,
+                   "info_level : %d, buf_len : %d, next_offset : %d, data_count : %d\n",
+                   info_level, d_info->out_buf_len,
+                   next_entry_offset, d_info->data_count);
+
+free_conv_name:
+       kfree(conv_name);
+       return rc;
+}
+
+struct smb2_query_dir_private {
+       struct ksmbd_work       *work;
+       char                    *search_pattern;
+       struct ksmbd_file       *dir_fp;
+
+       struct ksmbd_dir_info   *d_info;
+       int                     info_level;
+};
+
+static void lock_dir(struct ksmbd_file *dir_fp)
+{
+       struct dentry *dir = dir_fp->filp->f_path.dentry;
+
+       inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
+}
+
+static void unlock_dir(struct ksmbd_file *dir_fp)
+{
+       struct dentry *dir = dir_fp->filp->f_path.dentry;
+
+       inode_unlock(d_inode(dir));
+}
+
+static int process_query_dir_entries(struct smb2_query_dir_private *priv)
+{
+       struct user_namespace   *user_ns = file_mnt_user_ns(priv->dir_fp->filp);
+       struct kstat            kstat;
+       struct ksmbd_kstat      ksmbd_kstat;
+       int                     rc;
+       int                     i;
+
+       for (i = 0; i < priv->d_info->num_entry; i++) {
+               struct dentry *dent;
+
+               if (dentry_name(priv->d_info, priv->info_level))
+                       return -EINVAL;
+
+               lock_dir(priv->dir_fp);
+               dent = lookup_one_len(priv->d_info->name,
+                                     priv->dir_fp->filp->f_path.dentry,
+                                     priv->d_info->name_len);
+               unlock_dir(priv->dir_fp);
+
+               if (IS_ERR(dent)) {
+                       ksmbd_debug(SMB, "Cannot lookup `%s' [%ld]\n",
+                                   priv->d_info->name,
+                                   PTR_ERR(dent));
+                       continue;
+               }
+               if (unlikely(d_is_negative(dent))) {
+                       dput(dent);
+                       ksmbd_debug(SMB, "Negative dentry `%s'\n",
+                                   priv->d_info->name);
+                       continue;
+               }
+
+               ksmbd_kstat.kstat = &kstat;
+               if (priv->info_level != FILE_NAMES_INFORMATION)
+                       ksmbd_vfs_fill_dentry_attrs(priv->work,
+                                                   user_ns,
+                                                   dent,
+                                                   &ksmbd_kstat);
+
+               rc = smb2_populate_readdir_entry(priv->work->conn,
+                                                priv->info_level,
+                                                priv->d_info,
+                                                user_ns,
+                                                &ksmbd_kstat);
+               dput(dent);
+               if (rc)
+                       return rc;
+       }
+       return 0;
+}
+
+static int reserve_populate_dentry(struct ksmbd_dir_info *d_info,
+                                  int info_level)
+{
+       int struct_sz;
+       int conv_len;
+       int next_entry_offset;
+
+       struct_sz = readdir_info_level_struct_sz(info_level);
+       if (struct_sz == -EOPNOTSUPP)
+               return -EOPNOTSUPP;
+
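+       /*
+        * Reserve room for the worst-case UTF-16 conversion of the name;
+        * the actual conversion is done when the entry is populated later.
+        */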
+       conv_len = (d_info->name_len + 1) * 2;
+       next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
+                                 KSMBD_DIR_INFO_ALIGNMENT);
+
+       if (next_entry_offset > d_info->out_buf_len) {
+               d_info->out_buf_len = 0;
+               return -ENOSPC;
+       }
+
+       switch (info_level) {
+       case FILE_FULL_DIRECTORY_INFORMATION:
+       {
+               struct file_full_directory_info *ffdinfo;
+
+               ffdinfo = (struct file_full_directory_info *)d_info->wptr;
+               memcpy(ffdinfo->FileName, d_info->name, d_info->name_len);
+               ffdinfo->FileName[d_info->name_len] = 0x00;
+               ffdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
+               ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case FILE_BOTH_DIRECTORY_INFORMATION:
+       {
+               struct file_both_directory_info *fbdinfo;
+
+               fbdinfo = (struct file_both_directory_info *)d_info->wptr;
+               memcpy(fbdinfo->FileName, d_info->name, d_info->name_len);
+               fbdinfo->FileName[d_info->name_len] = 0x00;
+               fbdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
+               fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case FILE_DIRECTORY_INFORMATION:
+       {
+               struct file_directory_info *fdinfo;
+
+               fdinfo = (struct file_directory_info *)d_info->wptr;
+               memcpy(fdinfo->FileName, d_info->name, d_info->name_len);
+               fdinfo->FileName[d_info->name_len] = 0x00;
+               fdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
+               fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case FILE_NAMES_INFORMATION:
+       {
+               struct file_names_info *fninfo;
+
+               fninfo = (struct file_names_info *)d_info->wptr;
+               memcpy(fninfo->FileName, d_info->name, d_info->name_len);
+               fninfo->FileName[d_info->name_len] = 0x00;
+               fninfo->FileNameLength = cpu_to_le32(d_info->name_len);
+               fninfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case FILEID_FULL_DIRECTORY_INFORMATION:
+       {
+               struct file_id_full_dir_info *dinfo;
+
+               dinfo = (struct file_id_full_dir_info *)d_info->wptr;
+               memcpy(dinfo->FileName, d_info->name, d_info->name_len);
+               dinfo->FileName[d_info->name_len] = 0x00;
+               dinfo->FileNameLength = cpu_to_le32(d_info->name_len);
+               dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case FILEID_BOTH_DIRECTORY_INFORMATION:
+       {
+               struct file_id_both_directory_info *fibdinfo;
+
+               fibdinfo = (struct file_id_both_directory_info *)d_info->wptr;
+               memcpy(fibdinfo->FileName, d_info->name, d_info->name_len);
+               fibdinfo->FileName[d_info->name_len] = 0x00;
+               fibdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
+               fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+               break;
+       }
+       case SMB_FIND_FILE_POSIX_INFO:
+       {
+               struct smb2_posix_info *posix_info;
+
+               posix_info = (struct smb2_posix_info *)d_info->wptr;
+               memcpy(posix_info->name, d_info->name, d_info->name_len);
+               posix_info->name[d_info->name_len] = 0x00;
+               posix_info->name_len = cpu_to_le32(d_info->name_len);
+               posix_info->NextEntryOffset =
+                       cpu_to_le32(next_entry_offset);
+               break;
+       }
+       } /* switch (info_level) */
+
+       d_info->num_entry++;
+       d_info->out_buf_len -= next_entry_offset;
+       d_info->wptr += next_entry_offset;
+       return 0;
+}
+
+static int __query_dir(struct dir_context *ctx, const char *name, int namlen,
+                      loff_t offset, u64 ino, unsigned int d_type)
+{
+       struct ksmbd_readdir_data       *buf;
+       struct smb2_query_dir_private   *priv;
+       struct ksmbd_dir_info           *d_info;
+       int                             rc;
+
+       buf     = container_of(ctx, struct ksmbd_readdir_data, ctx);
+       priv    = buf->private;
+       d_info  = priv->d_info;
+
+       /* dot and dotdot entries are already reserved */
+       if (!strcmp(".", name) || !strcmp("..", name))
+               return 0;
+       if (ksmbd_share_veto_filename(priv->work->tcon->share_conf, name))
+               return 0;
+       if (!match_pattern(name, namlen, priv->search_pattern))
+               return 0;
+
+       d_info->name            = name;
+       d_info->name_len        = namlen;
+       rc = reserve_populate_dentry(d_info, priv->info_level);
+       if (rc)
+               return rc;
+       if (d_info->flags & SMB2_RETURN_SINGLE_ENTRY)
+               d_info->out_buf_len = 0;
+       return 0;
+}
+
+static void restart_ctx(struct dir_context *ctx)
+{
+       ctx->pos = 0;
+}
+
+static int verify_info_level(int info_level)
+{
+       switch (info_level) {
+       case FILE_FULL_DIRECTORY_INFORMATION:
+       case FILE_BOTH_DIRECTORY_INFORMATION:
+       case FILE_DIRECTORY_INFORMATION:
+       case FILE_NAMES_INFORMATION:
+       case FILEID_FULL_DIRECTORY_INFORMATION:
+       case FILEID_BOTH_DIRECTORY_INFORMATION:
+       case SMB_FIND_FILE_POSIX_INFO:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+int smb2_query_dir(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_query_directory_req *req;
+       struct smb2_query_directory_rsp *rsp, *rsp_org;
+       struct ksmbd_share_config *share = work->tcon->share_conf;
+       struct ksmbd_file *dir_fp = NULL;
+       struct ksmbd_dir_info d_info;
+       int rc = 0;
+       char *srch_ptr = NULL;
+       unsigned char srch_flag;
+       int buffer_sz;
+       struct smb2_query_dir_private query_dir_private = {NULL, };
+
+       rsp_org = work->response_buf;
+       WORK_BUFFERS(work, req, rsp);
+
+       if (ksmbd_override_fsids(work)) {
+               rsp->hdr.Status = STATUS_NO_MEMORY;
+               smb2_set_err_rsp(work);
+               return -ENOMEM;
+       }
+
+       rc = verify_info_level(req->FileInformationClass);
+       if (rc) {
+               rc = -EFAULT;
+               goto err_out2;
+       }
+
+       dir_fp = ksmbd_lookup_fd_slow(work,
+                                     le64_to_cpu(req->VolatileFileId),
+                                     le64_to_cpu(req->PersistentFileId));
+       if (!dir_fp) {
+               rc = -EBADF;
+               goto err_out2;
+       }
+
+       if (!(dir_fp->daccess & FILE_LIST_DIRECTORY_LE) ||
+           inode_permission(file_mnt_user_ns(dir_fp->filp),
+                            file_inode(dir_fp->filp),
+                            MAY_READ | MAY_EXEC)) {
+               pr_err("no right to enumerate directory (%pd)\n",
+                      dir_fp->filp->f_path.dentry);
+               rc = -EACCES;
+               goto err_out2;
+       }
+
+       if (!S_ISDIR(file_inode(dir_fp->filp)->i_mode)) {
+               pr_err("can't do query dir for a file\n");
+               rc = -EINVAL;
+               goto err_out2;
+       }
+
+       srch_flag = req->Flags;
+       srch_ptr = smb_strndup_from_utf16(req->Buffer,
+                                         le16_to_cpu(req->FileNameLength), 1,
+                                         conn->local_nls);
+       if (IS_ERR(srch_ptr)) {
+               ksmbd_debug(SMB, "Failed to parse the search pattern\n");
+               rc = -EINVAL;
+               goto err_out2;
+       } else {
+               ksmbd_debug(SMB, "Search pattern is %s\n", srch_ptr);
+       }
+
+       ksmbd_debug(SMB, "Directory name is %s\n", dir_fp->filename);
+
+       if (srch_flag & SMB2_REOPEN || srch_flag & SMB2_RESTART_SCANS) {
+               ksmbd_debug(SMB, "Restart directory scan\n");
+               generic_file_llseek(dir_fp->filp, 0, SEEK_SET);
+               restart_ctx(&dir_fp->readdir_data.ctx);
+       }
+
+       memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
+       d_info.wptr = (char *)rsp->Buffer;
+       d_info.rptr = (char *)rsp->Buffer;
+       d_info.out_buf_len = (work->response_sz - (get_rfc1002_len(rsp_org) + 4));
+       d_info.out_buf_len = min_t(int, d_info.out_buf_len, le32_to_cpu(req->OutputBufferLength)) -
+               sizeof(struct smb2_query_directory_rsp);
+       d_info.flags = srch_flag;
+
+       /*
+        * Reserve the dot and dotdot entries at the head of the buffer
+        * in the first response.
+        */
+       rc = ksmbd_populate_dot_dotdot_entries(work, req->FileInformationClass,
+                                              dir_fp, &d_info, srch_ptr,
+                                              smb2_populate_readdir_entry);
+       if (rc == -ENOSPC)
+               rc = 0;
+       else if (rc)
+               goto err_out;
+
+       if (test_share_config_flag(share, KSMBD_SHARE_FLAG_HIDE_DOT_FILES))
+               d_info.hide_dot_file = true;
+
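+       /*
+        * First pass: iterate the directory and reserve an entry for each
+        * matching name; full attributes are filled in by the second pass
+        * below.
+        */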
+       buffer_sz                               = d_info.out_buf_len;
+       d_info.rptr                             = d_info.wptr;
+       query_dir_private.work                  = work;
+       query_dir_private.search_pattern        = srch_ptr;
+       query_dir_private.dir_fp                = dir_fp;
+       query_dir_private.d_info                = &d_info;
+       query_dir_private.info_level            = req->FileInformationClass;
+       dir_fp->readdir_data.private            = &query_dir_private;
+       set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir);
+
+       rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx);
+       if (rc == 0)
+               restart_ctx(&dir_fp->readdir_data.ctx);
+       if (rc == -ENOSPC)
+               rc = 0;
+       if (rc)
+               goto err_out;
+
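+       /*
+        * Second pass: rewind the write pointer and populate attribute
+        * information for every reserved entry.
+        */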
+       d_info.wptr = d_info.rptr;
+       d_info.out_buf_len = buffer_sz;
+       rc = process_query_dir_entries(&query_dir_private);
+       if (rc)
+               goto err_out;
+
+       if (!d_info.data_count && d_info.out_buf_len >= 0) {
+               if (srch_flag & SMB2_RETURN_SINGLE_ENTRY && !is_asterisk(srch_ptr)) {
+                       rsp->hdr.Status = STATUS_NO_SUCH_FILE;
+               } else {
+                       dir_fp->dot_dotdot[0] = dir_fp->dot_dotdot[1] = 0;
+                       rsp->hdr.Status = STATUS_NO_MORE_FILES;
+               }
+               rsp->StructureSize = cpu_to_le16(9);
+               rsp->OutputBufferOffset = cpu_to_le16(0);
+               rsp->OutputBufferLength = cpu_to_le32(0);
+               rsp->Buffer[0] = 0;
+               inc_rfc1001_len(rsp_org, 9);
+       } else {
+               ((struct file_directory_info *)
+               ((char *)rsp->Buffer + d_info.last_entry_offset))
+               ->NextEntryOffset = 0;
+
+               rsp->StructureSize = cpu_to_le16(9);
+               rsp->OutputBufferOffset = cpu_to_le16(72);
+               rsp->OutputBufferLength = cpu_to_le32(d_info.data_count);
+               inc_rfc1001_len(rsp_org, 8 + d_info.data_count);
+       }
+
+       kfree(srch_ptr);
+       ksmbd_fd_put(work, dir_fp);
+       ksmbd_revert_fsids(work);
+       return 0;
+
+err_out:
+       pr_err("error while processing smb2 query dir rc = %d\n", rc);
+       kfree(srch_ptr);
+
+err_out2:
+       if (rc == -EINVAL)
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+       else if (rc == -EACCES)
+               rsp->hdr.Status = STATUS_ACCESS_DENIED;
+       else if (rc == -ENOENT)
+               rsp->hdr.Status = STATUS_NO_SUCH_FILE;
+       else if (rc == -EBADF)
+               rsp->hdr.Status = STATUS_FILE_CLOSED;
+       else if (rc == -ENOMEM)
+               rsp->hdr.Status = STATUS_NO_MEMORY;
+       else if (rc == -EFAULT)
+               rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
+       if (!rsp->hdr.Status)
+               rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+
+       smb2_set_err_rsp(work);
+       ksmbd_fd_put(work, dir_fp);
+       ksmbd_revert_fsids(work);
+       return 0;
+}
+
+/**
+ * buffer_check_err() - helper function to check buffer errors
+ * @reqOutputBufferLength:     max buffer length expected in command response
+ * @rsp:               query info response buffer containing the output buffer length
+ * @infoclass_size:    query info class response buffer size
+ *
+ * Return:     0 on success, otherwise error
+ */
+static int buffer_check_err(int reqOutputBufferLength,
+                           struct smb2_query_info_rsp *rsp, int infoclass_size)
+{
+       if (reqOutputBufferLength < le32_to_cpu(rsp->OutputBufferLength)) {
+               if (reqOutputBufferLength < infoclass_size) {
+                       pr_err("Invalid Buffer Size Requested\n");
+                       rsp->hdr.Status = STATUS_INFO_LENGTH_MISMATCH;
+                       rsp->hdr.smb2_buf_length = cpu_to_be32(sizeof(struct smb2_hdr) - 4);
+                       return -EINVAL;
+               }
+
+               ksmbd_debug(SMB, "Buffer Overflow\n");
+               rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
+               rsp->hdr.smb2_buf_length = cpu_to_be32(sizeof(struct smb2_hdr) - 4 +
+                               reqOutputBufferLength);
+               rsp->OutputBufferLength = cpu_to_le32(reqOutputBufferLength);
+       }
+       return 0;
+}
+
+static void get_standard_info_pipe(struct smb2_query_info_rsp *rsp)
+{
+       struct smb2_file_standard_info *sinfo;
+
+       sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
+
+       sinfo->AllocationSize = cpu_to_le64(4096);
+       sinfo->EndOfFile = cpu_to_le64(0);
+       sinfo->NumberOfLinks = cpu_to_le32(1);
+       sinfo->DeletePending = 1;
+       sinfo->Directory = 0;
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_standard_info));
+       inc_rfc1001_len(rsp, sizeof(struct smb2_file_standard_info));
+}
+
+static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num)
+{
+       struct smb2_file_internal_info *file_info;
+
+       file_info = (struct smb2_file_internal_info *)rsp->Buffer;
+
+       /* any unique number */
+       file_info->IndexNumber = cpu_to_le64(num | (1ULL << 63));
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_internal_info));
+       inc_rfc1001_len(rsp, sizeof(struct smb2_file_internal_info));
+}
+
+static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
+                                  struct smb2_query_info_req *req,
+                                  struct smb2_query_info_rsp *rsp)
+{
+       u64 id;
+       int rc;
+
+       /*
+        * Windows can sometimes send a query file info request on a
+        * pipe without opening it; check for that error condition here.
+        */
+       id = le64_to_cpu(req->VolatileFileId);
+       if (!ksmbd_session_rpc_method(sess, id))
+               return -ENOENT;
+
+       ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n",
+                   req->FileInfoClass, le64_to_cpu(req->VolatileFileId));
+
+       switch (req->FileInfoClass) {
+       case FILE_STANDARD_INFORMATION:
+               get_standard_info_pipe(rsp);
+               rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+                                     rsp, FILE_STANDARD_INFORMATION_SIZE);
+               break;
+       case FILE_INTERNAL_INFORMATION:
+               get_internal_info_pipe(rsp, id);
+               rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+                                     rsp, FILE_INTERNAL_INFORMATION_SIZE);
+               break;
+       default:
+               ksmbd_debug(SMB, "smb2_info_file_pipe for %u not supported\n",
+                           req->FileInfoClass);
+               rc = -EOPNOTSUPP;
+       }
+       return rc;
+}
+
+/**
+ * smb2_get_ea() - handler for smb2 get extended attribute command
+ * @work:      smb work containing query info command buffer
+ * @fp:                ksmbd_file pointer
+ * @req:       get extended attribute request
+ * @rsp:       response buffer pointer
+ * @rsp_org:   base response buffer pointer in case of chained response
+ *
+ * Return:     0 on success, otherwise error
+ */
+static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
+                      struct smb2_query_info_req *req,
+                      struct smb2_query_info_rsp *rsp, void *rsp_org)
+{
+       struct smb2_ea_info *eainfo, *prev_eainfo;
+       char *name, *ptr, *xattr_list = NULL, *buf;
+       int rc, name_len, value_len, xattr_list_len, idx;
+       ssize_t buf_free_len, alignment_bytes, next_offset, rsp_data_cnt = 0;
+       struct smb2_ea_info_req *ea_req = NULL;
+       struct path *path;
+       struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
+
+       if (!(fp->daccess & FILE_READ_EA_LE)) {
+               pr_err("Not permitted to read ext attr : 0x%x\n",
+                      fp->daccess);
+               return -EACCES;
+       }
+
+       path = &fp->filp->f_path;
+       /* A single EA entry is requested by its user.* name */
+       if (req->InputBufferLength) {
+               ea_req = (struct smb2_ea_info_req *)req->Buffer;
+       } else {
+               /* Need to send all EAs if no specific EA is requested */
+               if (le32_to_cpu(req->Flags) & SL_RETURN_SINGLE_ENTRY)
+                       ksmbd_debug(SMB,
+                                   "All EAs are requested but need to send single EA entry in rsp flags 0x%x\n",
+                                   le32_to_cpu(req->Flags));
+       }
+
+       buf_free_len = work->response_sz -
+                       (get_rfc1002_len(rsp_org) + 4) -
+                       sizeof(struct smb2_query_info_rsp);
+
+       if (le32_to_cpu(req->OutputBufferLength) < buf_free_len)
+               buf_free_len = le32_to_cpu(req->OutputBufferLength);
+
+       rc = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
+       if (rc < 0) {
+               rsp->hdr.Status = STATUS_INVALID_HANDLE;
+               goto out;
+       } else if (!rc) { /* there is no EA in the file */
+               ksmbd_debug(SMB, "no ea data in the file\n");
+               goto done;
+       }
+       xattr_list_len = rc;
+
+       ptr = (char *)rsp->Buffer;
+       eainfo = (struct smb2_ea_info *)ptr;
+       prev_eainfo = eainfo;
+       idx = 0;
+
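+       /*
+        * Walk the NUL-separated xattr name list and emit an smb2_ea_info
+        * entry for each user.* attribute, skipping ksmbd-internal stream
+        * and DOS attribute xattrs.
+        */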
+       while (idx < xattr_list_len) {
+               name = xattr_list + idx;
+               name_len = strlen(name);
+
+               ksmbd_debug(SMB, "%s, len %d\n", name, name_len);
+               idx += name_len + 1;
+
+               /*
+                * CIFS does not support EAs outside the user.* namespace;
+                * still, keep the framework generic so that other
+                * attributes can be listed in the future.
+                */
+               if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+                       continue;
+
+               if (!strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
+                            STREAM_PREFIX_LEN))
+                       continue;
+
+               if (req->InputBufferLength &&
+                   strncmp(&name[XATTR_USER_PREFIX_LEN], ea_req->name,
+                           ea_req->EaNameLength))
+                       continue;
+
+               if (!strncmp(&name[XATTR_USER_PREFIX_LEN],
+                            DOS_ATTRIBUTE_PREFIX, DOS_ATTRIBUTE_PREFIX_LEN))
+                       continue;
+
+               if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+                       name_len -= XATTR_USER_PREFIX_LEN;
+
+               ptr = (char *)(&eainfo->name + name_len + 1);
+               buf_free_len -= (offsetof(struct smb2_ea_info, name) +
+                               name_len + 1);
+               /* bail out if the xattr value can't fit in buf_free_len */
+               value_len = ksmbd_vfs_getxattr(user_ns, path->dentry,
+                                              name, &buf);
+               if (value_len <= 0) {
+                       rc = -ENOENT;
+                       rsp->hdr.Status = STATUS_INVALID_HANDLE;
+                       goto out;
+               }
+
+               buf_free_len -= value_len;
+               if (buf_free_len < 0) {
+                       kfree(buf);
+                       break;
+               }
+
+               memcpy(ptr, buf, value_len);
+               kfree(buf);
+
+               ptr += value_len;
+               eainfo->Flags = 0;
+               eainfo->EaNameLength = name_len;
+
+               if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+                       memcpy(eainfo->name, &name[XATTR_USER_PREFIX_LEN],
+                              name_len);
+               else
+                       memcpy(eainfo->name, name, name_len);
+
+               eainfo->name[name_len] = '\0';
+               eainfo->EaValueLength = cpu_to_le16(value_len);
+               next_offset = offsetof(struct smb2_ea_info, name) +
+                       name_len + 1 + value_len;
+
+               /* align the next xattr entry at a 4 byte boundary */
+               alignment_bytes = ((next_offset + 3) & ~3) - next_offset;
+               if (alignment_bytes) {
+                       memset(ptr, '\0', alignment_bytes);
+                       ptr += alignment_bytes;
+                       next_offset += alignment_bytes;
+                       buf_free_len -= alignment_bytes;
+               }
+               eainfo->NextEntryOffset = cpu_to_le32(next_offset);
+               prev_eainfo = eainfo;
+               eainfo = (struct smb2_ea_info *)ptr;
+               rsp_data_cnt += next_offset;
+
+               if (req->InputBufferLength) {
+                       ksmbd_debug(SMB, "single entry requested\n");
+                       break;
+               }
+       }
+
+       /* no more ea entries */
+       prev_eainfo->NextEntryOffset = 0;
+done:
+       rc = 0;
+       if (rsp_data_cnt == 0)
+               rsp->hdr.Status = STATUS_NO_EAS_ON_FILE;
+       rsp->OutputBufferLength = cpu_to_le32(rsp_data_cnt);
+       inc_rfc1001_len(rsp_org, rsp_data_cnt);
+out:
+       kvfree(xattr_list);
+       return rc;
+}
+
+static void get_file_access_info(struct smb2_query_info_rsp *rsp,
+                                struct ksmbd_file *fp, void *rsp_org)
+{
+       struct smb2_file_access_info *file_info;
+
+       file_info = (struct smb2_file_access_info *)rsp->Buffer;
+       file_info->AccessFlags = fp->daccess;
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_access_info));
+       inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_access_info));
+}
+
+static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
+                              struct ksmbd_file *fp, void *rsp_org)
+{
+       struct smb2_file_all_info *basic_info;
+       struct kstat stat;
+       u64 time;
+
+       if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
+               pr_err("no right to read the attributes : 0x%x\n",
+                      fp->daccess);
+               return -EACCES;
+       }
+
+       basic_info = (struct smb2_file_all_info *)rsp->Buffer;
+       generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
+                        &stat);
+       basic_info->CreationTime = cpu_to_le64(fp->create_time);
+       time = ksmbd_UnixTimeToNT(stat.atime);
+       basic_info->LastAccessTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(stat.mtime);
+       basic_info->LastWriteTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(stat.ctime);
+       basic_info->ChangeTime = cpu_to_le64(time);
+       basic_info->Attributes = fp->f_ci->m_fattr;
+       basic_info->Pad1 = 0;
+       rsp->OutputBufferLength =
+               cpu_to_le32(offsetof(struct smb2_file_all_info, AllocationSize));
+       inc_rfc1001_len(rsp_org, offsetof(struct smb2_file_all_info,
+                                         AllocationSize));
+       return 0;
+}
+
+static unsigned long long get_allocation_size(struct inode *inode,
+                                             struct kstat *stat)
+{
+       unsigned long long alloc_size = 0;
+
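+       /*
+        * i_blocks counts 512-byte units, so "<< 9" converts it to bytes;
+        * report the larger of the on-disk allocation and the file size.
+        */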
+       if (!S_ISDIR(stat->mode)) {
+               if ((inode->i_blocks << 9) <= stat->size)
+                       alloc_size = stat->size;
+               else
+                       alloc_size = inode->i_blocks << 9;
+       }
+
+       return alloc_size;
+}
+
+static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
+                                  struct ksmbd_file *fp, void *rsp_org)
+{
+       struct smb2_file_standard_info *sinfo;
+       unsigned int delete_pending;
+       struct inode *inode;
+       struct kstat stat;
+
+       inode = file_inode(fp->filp);
+       generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
+
+       sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
+       delete_pending = ksmbd_inode_pending_delete(fp);
+
+       sinfo->AllocationSize = cpu_to_le64(get_allocation_size(inode, &stat));
+       sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+       sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending);
+       sinfo->DeletePending = delete_pending;
+       sinfo->Directory = S_ISDIR(stat.mode) ? 1 : 0;
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_standard_info));
+       inc_rfc1001_len(rsp_org,
+                       sizeof(struct smb2_file_standard_info));
+}
+
+static void get_file_alignment_info(struct smb2_query_info_rsp *rsp,
+                                   void *rsp_org)
+{
+       struct smb2_file_alignment_info *file_info;
+
+       file_info = (struct smb2_file_alignment_info *)rsp->Buffer;
+       file_info->AlignmentRequirement = 0;
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_alignment_info));
+       inc_rfc1001_len(rsp_org,
+                       sizeof(struct smb2_file_alignment_info));
+}
+
+static int get_file_all_info(struct ksmbd_work *work,
+                            struct smb2_query_info_rsp *rsp,
+                            struct ksmbd_file *fp,
+                            void *rsp_org)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_file_all_info *file_info;
+       unsigned int delete_pending;
+       struct inode *inode;
+       struct kstat stat;
+       int conv_len;
+       char *filename;
+       u64 time;
+
+       if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
+               ksmbd_debug(SMB, "no right to read the attributes : 0x%x\n",
+                           fp->daccess);
+               return -EACCES;
+       }
+
+       filename = convert_to_nt_pathname(fp->filename,
+                                         work->tcon->share_conf->path);
+       if (!filename)
+               return -ENOMEM;
+
+       inode = file_inode(fp->filp);
+       generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
+
+       ksmbd_debug(SMB, "filename = %s\n", filename);
+       delete_pending = ksmbd_inode_pending_delete(fp);
+       file_info = (struct smb2_file_all_info *)rsp->Buffer;
+
+       file_info->CreationTime = cpu_to_le64(fp->create_time);
+       time = ksmbd_UnixTimeToNT(stat.atime);
+       file_info->LastAccessTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(stat.mtime);
+       file_info->LastWriteTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(stat.ctime);
+       file_info->ChangeTime = cpu_to_le64(time);
+       file_info->Attributes = fp->f_ci->m_fattr;
+       file_info->Pad1 = 0;
+       file_info->AllocationSize =
+               cpu_to_le64(get_allocation_size(inode, &stat));
+       file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+       file_info->NumberOfLinks =
+                       cpu_to_le32(get_nlink(&stat) - delete_pending);
+       file_info->DeletePending = delete_pending;
+       file_info->Directory = S_ISDIR(stat.mode) ? 1 : 0;
+       file_info->Pad2 = 0;
+       file_info->IndexNumber = cpu_to_le64(stat.ino);
+       file_info->EASize = 0;
+       file_info->AccessFlags = fp->daccess;
+       file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+       file_info->Mode = fp->coption;
+       file_info->AlignmentRequirement = 0;
+       conv_len = smbConvertToUTF16((__le16 *)file_info->FileName, filename,
+                                    PATH_MAX, conn->local_nls, 0);
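+       /* smbConvertToUTF16() returns UTF-16 code units; double for a byte count */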
+       conv_len *= 2;
+       file_info->FileNameLength = cpu_to_le32(conv_len);
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_all_info) + conv_len - 1);
+       kfree(filename);
+       inc_rfc1001_len(rsp_org, le32_to_cpu(rsp->OutputBufferLength));
+       return 0;
+}
+
+static void get_file_alternate_info(struct ksmbd_work *work,
+                                   struct smb2_query_info_rsp *rsp,
+                                   struct ksmbd_file *fp,
+                                   void *rsp_org)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_file_alt_name_info *file_info;
+       struct dentry *dentry = fp->filp->f_path.dentry;
+       int conv_len;
+
+       spin_lock(&dentry->d_lock);
+       file_info = (struct smb2_file_alt_name_info *)rsp->Buffer;
+       conv_len = ksmbd_extract_shortname(conn,
+                                          dentry->d_name.name,
+                                          file_info->FileName);
+       spin_unlock(&dentry->d_lock);
+       file_info->FileNameLength = cpu_to_le32(conv_len);
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_alt_name_info) + conv_len);
+       inc_rfc1001_len(rsp_org, le32_to_cpu(rsp->OutputBufferLength));
+}
+
+static void get_file_stream_info(struct ksmbd_work *work,
+                                struct smb2_query_info_rsp *rsp,
+                                struct ksmbd_file *fp,
+                                void *rsp_org)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_file_stream_info *file_info;
+       char *stream_name, *xattr_list = NULL, *stream_buf;
+       struct kstat stat;
+       struct path *path = &fp->filp->f_path;
+       ssize_t xattr_list_len;
+       int nbytes = 0, streamlen, stream_name_len, next, idx = 0;
+
+       generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
+                        &stat);
+       file_info = (struct smb2_file_stream_info *)rsp->Buffer;
+
+       xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
+       if (xattr_list_len < 0) {
+               goto out;
+       } else if (!xattr_list_len) {
+               ksmbd_debug(SMB, "empty xattr in the file\n");
+               goto out;
+       }
+
+       while (idx < xattr_list_len) {
+               stream_name = xattr_list + idx;
+               streamlen = strlen(stream_name);
+               idx += streamlen + 1;
+
+               ksmbd_debug(SMB, "%s, len %d\n", stream_name, streamlen);
+
+               if (strncmp(&stream_name[XATTR_USER_PREFIX_LEN],
+                           STREAM_PREFIX, STREAM_PREFIX_LEN))
+                       continue;
+
+               stream_name_len = streamlen - (XATTR_USER_PREFIX_LEN +
+                               STREAM_PREFIX_LEN);
+               streamlen = stream_name_len;
+
+               /* plus room for the leading ':' */
+               streamlen += 1;
+               stream_buf = kmalloc(streamlen + 1, GFP_KERNEL);
+               if (!stream_buf)
+                       break;
+
+               streamlen = snprintf(stream_buf, streamlen + 1,
+                                    ":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
+
+               file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
+               streamlen  = smbConvertToUTF16((__le16 *)file_info->StreamName,
+                                              stream_buf, streamlen,
+                                              conn->local_nls, 0);
+               streamlen *= 2;
+               kfree(stream_buf);
+               file_info->StreamNameLength = cpu_to_le32(streamlen);
+               file_info->StreamSize = cpu_to_le64(stream_name_len);
+               file_info->StreamAllocationSize = cpu_to_le64(stream_name_len);
+
+               next = sizeof(struct smb2_file_stream_info) + streamlen;
+               nbytes += next;
+               file_info->NextEntryOffset = cpu_to_le32(next);
+       }
+
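+       /* if any named stream was listed, add the default ::$DATA entry */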
+       if (nbytes) {
+               file_info = (struct smb2_file_stream_info *)
+                       &rsp->Buffer[nbytes];
+               streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
+                                             "::$DATA", 7, conn->local_nls, 0);
+               streamlen *= 2;
+               file_info->StreamNameLength = cpu_to_le32(streamlen);
+               file_info->StreamSize = S_ISDIR(stat.mode) ? 0 :
+                       cpu_to_le64(stat.size);
+               file_info->StreamAllocationSize = S_ISDIR(stat.mode) ? 0 :
+                       cpu_to_le64(stat.size);
+               nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
+       }
+
+       /* last entry offset should be 0 */
+       file_info->NextEntryOffset = 0;
+out:
+       kvfree(xattr_list);
+
+       rsp->OutputBufferLength = cpu_to_le32(nbytes);
+       inc_rfc1001_len(rsp_org, nbytes);
+}
+
+static void get_file_internal_info(struct smb2_query_info_rsp *rsp,
+                                  struct ksmbd_file *fp, void *rsp_org)
+{
+       struct smb2_file_internal_info *file_info;
+       struct kstat stat;
+
+       generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
+                        &stat);
+       file_info = (struct smb2_file_internal_info *)rsp->Buffer;
+       file_info->IndexNumber = cpu_to_le64(stat.ino);
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_internal_info));
+       inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_internal_info));
+}
+
+static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
+                                     struct ksmbd_file *fp, void *rsp_org)
+{
+       struct smb2_file_ntwrk_info *file_info;
+       struct inode *inode;
+       struct kstat stat;
+       u64 time;
+
+       if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
+               pr_err("no right to read the attributes : 0x%x\n",
+                      fp->daccess);
+               return -EACCES;
+       }
+
+       file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer;
+
+       inode = file_inode(fp->filp);
+       generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
+
+       file_info->CreationTime = cpu_to_le64(fp->create_time);
+       time = ksmbd_UnixTimeToNT(stat.atime);
+       file_info->LastAccessTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(stat.mtime);
+       file_info->LastWriteTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(stat.ctime);
+       file_info->ChangeTime = cpu_to_le64(time);
+       file_info->Attributes = fp->f_ci->m_fattr;
+       file_info->AllocationSize =
+               cpu_to_le64(get_allocation_size(inode, &stat));
+       file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+       file_info->Reserved = cpu_to_le32(0);
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_ntwrk_info));
+       inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_ntwrk_info));
+       return 0;
+}
+
+static void get_file_ea_info(struct smb2_query_info_rsp *rsp, void *rsp_org)
+{
+       struct smb2_file_ea_info *file_info;
+
+       file_info = (struct smb2_file_ea_info *)rsp->Buffer;
+       file_info->EASize = 0;
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_ea_info));
+       inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_ea_info));
+}
+
+static void get_file_position_info(struct smb2_query_info_rsp *rsp,
+                                  struct ksmbd_file *fp, void *rsp_org)
+{
+       struct smb2_file_pos_info *file_info;
+
+       file_info = (struct smb2_file_pos_info *)rsp->Buffer;
+       file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_pos_info));
+       inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_pos_info));
+}
+
+static void get_file_mode_info(struct smb2_query_info_rsp *rsp,
+                              struct ksmbd_file *fp, void *rsp_org)
+{
+       struct smb2_file_mode_info *file_info;
+
+       file_info = (struct smb2_file_mode_info *)rsp->Buffer;
+       file_info->Mode = fp->coption & FILE_MODE_INFO_MASK;
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_mode_info));
+       inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_mode_info));
+}
+
+static void get_file_compression_info(struct smb2_query_info_rsp *rsp,
+                                     struct ksmbd_file *fp, void *rsp_org)
+{
+       struct smb2_file_comp_info *file_info;
+       struct kstat stat;
+
+       generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
+                        &stat);
+
+       file_info = (struct smb2_file_comp_info *)rsp->Buffer;
+       file_info->CompressedFileSize = cpu_to_le64(stat.blocks << 9);
+       file_info->CompressionFormat = COMPRESSION_FORMAT_NONE;
+       file_info->CompressionUnitShift = 0;
+       file_info->ChunkShift = 0;
+       file_info->ClusterShift = 0;
+       memset(&file_info->Reserved[0], 0, 3);
+
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_comp_info));
+       inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_comp_info));
+}
+
+static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp,
+                                      struct ksmbd_file *fp, void *rsp_org)
+{
+       struct smb2_file_attr_tag_info *file_info;
+
+       if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
+               pr_err("no right to read the attributes : 0x%x\n",
+                      fp->daccess);
+               return -EACCES;
+       }
+
+       file_info = (struct smb2_file_attr_tag_info *)rsp->Buffer;
+       file_info->FileAttributes = fp->f_ci->m_fattr;
+       file_info->ReparseTag = 0;
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb2_file_attr_tag_info));
+       inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_attr_tag_info));
+       return 0;
+}
+
+static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
+                               struct ksmbd_file *fp, void *rsp_org)
+{
+       struct smb311_posix_qinfo *file_info;
+       struct inode *inode = file_inode(fp->filp);
+       u64 time;
+
+       file_info = (struct smb311_posix_qinfo *)rsp->Buffer;
+       file_info->CreationTime = cpu_to_le64(fp->create_time);
+       time = ksmbd_UnixTimeToNT(inode->i_atime);
+       file_info->LastAccessTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(inode->i_mtime);
+       file_info->LastWriteTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(inode->i_ctime);
+       file_info->ChangeTime = cpu_to_le64(time);
+       file_info->DosAttributes = fp->f_ci->m_fattr;
+       file_info->Inode = cpu_to_le64(inode->i_ino);
+       file_info->EndOfFile = cpu_to_le64(inode->i_size);
+       file_info->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
+       file_info->HardLinks = cpu_to_le32(inode->i_nlink);
+       file_info->Mode = cpu_to_le32(inode->i_mode);
+       file_info->DeviceId = cpu_to_le32(inode->i_rdev);
+       rsp->OutputBufferLength =
+               cpu_to_le32(sizeof(struct smb311_posix_qinfo));
+       inc_rfc1001_len(rsp_org, sizeof(struct smb311_posix_qinfo));
+       return 0;
+}
+
+static int smb2_get_info_file(struct ksmbd_work *work,
+                             struct smb2_query_info_req *req,
+                             struct smb2_query_info_rsp *rsp, void *rsp_org)
+{
+       struct ksmbd_file *fp;
+       int fileinfoclass = 0;
+       int rc = 0;
+       int file_infoclass_size;
+       unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
+
+       if (test_share_config_flag(work->tcon->share_conf,
+                                  KSMBD_SHARE_FLAG_PIPE)) {
+               /* smb2 info file called for pipe */
+               return smb2_get_info_file_pipe(work->sess, req, rsp);
+       }
+
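+       /*
+        * For a chained (compound) request without an explicit file id,
+        * reuse the id cached from the previous command in the chain.
+        */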
+       if (work->next_smb2_rcv_hdr_off) {
+               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+                       ksmbd_debug(SMB, "Compound request set FID = %llu\n",
+                                   work->compound_fid);
+                       id = work->compound_fid;
+                       pid = work->compound_pfid;
+               }
+       }
+
+       if (!has_file_id(id)) {
+               id = le64_to_cpu(req->VolatileFileId);
+               pid = le64_to_cpu(req->PersistentFileId);
+       }
+
+       fp = ksmbd_lookup_fd_slow(work, id, pid);
+       if (!fp)
+               return -ENOENT;
+
+       fileinfoclass = req->FileInfoClass;
+
+       switch (fileinfoclass) {
+       case FILE_ACCESS_INFORMATION:
+               get_file_access_info(rsp, fp, rsp_org);
+               file_infoclass_size = FILE_ACCESS_INFORMATION_SIZE;
+               break;
+
+       case FILE_BASIC_INFORMATION:
+               rc = get_file_basic_info(rsp, fp, rsp_org);
+               file_infoclass_size = FILE_BASIC_INFORMATION_SIZE;
+               break;
+
+       case FILE_STANDARD_INFORMATION:
+               get_file_standard_info(rsp, fp, rsp_org);
+               file_infoclass_size = FILE_STANDARD_INFORMATION_SIZE;
+               break;
+
+       case FILE_ALIGNMENT_INFORMATION:
+               get_file_alignment_info(rsp, rsp_org);
+               file_infoclass_size = FILE_ALIGNMENT_INFORMATION_SIZE;
+               break;
+
+       case FILE_ALL_INFORMATION:
+               rc = get_file_all_info(work, rsp, fp, rsp_org);
+               file_infoclass_size = FILE_ALL_INFORMATION_SIZE;
+               break;
+
+       case FILE_ALTERNATE_NAME_INFORMATION:
+               get_file_alternate_info(work, rsp, fp, rsp_org);
+               file_infoclass_size = FILE_ALTERNATE_NAME_INFORMATION_SIZE;
+               break;
+
+       case FILE_STREAM_INFORMATION:
+               get_file_stream_info(work, rsp, fp, rsp_org);
+               file_infoclass_size = FILE_STREAM_INFORMATION_SIZE;
+               break;
+
+       case FILE_INTERNAL_INFORMATION:
+               get_file_internal_info(rsp, fp, rsp_org);
+               file_infoclass_size = FILE_INTERNAL_INFORMATION_SIZE;
+               break;
+
+       case FILE_NETWORK_OPEN_INFORMATION:
+               rc = get_file_network_open_info(rsp, fp, rsp_org);
+               file_infoclass_size = FILE_NETWORK_OPEN_INFORMATION_SIZE;
+               break;
+
+       case FILE_EA_INFORMATION:
+               get_file_ea_info(rsp, rsp_org);
+               file_infoclass_size = FILE_EA_INFORMATION_SIZE;
+               break;
+
+       case FILE_FULL_EA_INFORMATION:
+               rc = smb2_get_ea(work, fp, req, rsp, rsp_org);
+               file_infoclass_size = FILE_FULL_EA_INFORMATION_SIZE;
+               break;
+
+       case FILE_POSITION_INFORMATION:
+               get_file_position_info(rsp, fp, rsp_org);
+               file_infoclass_size = FILE_POSITION_INFORMATION_SIZE;
+               break;
+
+       case FILE_MODE_INFORMATION:
+               get_file_mode_info(rsp, fp, rsp_org);
+               file_infoclass_size = FILE_MODE_INFORMATION_SIZE;
+               break;
+
+       case FILE_COMPRESSION_INFORMATION:
+               get_file_compression_info(rsp, fp, rsp_org);
+               file_infoclass_size = FILE_COMPRESSION_INFORMATION_SIZE;
+               break;
+
+       case FILE_ATTRIBUTE_TAG_INFORMATION:
+               rc = get_file_attribute_tag_info(rsp, fp, rsp_org);
+               file_infoclass_size = FILE_ATTRIBUTE_TAG_INFORMATION_SIZE;
+               break;
+       case SMB_FIND_FILE_POSIX_INFO:
+               if (!work->tcon->posix_extensions) {
+                       pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
+                       rc = -EOPNOTSUPP;
+               } else {
+                       rc = find_file_posix_info(rsp, fp, rsp_org);
+                       file_infoclass_size = sizeof(struct smb311_posix_qinfo);
+               }
+               break;
+       default:
+               ksmbd_debug(SMB, "fileinfoclass %d not supported yet\n",
+                           fileinfoclass);
+               rc = -EOPNOTSUPP;
+       }
+       if (!rc)
+               rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+                                     rsp,
+                                     file_infoclass_size);
+       ksmbd_fd_put(work, fp);
+       return rc;
+}
+
+static int smb2_get_info_filesystem(struct ksmbd_work *work,
+                                   struct smb2_query_info_req *req,
+                                   struct smb2_query_info_rsp *rsp, void *rsp_org)
+{
+       struct ksmbd_session *sess = work->sess;
+       struct ksmbd_conn *conn = sess->conn;
+       struct ksmbd_share_config *share = work->tcon->share_conf;
+       int fsinfoclass = 0;
+       struct kstatfs stfs;
+       struct path path;
+       int rc = 0, len;
+       int fs_infoclass_size = 0;
+       int lookup_flags = 0;
+
+       if (test_share_config_flag(share, KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
+               lookup_flags = LOOKUP_FOLLOW;
+
+       rc = ksmbd_vfs_kern_path(share->path, lookup_flags, &path, 0);
+       if (rc) {
+               pr_err("cannot create vfs path\n");
+               return -EIO;
+       }
+
+       rc = vfs_statfs(&path, &stfs);
+       if (rc) {
+               pr_err("cannot do stat of path %s\n", share->path);
+               path_put(&path);
+               return -EIO;
+       }
+
+       fsinfoclass = req->FileInfoClass;
+
+       switch (fsinfoclass) {
+       case FS_DEVICE_INFORMATION:
+       {
+               struct filesystem_device_info *info;
+
+               info = (struct filesystem_device_info *)rsp->Buffer;
+
+               info->DeviceType = cpu_to_le32(stfs.f_type);
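+               /* 0x00000020 = FILE_DEVICE_IS_MOUNTED */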
+               info->DeviceCharacteristics = cpu_to_le32(0x00000020);
+               rsp->OutputBufferLength = cpu_to_le32(8);
+               inc_rfc1001_len(rsp_org, 8);
+               fs_infoclass_size = FS_DEVICE_INFORMATION_SIZE;
+               break;
+       }
+       case FS_ATTRIBUTE_INFORMATION:
+       {
+               struct filesystem_attribute_info *info;
+               size_t sz;
+
+               info = (struct filesystem_attribute_info *)rsp->Buffer;
+               info->Attributes = cpu_to_le32(FILE_SUPPORTS_OBJECT_IDS |
+                                              FILE_PERSISTENT_ACLS |
+                                              FILE_UNICODE_ON_DISK |
+                                              FILE_CASE_PRESERVED_NAMES |
+                                              FILE_CASE_SENSITIVE_SEARCH |
+                                              FILE_SUPPORTS_BLOCK_REFCOUNTING);
+
+               info->Attributes |= cpu_to_le32(server_conf.share_fake_fscaps);
+
+               info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen);
+               len = smbConvertToUTF16((__le16 *)info->FileSystemName,
+                                       "NTFS", PATH_MAX, conn->local_nls, 0);
+               len = len * 2;
+               info->FileSystemNameLen = cpu_to_le32(len);
+               sz = sizeof(struct filesystem_attribute_info) - 2 + len;
+               rsp->OutputBufferLength = cpu_to_le32(sz);
+               inc_rfc1001_len(rsp_org, sz);
+               fs_infoclass_size = FS_ATTRIBUTE_INFORMATION_SIZE;
+               break;
+       }
+       case FS_VOLUME_INFORMATION:
+       {
+               struct filesystem_vol_info *info;
+               size_t sz;
+
+               info = (struct filesystem_vol_info *)(rsp->Buffer);
+               info->VolumeCreationTime = 0;
+               /* use a dummy serial number */
+               info->SerialNumber = cpu_to_le32(0xbc3ac512);
+               len = smbConvertToUTF16((__le16 *)info->VolumeLabel,
+                                       share->name, PATH_MAX,
+                                       conn->local_nls, 0);
+               len = len * 2;
+               info->VolumeLabelSize = cpu_to_le32(len);
+               info->Reserved = 0;
+               sz = sizeof(struct filesystem_vol_info) - 2 + len;
+               rsp->OutputBufferLength = cpu_to_le32(sz);
+               inc_rfc1001_len(rsp_org, sz);
+               fs_infoclass_size = FS_VOLUME_INFORMATION_SIZE;
+               break;
+       }
+       case FS_SIZE_INFORMATION:
+       {
+               struct filesystem_info *info;
+
+               info = (struct filesystem_info *)(rsp->Buffer);
+               info->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
+               info->FreeAllocationUnits = cpu_to_le64(stfs.f_bfree);
+               info->SectorsPerAllocationUnit = cpu_to_le32(1);
+               info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
+               rsp->OutputBufferLength = cpu_to_le32(24);
+               inc_rfc1001_len(rsp_org, 24);
+               fs_infoclass_size = FS_SIZE_INFORMATION_SIZE;
+               break;
+       }
+       case FS_FULL_SIZE_INFORMATION:
+       {
+               struct smb2_fs_full_size_info *info;
+
+               info = (struct smb2_fs_full_size_info *)(rsp->Buffer);
+               info->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
+               info->CallerAvailableAllocationUnits =
+                                       cpu_to_le64(stfs.f_bavail);
+               info->ActualAvailableAllocationUnits =
+                                       cpu_to_le64(stfs.f_bfree);
+               info->SectorsPerAllocationUnit = cpu_to_le32(1);
+               info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
+               rsp->OutputBufferLength = cpu_to_le32(32);
+               inc_rfc1001_len(rsp_org, 32);
+               fs_infoclass_size = FS_FULL_SIZE_INFORMATION_SIZE;
+               break;
+       }
+       case FS_OBJECT_ID_INFORMATION:
+       {
+               struct object_id_info *info;
+
+               info = (struct object_id_info *)(rsp->Buffer);
+
+               if (!user_guest(sess->user))
+                       memcpy(info->objid, user_passkey(sess->user), 16);
+               else
+                       memset(info->objid, 0, 16);
+
+               info->extended_info.magic = cpu_to_le32(EXTENDED_INFO_MAGIC);
+               info->extended_info.version = cpu_to_le32(1);
+               info->extended_info.release = cpu_to_le32(1);
+               info->extended_info.rel_date = 0;
+               memcpy(info->extended_info.version_string, "1.1.0", strlen("1.1.0"));
+               rsp->OutputBufferLength = cpu_to_le32(64);
+               inc_rfc1001_len(rsp_org, 64);
+               fs_infoclass_size = FS_OBJECT_ID_INFORMATION_SIZE;
+               break;
+       }
+       case FS_SECTOR_SIZE_INFORMATION:
+       {
+               struct smb3_fs_ss_info *info;
+
+               info = (struct smb3_fs_ss_info *)(rsp->Buffer);
+
+               info->LogicalBytesPerSector = cpu_to_le32(stfs.f_bsize);
+               info->PhysicalBytesPerSectorForAtomicity =
+                               cpu_to_le32(stfs.f_bsize);
+               info->PhysicalBytesPerSectorForPerf = cpu_to_le32(stfs.f_bsize);
+               info->FSEffPhysicalBytesPerSectorForAtomicity =
+                               cpu_to_le32(stfs.f_bsize);
+               info->Flags = cpu_to_le32(SSINFO_FLAGS_ALIGNED_DEVICE |
+                                   SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE);
+               info->ByteOffsetForSectorAlignment = 0;
+               info->ByteOffsetForPartitionAlignment = 0;
+               rsp->OutputBufferLength = cpu_to_le32(28);
+               inc_rfc1001_len(rsp_org, 28);
+               fs_infoclass_size = FS_SECTOR_SIZE_INFORMATION_SIZE;
+               break;
+       }
+       case FS_CONTROL_INFORMATION:
+       {
+               /*
+                * TODO : The current implementation is based on test
+                * results against a win7 (NTFS) server. It needs to be
+                * modified to get valid quota values from the Linux
+                * kernel.
+                */
+               struct smb2_fs_control_info *info;
+
+               info = (struct smb2_fs_control_info *)(rsp->Buffer);
+               info->FreeSpaceStartFiltering = 0;
+               info->FreeSpaceThreshold = 0;
+               info->FreeSpaceStopFiltering = 0;
+               info->DefaultQuotaThreshold = cpu_to_le64(SMB2_NO_FID);
+               info->DefaultQuotaLimit = cpu_to_le64(SMB2_NO_FID);
+               info->Padding = 0;
+               rsp->OutputBufferLength = cpu_to_le32(48);
+               inc_rfc1001_len(rsp_org, 48);
+               fs_infoclass_size = FS_CONTROL_INFORMATION_SIZE;
+               break;
+       }
+       case FS_POSIX_INFORMATION:
+       {
+               struct filesystem_posix_info *info;
+
+               if (!work->tcon->posix_extensions) {
+                       pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
+                       rc = -EOPNOTSUPP;
+               } else {
+                       info = (struct filesystem_posix_info *)(rsp->Buffer);
+                       info->OptimalTransferSize = cpu_to_le32(stfs.f_bsize);
+                       info->BlockSize = cpu_to_le32(stfs.f_bsize);
+                       info->TotalBlocks = cpu_to_le64(stfs.f_blocks);
+                       info->BlocksAvail = cpu_to_le64(stfs.f_bfree);
+                       info->UserBlocksAvail = cpu_to_le64(stfs.f_bavail);
+                       info->TotalFileNodes = cpu_to_le64(stfs.f_files);
+                       info->FreeFileNodes = cpu_to_le64(stfs.f_ffree);
+                       rsp->OutputBufferLength = cpu_to_le32(56);
+                       inc_rfc1001_len(rsp_org, 56);
+                       fs_infoclass_size = FS_POSIX_INFORMATION_SIZE;
+               }
+               break;
+       }
+       default:
+               path_put(&path);
+               return -EOPNOTSUPP;
+       }
+       rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+                             rsp,
+                             fs_infoclass_size);
+       path_put(&path);
+       return rc;
+}
+
+static int smb2_get_info_sec(struct ksmbd_work *work,
+                            struct smb2_query_info_req *req,
+                            struct smb2_query_info_rsp *rsp, void *rsp_org)
+{
+       struct ksmbd_file *fp;
+       struct user_namespace *user_ns;
+       struct smb_ntsd *pntsd = (struct smb_ntsd *)rsp->Buffer, *ppntsd = NULL;
+       struct smb_fattr fattr = {{0}};
+       struct inode *inode;
+       __u32 secdesclen;
+       unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
+       int addition_info = le32_to_cpu(req->AdditionalInformation);
+       int rc;
+
+       if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
+                             PROTECTED_DACL_SECINFO |
+                             UNPROTECTED_DACL_SECINFO)) {
+               pr_err("Unsupported addition info: 0x%x\n",
+                      addition_info);
+
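+               /* return a minimal self-relative descriptor with no SIDs or ACLs */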
+               pntsd->revision = cpu_to_le16(1);
+               pntsd->type = cpu_to_le16(SELF_RELATIVE | DACL_PROTECTED);
+               pntsd->osidoffset = 0;
+               pntsd->gsidoffset = 0;
+               pntsd->sacloffset = 0;
+               pntsd->dacloffset = 0;
+
+               secdesclen = sizeof(struct smb_ntsd);
+               rsp->OutputBufferLength = cpu_to_le32(secdesclen);
+               inc_rfc1001_len(rsp_org, secdesclen);
+
+               return 0;
+       }
+
+       if (work->next_smb2_rcv_hdr_off) {
+               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+                       ksmbd_debug(SMB, "Compound request set FID = %llu\n",
+                                   work->compound_fid);
+                       id = work->compound_fid;
+                       pid = work->compound_pfid;
+               }
+       }
+
+       if (!has_file_id(id)) {
+               id = le64_to_cpu(req->VolatileFileId);
+               pid = le64_to_cpu(req->PersistentFileId);
+       }
+
+       fp = ksmbd_lookup_fd_slow(work, id, pid);
+       if (!fp)
+               return -ENOENT;
+
+       user_ns = file_mnt_user_ns(fp->filp);
+       inode = file_inode(fp->filp);
+       ksmbd_acls_fattr(&fattr, inode);
+
+       if (test_share_config_flag(work->tcon->share_conf,
+                                  KSMBD_SHARE_FLAG_ACL_XATTR))
+               ksmbd_vfs_get_sd_xattr(work->conn, user_ns,
+                                      fp->filp->f_path.dentry, &ppntsd);
+
+       rc = build_sec_desc(user_ns, pntsd, ppntsd, addition_info,
+                           &secdesclen, &fattr);
+       posix_acl_release(fattr.cf_acls);
+       posix_acl_release(fattr.cf_dacls);
+       kfree(ppntsd);
+       ksmbd_fd_put(work, fp);
+       if (rc)
+               return rc;
+
+       rsp->OutputBufferLength = cpu_to_le32(secdesclen);
+       inc_rfc1001_len(rsp_org, secdesclen);
+       return 0;
+}
+
+/**
+ * smb2_query_info() - handler for smb2 query info command
+ * @work:      smb work containing query info request buffer
+ *
+ * Return:     0 on success, otherwise error
+ */
+int smb2_query_info(struct ksmbd_work *work)
+{
+       struct smb2_query_info_req *req;
+       struct smb2_query_info_rsp *rsp, *rsp_org;
+       int rc = 0;
+
+       rsp_org = work->response_buf;
+       WORK_BUFFERS(work, req, rsp);
+
+       ksmbd_debug(SMB, "GOT query info request\n");
+
+       switch (req->InfoType) {
+       case SMB2_O_INFO_FILE:
+               ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
+               rc = smb2_get_info_file(work, req, rsp, (void *)rsp_org);
+               break;
+       case SMB2_O_INFO_FILESYSTEM:
+               ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILESYSTEM\n");
+               rc = smb2_get_info_filesystem(work, req, rsp, (void *)rsp_org);
+               break;
+       case SMB2_O_INFO_SECURITY:
+               ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
+               rc = smb2_get_info_sec(work, req, rsp, (void *)rsp_org);
+               break;
+       default:
+               ksmbd_debug(SMB, "InfoType %d not supported yet\n",
+                           req->InfoType);
+               rc = -EOPNOTSUPP;
+       }
+
+       if (rc < 0) {
+               if (rc == -EACCES)
+                       rsp->hdr.Status = STATUS_ACCESS_DENIED;
+               else if (rc == -ENOENT)
+                       rsp->hdr.Status = STATUS_FILE_CLOSED;
+               else if (rc == -EIO)
+                       rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+               else if (rc == -EOPNOTSUPP || rsp->hdr.Status == 0)
+                       rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
+               smb2_set_err_rsp(work);
+
+               ksmbd_debug(SMB, "error while processing smb2 query rc = %d\n",
+                           rc);
+               return rc;
+       }
+       rsp->StructureSize = cpu_to_le16(9);
+       rsp->OutputBufferOffset = cpu_to_le16(72);
+       inc_rfc1001_len(rsp_org, 8);
+       return 0;
+}
+
+/**
+ * smb2_close_pipe() - handler for closing IPC pipe
+ * @work:      smb work containing close request buffer
+ *
+ * Return:     0
+ */
+static noinline int smb2_close_pipe(struct ksmbd_work *work)
+{
+       u64 id;
+       struct smb2_close_req *req = work->request_buf;
+       struct smb2_close_rsp *rsp = work->response_buf;
+
+       id = le64_to_cpu(req->VolatileFileId);
+       ksmbd_session_rpc_close(work->sess, id);
+
+       rsp->StructureSize = cpu_to_le16(60);
+       rsp->Flags = 0;
+       rsp->Reserved = 0;
+       rsp->CreationTime = 0;
+       rsp->LastAccessTime = 0;
+       rsp->LastWriteTime = 0;
+       rsp->ChangeTime = 0;
+       rsp->AllocationSize = 0;
+       rsp->EndOfFile = 0;
+       rsp->Attributes = 0;
+       inc_rfc1001_len(rsp, 60);
+       return 0;
+}
+
+/**
+ * smb2_close() - handler for smb2 close file command
+ * @work:      smb work containing close request buffer
+ *
+ * Return:     0
+ */
+int smb2_close(struct ksmbd_work *work)
+{
+       u64 volatile_id = KSMBD_NO_FID;
+       u64 sess_id;
+       struct smb2_close_req *req;
+       struct smb2_close_rsp *rsp;
+       struct smb2_close_rsp *rsp_org;
+       struct ksmbd_conn *conn = work->conn;
+       struct ksmbd_file *fp;
+       struct inode *inode;
+       u64 time;
+       int err = 0;
+
+       rsp_org = work->response_buf;
+       WORK_BUFFERS(work, req, rsp);
+
+       if (test_share_config_flag(work->tcon->share_conf,
+                                  KSMBD_SHARE_FLAG_PIPE)) {
+               ksmbd_debug(SMB, "IPC pipe close request\n");
+               return smb2_close_pipe(work);
+       }
+
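+       /*
+        * For a related (compound) operation, use the session id carried
+        * over from the first request in the chain.
+        */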
+       sess_id = le64_to_cpu(req->hdr.SessionId);
+       if (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS)
+               sess_id = work->compound_sid;
+
+       work->compound_sid = 0;
+       if (check_session_id(conn, sess_id)) {
+               work->compound_sid = sess_id;
+       } else {
+               rsp->hdr.Status = STATUS_USER_SESSION_DELETED;
+               if (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS)
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               err = -EBADF;
+               goto out;
+       }
+
+       if (work->next_smb2_rcv_hdr_off &&
+           !has_file_id(le64_to_cpu(req->VolatileFileId))) {
+               if (!has_file_id(work->compound_fid)) {
+                       /* file already closed, return FILE_CLOSED */
+                       ksmbd_debug(SMB, "file already closed\n");
+                       rsp->hdr.Status = STATUS_FILE_CLOSED;
+                       err = -EBADF;
+                       goto out;
+               } else {
+                       ksmbd_debug(SMB,
+                                   "Compound request set FID = %llu:%llu\n",
+                                   work->compound_fid,
+                                   work->compound_pfid);
+                       volatile_id = work->compound_fid;
+
+                       /* file closed, stored id is not valid anymore */
+                       work->compound_fid = KSMBD_NO_FID;
+                       work->compound_pfid = KSMBD_NO_FID;
+               }
+       } else {
+               volatile_id = le64_to_cpu(req->VolatileFileId);
+       }
+       ksmbd_debug(SMB, "volatile_id = %llu\n", volatile_id);
+
+       rsp->StructureSize = cpu_to_le16(60);
+       rsp->Reserved = 0;
+
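+       /*
+        * SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB asks the server to return the
+        * file's attributes in the close response.
+        */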
+       if (req->Flags == SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB) {
+               fp = ksmbd_lookup_fd_fast(work, volatile_id);
+               if (!fp) {
+                       err = -ENOENT;
+                       goto out;
+               }
+
+               inode = file_inode(fp->filp);
+               rsp->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
+               rsp->AllocationSize = S_ISDIR(inode->i_mode) ? 0 :
+                       cpu_to_le64(inode->i_blocks << 9);
+               rsp->EndOfFile = cpu_to_le64(inode->i_size);
+               rsp->Attributes = fp->f_ci->m_fattr;
+               rsp->CreationTime = cpu_to_le64(fp->create_time);
+               time = ksmbd_UnixTimeToNT(inode->i_atime);
+               rsp->LastAccessTime = cpu_to_le64(time);
+               time = ksmbd_UnixTimeToNT(inode->i_mtime);
+               rsp->LastWriteTime = cpu_to_le64(time);
+               time = ksmbd_UnixTimeToNT(inode->i_ctime);
+               rsp->ChangeTime = cpu_to_le64(time);
+               ksmbd_fd_put(work, fp);
+       } else {
+               rsp->Flags = 0;
+               rsp->AllocationSize = 0;
+               rsp->EndOfFile = 0;
+               rsp->Attributes = 0;
+               rsp->CreationTime = 0;
+               rsp->LastAccessTime = 0;
+               rsp->LastWriteTime = 0;
+               rsp->ChangeTime = 0;
+       }
+
+       err = ksmbd_close_fd(work, volatile_id);
+out:
+       if (err) {
+               if (rsp->hdr.Status == 0)
+                       rsp->hdr.Status = STATUS_FILE_CLOSED;
+               smb2_set_err_rsp(work);
+       } else {
+               inc_rfc1001_len(rsp_org, 60);
+       }
+
+       return 0;
+}
+
+/**
+ * smb2_echo() - handler for smb2 echo(ping) command
+ * @work:      smb work containing echo request buffer
+ *
+ * Return:     0
+ */
+int smb2_echo(struct ksmbd_work *work)
+{
+       struct smb2_echo_rsp *rsp = work->response_buf;
+
+       rsp->StructureSize = cpu_to_le16(4);
+       rsp->Reserved = 0;
+       inc_rfc1001_len(rsp, 4);
+       return 0;
+}
+
+static int smb2_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
+                      struct smb2_file_rename_info *file_info,
+                      struct nls_table *local_nls)
+{
+       struct ksmbd_share_config *share = fp->tcon->share_conf;
+       char *new_name = NULL, *abs_oldname = NULL, *old_name = NULL;
+       char *pathname = NULL;
+       struct path path;
+       bool file_present = true;
+       int rc;
+
+       ksmbd_debug(SMB, "setting FILE_RENAME_INFO\n");
+       pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+       if (!pathname)
+               return -ENOMEM;
+
+       abs_oldname = d_path(&fp->filp->f_path, pathname, PATH_MAX);
+       if (IS_ERR(abs_oldname)) {
+               rc = -EINVAL;
+               goto out;
+       }
+       old_name = strrchr(abs_oldname, '/');
+       if (old_name && old_name[1] != '\0') {
+               old_name++;
+       } else {
+               ksmbd_debug(SMB, "can't get last component in path %s\n",
+                           abs_oldname);
+               rc = -ENOENT;
+               goto out;
+       }
+
+       new_name = smb2_get_name(share,
+                                file_info->FileName,
+                                le32_to_cpu(file_info->FileNameLength),
+                                local_nls);
+       if (IS_ERR(new_name)) {
+               rc = PTR_ERR(new_name);
+               goto out;
+       }
+
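+       /*
+        * A ':' in the new name targets an alternate data stream; record
+        * the stream name as an xattr instead of doing a VFS rename.
+        */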
+       if (strchr(new_name, ':')) {
+               int s_type;
+               char *xattr_stream_name, *stream_name = NULL;
+               size_t xattr_stream_size;
+               int len;
+
+               rc = parse_stream_name(new_name, &stream_name, &s_type);
+               if (rc < 0)
+                       goto out;
+
+               len = strlen(new_name);
+               if (new_name[len - 1] != '/') {
+                       pr_err("renaming the base filename is not allowed\n");
+                       rc = -ESHARE;
+                       goto out;
+               }
+
+               rc = ksmbd_vfs_xattr_stream_name(stream_name,
+                                                &xattr_stream_name,
+                                                &xattr_stream_size,
+                                                s_type);
+               if (rc)
+                       goto out;
+
+               rc = ksmbd_vfs_setxattr(file_mnt_user_ns(fp->filp),
+                                       fp->filp->f_path.dentry,
+                                       xattr_stream_name,
+                                       NULL, 0, 0);
+               if (rc < 0) {
+                       pr_err("failed to store stream name in xattr: %d\n",
+                              rc);
+                       rc = -EINVAL;
+                       goto out;
+               }
+
+               goto out;
+       }
+
+       ksmbd_debug(SMB, "new name %s\n", new_name);
+       rc = ksmbd_vfs_kern_path(new_name, 0, &path, 1);
+       if (rc)
+               file_present = false;
+       else
+               path_put(&path);
+
+       if (ksmbd_share_veto_filename(share, new_name)) {
+               rc = -ENOENT;
+               ksmbd_debug(SMB, "Can't rename vetoed file: %s\n", new_name);
+               goto out;
+       }
+
+       if (file_info->ReplaceIfExists) {
+               if (file_present) {
+                       rc = ksmbd_vfs_remove_file(work, new_name);
+                       if (rc) {
+                               if (rc != -ENOTEMPTY)
+                                       rc = -EINVAL;
+                               ksmbd_debug(SMB, "cannot delete %s, rc %d\n",
+                                           new_name, rc);
+                               goto out;
+                       }
+               }
+       } else {
+               if (file_present &&
+                   strncmp(old_name, path.dentry->d_name.name, strlen(old_name))) {
+                       rc = -EEXIST;
+                       ksmbd_debug(SMB,
+                                   "cannot rename already existing file\n");
+                       goto out;
+               }
+       }
+
+       rc = ksmbd_vfs_fp_rename(work, fp, new_name);
+out:
+       kfree(pathname);
+       if (!IS_ERR(new_name))
+               kfree(new_name);
+       return rc;
+}
+
+static int smb2_create_link(struct ksmbd_work *work,
+                           struct ksmbd_share_config *share,
+                           struct smb2_file_link_info *file_info,
+                           struct file *filp,
+                           struct nls_table *local_nls)
+{
+       char *link_name = NULL, *target_name = NULL, *pathname = NULL;
+       struct path path;
+       bool file_present = true;
+       int rc;
+
+       ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n");
+       pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+       if (!pathname)
+               return -ENOMEM;
+
+       link_name = smb2_get_name(share,
+                                 file_info->FileName,
+                                 le32_to_cpu(file_info->FileNameLength),
+                                 local_nls);
+       if (IS_ERR(link_name) || S_ISDIR(file_inode(filp)->i_mode)) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       ksmbd_debug(SMB, "link name is %s\n", link_name);
+       target_name = d_path(&filp->f_path, pathname, PATH_MAX);
+       if (IS_ERR(target_name)) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       ksmbd_debug(SMB, "target name is %s\n", target_name);
+       rc = ksmbd_vfs_kern_path(link_name, 0, &path, 0);
+       if (rc)
+               file_present = false;
+       else
+               path_put(&path);
+
+       if (file_info->ReplaceIfExists) {
+               if (file_present) {
+                       rc = ksmbd_vfs_remove_file(work, link_name);
+                       if (rc) {
+                               rc = -EINVAL;
+                               ksmbd_debug(SMB, "cannot delete %s\n",
+                                           link_name);
+                               goto out;
+                       }
+               }
+       } else {
+               if (file_present) {
+                       rc = -EEXIST;
+                       ksmbd_debug(SMB, "link already exists\n");
+                       goto out;
+               }
+       }
+
+       rc = ksmbd_vfs_link(work, target_name, link_name);
+       if (rc)
+               rc = -EINVAL;
+out:
+       if (!IS_ERR(link_name))
+               kfree(link_name);
+       kfree(pathname);
+       return rc;
+}
+
+static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
+                              struct ksmbd_share_config *share)
+{
+       struct smb2_file_all_info *file_info;
+       struct iattr attrs;
+       struct iattr temp_attrs;
+       struct file *filp;
+       struct inode *inode;
+       struct user_namespace *user_ns;
+       int rc;
+
+       if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE))
+               return -EACCES;
+
+       file_info = (struct smb2_file_all_info *)buf;
+       attrs.ia_valid = 0;
+       filp = fp->filp;
+       inode = file_inode(filp);
+       user_ns = file_mnt_user_ns(filp);
+
+       if (file_info->CreationTime)
+               fp->create_time = le64_to_cpu(file_info->CreationTime);
+
+       if (file_info->LastAccessTime) {
+               attrs.ia_atime = ksmbd_NTtimeToUnix(file_info->LastAccessTime);
+               attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
+       }
+
+       if (file_info->ChangeTime) {
+               temp_attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
+               attrs.ia_ctime = temp_attrs.ia_ctime;
+               attrs.ia_valid |= ATTR_CTIME;
+       } else {
+               temp_attrs.ia_ctime = inode->i_ctime;
+       }
+
+       if (file_info->LastWriteTime) {
+               attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
+               attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
+       }
+
+       if (file_info->Attributes) {
+               if (!S_ISDIR(inode->i_mode) &&
+                   file_info->Attributes & ATTR_DIRECTORY_LE) {
+                       pr_err("can't change a file to a directory\n");
+                       return -EINVAL;
+               }
+
+               if (!(S_ISDIR(inode->i_mode) && file_info->Attributes == ATTR_NORMAL_LE))
+                       fp->f_ci->m_fattr = file_info->Attributes |
+                               (fp->f_ci->m_fattr & ATTR_DIRECTORY_LE);
+       }
+
+       if (test_share_config_flag(share, KSMBD_SHARE_FLAG_STORE_DOS_ATTRS) &&
+           (file_info->CreationTime || file_info->Attributes)) {
+               struct xattr_dos_attrib da = {0};
+
+               da.version = 4;
+               da.itime = fp->itime;
+               da.create_time = fp->create_time;
+               da.attr = le32_to_cpu(fp->f_ci->m_fattr);
+               da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
+                       XATTR_DOSINFO_ITIME;
+
+               rc = ksmbd_vfs_set_dos_attrib_xattr(user_ns,
+                                                   filp->f_path.dentry, &da);
+               if (rc)
+                       ksmbd_debug(SMB,
+                                   "failed to restore file attribute in EA\n");
+               rc = 0;
+       }
+
+       /*
+        * HACK : set ctime here to avoid ctime being changed
+        * when file_info->ChangeTime is zero.
+        */
+       attrs.ia_ctime = temp_attrs.ia_ctime;
+       attrs.ia_valid |= ATTR_CTIME;
+
+       if (attrs.ia_valid) {
+               struct dentry *dentry = filp->f_path.dentry;
+               struct inode *inode = d_inode(dentry);
+
+               if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+                       return -EACCES;
+
+               rc = setattr_prepare(user_ns, dentry, &attrs);
+               if (rc)
+                       return -EINVAL;
+
+               inode_lock(inode);
+               setattr_copy(user_ns, inode, &attrs);
+               attrs.ia_valid &= ~ATTR_CTIME;
+               rc = notify_change(user_ns, dentry, &attrs, NULL);
+               inode_unlock(inode);
+       }
+       return 0;
+}
+
+static int set_file_allocation_info(struct ksmbd_work *work,
+                                   struct ksmbd_file *fp, char *buf)
+{
+       /*
+        * TODO : This works correctly only when "store dos attributes"
+        * is not set to yes. A logic that works properly with any
+        * smb.conf option still needs to be implemented.
+        */
+
+       struct smb2_file_alloc_info *file_alloc_info;
+       loff_t alloc_blks;
+       struct inode *inode;
+       int rc;
+
+       if (!(fp->daccess & FILE_WRITE_DATA_LE))
+               return -EACCES;
+
+       file_alloc_info = (struct smb2_file_alloc_info *)buf;
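+       /* round the requested allocation size up to whole 512-byte blocks */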
+       alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
+       inode = file_inode(fp->filp);
+
+       if (alloc_blks > inode->i_blocks) {
+               smb_break_all_levII_oplock(work, fp, 1);
+               rc = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
+                                  alloc_blks * 512);
+               if (rc && rc != -EOPNOTSUPP) {
+                       pr_err("vfs_fallocate failed: %d\n", rc);
+                       return rc;
+               }
+       } else if (alloc_blks < inode->i_blocks) {
+               loff_t size;
+
+               /*
+                * The requested allocation size may be smaller than the
+                * current one, in which case the surplus blocks must be
+                * deallocated. Truncate is used to release them, but
+                * truncate also changes the inode size, so the original
+                * size is saved beforehand and written back afterwards.
+                */
+               size = i_size_read(inode);
+               rc = ksmbd_vfs_truncate(work, NULL, fp, alloc_blks * 512);
+               if (rc) {
+                       pr_err("truncate failed! filename : %s, err %d\n",
+                              fp->filename, rc);
+                       return rc;
+               }
+               if (size < alloc_blks * 512)
+                       i_size_write(inode, size);
+       }
+       return 0;
+}
+
+static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
+                               char *buf)
+{
+       struct smb2_file_eof_info *file_eof_info;
+       loff_t newsize;
+       struct inode *inode;
+       int rc;
+
+       if (!(fp->daccess & FILE_WRITE_DATA_LE))
+               return -EACCES;
+
+       file_eof_info = (struct smb2_file_eof_info *)buf;
+       newsize = le64_to_cpu(file_eof_info->EndOfFile);
+       inode = file_inode(fp->filp);
+
+       /*
+        * If FILE_END_OF_FILE_INFORMATION is used on a FAT32 share,
+        * truncation can take so long that the Windows client reports a
+        * network error, because filesystems like FAT32 zero-fill the
+        * truncated range. Skip the truncate on MSDOS/FAT superblocks.
+        */
+       if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
+               ksmbd_debug(SMB, "filename : %s truncated to newsize %lld\n",
+                           fp->filename, newsize);
+               rc = ksmbd_vfs_truncate(work, NULL, fp, newsize);
+               if (rc) {
+                       ksmbd_debug(SMB, "truncate failed! filename : %s err %d\n",
+                                   fp->filename, rc);
+                       if (rc != -EAGAIN)
+                               rc = -EBADF;
+                       return rc;
+               }
+       }
+       return 0;
+}
+
+static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
+                          char *buf)
+{
+       struct ksmbd_file *parent_fp;
+       struct dentry *parent;
+       struct dentry *dentry = fp->filp->f_path.dentry;
+       int ret;
+
+       if (!(fp->daccess & FILE_DELETE_LE)) {
+               pr_err("no right to delete : 0x%x\n", fp->daccess);
+               return -EACCES;
+       }
+
+       if (ksmbd_stream_fd(fp))
+               goto next;
+
+       parent = dget_parent(dentry);
+       ret = ksmbd_vfs_lock_parent(parent, dentry);
+       if (ret) {
+               dput(parent);
+               return ret;
+       }
+
+       parent_fp = ksmbd_lookup_fd_inode(d_inode(parent));
+       inode_unlock(d_inode(parent));
+       dput(parent);
+
+       if (parent_fp) {
+               if (parent_fp->daccess & FILE_DELETE_LE) {
+                       pr_err("parent dir is opened with delete access\n");
+                       return -ESHARE;
+               }
+       }
+next:
+       return smb2_rename(work, fp,
+                          (struct smb2_file_rename_info *)buf,
+                          work->sess->conn->local_nls);
+}
+
+static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
+{
+       struct smb2_file_disposition_info *file_info;
+       struct inode *inode;
+
+       if (!(fp->daccess & FILE_DELETE_LE)) {
+               pr_err("no right to delete : 0x%x\n", fp->daccess);
+               return -EACCES;
+       }
+
+       inode = file_inode(fp->filp);
+       file_info = (struct smb2_file_disposition_info *)buf;
+       if (file_info->DeletePending) {
+               if (S_ISDIR(inode->i_mode) &&
+                   ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY)
+                       return -EBUSY;
+               ksmbd_set_inode_pending_delete(fp);
+       } else {
+               ksmbd_clear_inode_pending_delete(fp);
+       }
+       return 0;
+}
+
+static int set_file_position_info(struct ksmbd_file *fp, char *buf)
+{
+       struct smb2_file_pos_info *file_info;
+       loff_t current_byte_offset;
+       unsigned long sector_size;
+       struct inode *inode;
+
+       inode = file_inode(fp->filp);
+       file_info = (struct smb2_file_pos_info *)buf;
+       current_byte_offset = le64_to_cpu(file_info->CurrentByteOffset);
+       sector_size = inode->i_sb->s_blocksize;
+
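+       /*
+        * With FILE_NO_INTERMEDIATE_BUFFERING the new file position must be
+        * aligned to the underlying block size.
+        */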
+       if (current_byte_offset < 0 ||
+           (fp->coption == FILE_NO_INTERMEDIATE_BUFFERING_LE &&
+            current_byte_offset & (sector_size - 1))) {
+               pr_err("CurrentByteOffset is not valid : %llu\n",
+                      current_byte_offset);
+               return -EINVAL;
+       }
+
+       fp->filp->f_pos = current_byte_offset;
+       return 0;
+}
+
+static int set_file_mode_info(struct ksmbd_file *fp, char *buf)
+{
+       struct smb2_file_mode_info *file_info;
+       __le32 mode;
+
+       file_info = (struct smb2_file_mode_info *)buf;
+       mode = file_info->Mode;
+
+       if ((mode & ~FILE_MODE_INFO_MASK) ||
+           (mode & FILE_SYNCHRONOUS_IO_ALERT_LE &&
+            mode & FILE_SYNCHRONOUS_IO_NONALERT_LE)) {
+               pr_err("Mode is not valid : 0x%x\n", le32_to_cpu(mode));
+               return -EINVAL;
+       }
+
+       /*
+        * TODO: handle FILE_SYNCHRONOUS_IO_ALERT and
+        * FILE_SYNCHRONOUS_IO_NONALERT properly.
+        */
+       ksmbd_vfs_set_fadvise(fp->filp, mode);
+       fp->coption = mode;
+       return 0;
+}
+
+/**
+ * smb2_set_info_file() - handler for smb2 set info command
+ * @work:      smb work containing set info command buffer
+ * @fp:                ksmbd_file pointer
+ * @info_class:        smb2 set info class
+ * @buf:       smb2 set info command buffer
+ * @share:     ksmbd_share_config pointer
+ *
+ * Return:     0 on success, otherwise error
+ * TODO: implement error handling for STATUS_INFO_LENGTH_MISMATCH
+ */
+static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+                             int info_class, char *buf,
+                             struct ksmbd_share_config *share)
+{
+       switch (info_class) {
+       case FILE_BASIC_INFORMATION:
+               return set_file_basic_info(fp, buf, share);
+
+       case FILE_ALLOCATION_INFORMATION:
+               return set_file_allocation_info(work, fp, buf);
+
+       case FILE_END_OF_FILE_INFORMATION:
+               return set_end_of_file_info(work, fp, buf);
+
+       case FILE_RENAME_INFORMATION:
+               if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+                       ksmbd_debug(SMB,
+                                   "User does not have write permission\n");
+                       return -EACCES;
+               }
+               return set_rename_info(work, fp, buf);
+
+       case FILE_LINK_INFORMATION:
+               return smb2_create_link(work, work->tcon->share_conf,
+                                       (struct smb2_file_link_info *)buf, fp->filp,
+                                       work->sess->conn->local_nls);
+
+       case FILE_DISPOSITION_INFORMATION:
+               if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+                       ksmbd_debug(SMB,
+                                   "User does not have write permission\n");
+                       return -EACCES;
+               }
+               return set_file_disposition_info(fp, buf);
+
+       case FILE_FULL_EA_INFORMATION:
+       {
+               if (!(fp->daccess & FILE_WRITE_EA_LE)) {
+                       pr_err("Not permitted to write ext attr: 0x%x\n",
+                              fp->daccess);
+                       return -EACCES;
+               }
+
+               return smb2_set_ea((struct smb2_ea_info *)buf,
+                                  &fp->filp->f_path);
+       }
+
+       case FILE_POSITION_INFORMATION:
+               return set_file_position_info(fp, buf);
+
+       case FILE_MODE_INFORMATION:
+               return set_file_mode_info(fp, buf);
+       }
+
+       pr_err("Unimplemented file info class: %d\n", info_class);
+       return -EOPNOTSUPP;
+}
+
+static int smb2_set_info_sec(struct ksmbd_file *fp, int addition_info,
+                            char *buffer, int buf_len)
+{
+       struct smb_ntsd *pntsd = (struct smb_ntsd *)buffer;
+
+       fp->saccess |= FILE_SHARE_DELETE_LE;
+
+       return set_info_sec(fp->conn, fp->tcon, &fp->filp->f_path, pntsd,
+                       buf_len, false);
+}
+
+/**
+ * smb2_set_info() - handler for smb2 set info command
+ * @work:      smb work containing set info request buffer
+ *
+ * Return:     0 on success, otherwise error
+ */
+int smb2_set_info(struct ksmbd_work *work)
+{
+       struct smb2_set_info_req *req;
+       struct smb2_set_info_rsp *rsp, *rsp_org;
+       struct ksmbd_file *fp;
+       int rc = 0;
+       unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
+
+       ksmbd_debug(SMB, "Received set info request\n");
+
+       rsp_org = work->response_buf;
+       if (work->next_smb2_rcv_hdr_off) {
+               req = ksmbd_req_buf_next(work);
+               rsp = ksmbd_resp_buf_next(work);
+               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+                       ksmbd_debug(SMB, "Compound request set FID = %llu\n",
+                                   work->compound_fid);
+                       id = work->compound_fid;
+                       pid = work->compound_pfid;
+               }
+       } else {
+               req = work->request_buf;
+               rsp = work->response_buf;
+       }
+
+       if (!has_file_id(id)) {
+               id = le64_to_cpu(req->VolatileFileId);
+               pid = le64_to_cpu(req->PersistentFileId);
+       }
+
+       fp = ksmbd_lookup_fd_slow(work, id, pid);
+       if (!fp) {
+               ksmbd_debug(SMB, "Invalid id for set info: %u\n", id);
+               rc = -ENOENT;
+               goto err_out;
+       }
+
+       switch (req->InfoType) {
+       case SMB2_O_INFO_FILE:
+               ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
+               rc = smb2_set_info_file(work, fp, req->FileInfoClass,
+                                       req->Buffer, work->tcon->share_conf);
+               break;
+       case SMB2_O_INFO_SECURITY:
+               ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
+               if (ksmbd_override_fsids(work)) {
+                       rc = -ENOMEM;
+                       goto err_out;
+               }
+               rc = smb2_set_info_sec(fp,
+                                      le32_to_cpu(req->AdditionalInformation),
+                                      req->Buffer,
+                                      le32_to_cpu(req->BufferLength));
+               ksmbd_revert_fsids(work);
+               break;
+       default:
+               rc = -EOPNOTSUPP;
+       }
+
+       if (rc < 0)
+               goto err_out;
+
+       rsp->StructureSize = cpu_to_le16(2);
+       inc_rfc1001_len(rsp_org, 2);
+       ksmbd_fd_put(work, fp);
+       return 0;
+
+err_out:
+       if (rc == -EACCES || rc == -EPERM)
+               rsp->hdr.Status = STATUS_ACCESS_DENIED;
+       else if (rc == -EINVAL)
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+       else if (rc == -ESHARE)
+               rsp->hdr.Status = STATUS_SHARING_VIOLATION;
+       else if (rc == -ENOENT)
+               rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID;
+       else if (rc == -EBUSY || rc == -ENOTEMPTY)
+               rsp->hdr.Status = STATUS_DIRECTORY_NOT_EMPTY;
+       else if (rc == -EAGAIN)
+               rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
+       else if (rc == -EBADF || rc == -ESTALE)
+               rsp->hdr.Status = STATUS_INVALID_HANDLE;
+       else if (rc == -EEXIST)
+               rsp->hdr.Status = STATUS_OBJECT_NAME_COLLISION;
+       else if (rsp->hdr.Status == 0 || rc == -EOPNOTSUPP)
+               rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
+       smb2_set_err_rsp(work);
+       ksmbd_fd_put(work, fp);
+       ksmbd_debug(SMB, "error while processing smb2 set info, rc = %d\n", rc);
+       return rc;
+}
+
+/**
+ * smb2_read_pipe() - handler for smb2 read from IPC pipe
+ * @work:      smb work containing read IPC pipe command buffer
+ *
+ * Return:     0 on success, otherwise error
+ */
+static noinline int smb2_read_pipe(struct ksmbd_work *work)
+{
+       int nbytes = 0, err;
+       u64 id;
+       struct ksmbd_rpc_command *rpc_resp;
+       struct smb2_read_req *req = work->request_buf;
+       struct smb2_read_rsp *rsp = work->response_buf;
+
+       id = le64_to_cpu(req->VolatileFileId);
+
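+       /* Account for the fixed 16 bytes of the read response body up front. */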
+       inc_rfc1001_len(rsp, 16);
+       rpc_resp = ksmbd_rpc_read(work->sess, id);
+       if (rpc_resp) {
+               if (rpc_resp->flags != KSMBD_RPC_OK) {
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               work->aux_payload_buf =
+                       kvmalloc(rpc_resp->payload_sz, GFP_KERNEL | __GFP_ZERO);
+               if (!work->aux_payload_buf) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+
+               memcpy(work->aux_payload_buf, rpc_resp->payload,
+                      rpc_resp->payload_sz);
+
+               nbytes = rpc_resp->payload_sz;
+               work->resp_hdr_sz = get_rfc1002_len(rsp) + 4;
+               work->aux_payload_sz = nbytes;
+               kvfree(rpc_resp);
+       }
+
+       rsp->StructureSize = cpu_to_le16(17);
+       rsp->DataOffset = 80;
+       rsp->Reserved = 0;
+       rsp->DataLength = cpu_to_le32(nbytes);
+       rsp->DataRemaining = 0;
+       rsp->Reserved2 = 0;
+       inc_rfc1001_len(rsp, nbytes);
+       return 0;
+
+out:
+       rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+       smb2_set_err_rsp(work);
+       kvfree(rpc_resp);
+       return err;
+}
+
+static ssize_t smb2_read_rdma_channel(struct ksmbd_work *work,
+                                     struct smb2_read_req *req, void *data_buf,
+                                     size_t length)
+{
+       struct smb2_buffer_desc_v1 *desc =
+               (struct smb2_buffer_desc_v1 *)&req->Buffer[0];
+       int err;
+
+       if (work->conn->dialect == SMB30_PROT_ID &&
+           req->Channel != SMB2_CHANNEL_RDMA_V1)
+               return -EINVAL;
+
+       if (req->ReadChannelInfoOffset == 0 ||
+           le16_to_cpu(req->ReadChannelInfoLength) < sizeof(*desc))
+               return -EINVAL;
+
+       work->need_invalidate_rkey =
+               (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
+       work->remote_key = le32_to_cpu(desc->token);
+
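+       /*
+        * Push the data straight into the client buffer described by the
+        * SMB Direct buffer descriptor (token, offset, length).
+        */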
+       err = ksmbd_conn_rdma_write(work->conn, data_buf, length,
+                                   le32_to_cpu(desc->token),
+                                   le64_to_cpu(desc->offset),
+                                   le32_to_cpu(desc->length));
+       if (err)
+               return err;
+
+       return length;
+}
+
+/**
+ * smb2_read() - handler for smb2 read from file
+ * @work:      smb work containing read command buffer
+ *
+ * Return:     0 on success, otherwise error
+ */
+int smb2_read(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_read_req *req;
+       struct smb2_read_rsp *rsp, *rsp_org;
+       struct ksmbd_file *fp;
+       loff_t offset;
+       size_t length, mincount;
+       ssize_t nbytes = 0, remain_bytes = 0;
+       int err = 0;
+
+       rsp_org = work->response_buf;
+       WORK_BUFFERS(work, req, rsp);
+
+       if (test_share_config_flag(work->tcon->share_conf,
+                                  KSMBD_SHARE_FLAG_PIPE)) {
+               ksmbd_debug(SMB, "IPC pipe read request\n");
+               return smb2_read_pipe(work);
+       }
+
+       fp = ksmbd_lookup_fd_slow(work, le64_to_cpu(req->VolatileFileId),
+                                 le64_to_cpu(req->PersistentFileId));
+       if (!fp) {
+               err = -ENOENT;
+               goto out;
+       }
+
+       if (!(fp->daccess & (FILE_READ_DATA_LE | FILE_READ_ATTRIBUTES_LE))) {
+               pr_err("Not permitted to read : 0x%x\n", fp->daccess);
+               err = -EACCES;
+               goto out;
+       }
+
+       offset = le64_to_cpu(req->Offset);
+       length = le32_to_cpu(req->Length);
+       mincount = le32_to_cpu(req->MinimumCount);
+
+       if (length > conn->vals->max_read_size) {
+               ksmbd_debug(SMB, "read size exceeds max size(%u)\n",
+                           conn->vals->max_read_size);
+               err = -EINVAL;
+               goto out;
+       }
+
+       ksmbd_debug(SMB, "filename %pd, offset %lld, len %zu\n",
+                   fp->filp->f_path.dentry, offset, length);
+
+       work->aux_payload_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
+       if (!work->aux_payload_buf) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       nbytes = ksmbd_vfs_read(work, fp, length, &offset);
+       if (nbytes < 0) {
+               err = nbytes;
+               goto out;
+       }
+
+       if ((nbytes == 0 && length != 0) || nbytes < mincount) {
+               kvfree(work->aux_payload_buf);
+               work->aux_payload_buf = NULL;
+               rsp->hdr.Status = STATUS_END_OF_FILE;
+               smb2_set_err_rsp(work);
+               ksmbd_fd_put(work, fp);
+               return 0;
+       }
+
+       ksmbd_debug(SMB, "nbytes %zu, offset %lld mincount %zu\n",
+                   nbytes, offset, mincount);
+
+       if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
+           req->Channel == SMB2_CHANNEL_RDMA_V1) {
+               /* write data to the client using rdma channel */
+               remain_bytes = smb2_read_rdma_channel(work, req,
+                                                     work->aux_payload_buf,
+                                                     nbytes);
+               kvfree(work->aux_payload_buf);
+               work->aux_payload_buf = NULL;
+
+               nbytes = 0;
+               if (remain_bytes < 0) {
+                       err = (int)remain_bytes;
+                       goto out;
+               }
+       }
+
+       rsp->StructureSize = cpu_to_le16(17);
+       rsp->DataOffset = 80;
+       rsp->Reserved = 0;
+       rsp->DataLength = cpu_to_le32(nbytes);
+       rsp->DataRemaining = cpu_to_le32(remain_bytes);
+       rsp->Reserved2 = 0;
+       inc_rfc1001_len(rsp_org, 16);
+       work->resp_hdr_sz = get_rfc1002_len(rsp_org) + 4;
+       work->aux_payload_sz = nbytes;
+       inc_rfc1001_len(rsp_org, nbytes);
+       ksmbd_fd_put(work, fp);
+       return 0;
+
+out:
+       if (err) {
+               if (err == -EISDIR)
+                       rsp->hdr.Status = STATUS_INVALID_DEVICE_REQUEST;
+               else if (err == -EAGAIN)
+                       rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
+               else if (err == -ENOENT)
+                       rsp->hdr.Status = STATUS_FILE_CLOSED;
+               else if (err == -EACCES)
+                       rsp->hdr.Status = STATUS_ACCESS_DENIED;
+               else if (err == -ESHARE)
+                       rsp->hdr.Status = STATUS_SHARING_VIOLATION;
+               else if (err == -EINVAL)
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               else
+                       rsp->hdr.Status = STATUS_INVALID_HANDLE;
+
+               smb2_set_err_rsp(work);
+       }
+       ksmbd_fd_put(work, fp);
+       return err;
+}
+
+/**
+ * smb2_write_pipe() - handler for smb2 write on IPC pipe
+ * @work:      smb work containing write IPC pipe command buffer
+ *
+ * Return:     0 on success, otherwise error
+ */
+static noinline int smb2_write_pipe(struct ksmbd_work *work)
+{
+       struct smb2_write_req *req = work->request_buf;
+       struct smb2_write_rsp *rsp = work->response_buf;
+       struct ksmbd_rpc_command *rpc_resp;
+       u64 id = 0;
+       int err = 0, ret = 0;
+       char *data_buf;
+       size_t length;
+
+       length = le32_to_cpu(req->Length);
+       id = le64_to_cpu(req->VolatileFileId);
+
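+       /*
+        * In the common case the payload immediately follows the fixed part
+        * of the request; DataOffset is relative to the SMB2 header, hence
+        * the -4 to skip the RFC1001 length prefix in the struct layout.
+        */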
+       if (le16_to_cpu(req->DataOffset) ==
+           (offsetof(struct smb2_write_req, Buffer) - 4)) {
+               data_buf = (char *)&req->Buffer[0];
+       } else {
+               if ((le16_to_cpu(req->DataOffset) > get_rfc1002_len(req)) ||
+                   (le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req))) {
+                       pr_err("invalid write data offset %u, smb_len %u\n",
+                              le16_to_cpu(req->DataOffset),
+                              get_rfc1002_len(req));
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
+                               le16_to_cpu(req->DataOffset));
+       }
+
+       rpc_resp = ksmbd_rpc_write(work->sess, id, data_buf, length);
+       if (rpc_resp) {
+               if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
+                       rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+                       kvfree(rpc_resp);
+                       smb2_set_err_rsp(work);
+                       return -EOPNOTSUPP;
+               }
+               if (rpc_resp->flags != KSMBD_RPC_OK) {
+                       rsp->hdr.Status = STATUS_INVALID_HANDLE;
+                       smb2_set_err_rsp(work);
+                       kvfree(rpc_resp);
+                       return ret;
+               }
+               kvfree(rpc_resp);
+       }
+
+       rsp->StructureSize = cpu_to_le16(17);
+       rsp->DataOffset = 0;
+       rsp->Reserved = 0;
+       rsp->DataLength = cpu_to_le32(length);
+       rsp->DataRemaining = 0;
+       rsp->Reserved2 = 0;
+       inc_rfc1001_len(rsp, 16);
+       return 0;
+out:
+       if (err) {
+               rsp->hdr.Status = STATUS_INVALID_HANDLE;
+               smb2_set_err_rsp(work);
+       }
+
+       return err;
+}
+
+static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
+                                      struct smb2_write_req *req,
+                                      struct ksmbd_file *fp,
+                                      loff_t offset, size_t length, bool sync)
+{
+       struct smb2_buffer_desc_v1 *desc;
+       char *data_buf;
+       int ret;
+       ssize_t nbytes;
+
+       desc = (struct smb2_buffer_desc_v1 *)&req->Buffer[0];
+
+       if (work->conn->dialect == SMB30_PROT_ID &&
+           req->Channel != SMB2_CHANNEL_RDMA_V1)
+               return -EINVAL;
+
+       if (req->Length != 0 || req->DataOffset != 0)
+               return -EINVAL;
+
+       if (req->WriteChannelInfoOffset == 0 ||
+           le16_to_cpu(req->WriteChannelInfoLength) < sizeof(*desc))
+               return -EINVAL;
+
+       work->need_invalidate_rkey =
+               (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
+       work->remote_key = le32_to_cpu(desc->token);
+
+       data_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
+       if (!data_buf)
+               return -ENOMEM;
+
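+       /*
+        * Pull the write payload from the client buffer over the RDMA
+        * channel before writing it to the file below.
+        */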
+       ret = ksmbd_conn_rdma_read(work->conn, data_buf, length,
+                                  le32_to_cpu(desc->token),
+                                  le64_to_cpu(desc->offset),
+                                  le32_to_cpu(desc->length));
+       if (ret < 0) {
+               kvfree(data_buf);
+               return ret;
+       }
+
+       ret = ksmbd_vfs_write(work, fp, data_buf, length, &offset, sync, &nbytes);
+       kvfree(data_buf);
+       if (ret < 0)
+               return ret;
+
+       return nbytes;
+}
+
+/**
+ * smb2_write() - handler for smb2 write to file
+ * @work:      smb work containing write command buffer
+ *
+ * Return:     0 on success, otherwise error
+ */
+int smb2_write(struct ksmbd_work *work)
+{
+       struct smb2_write_req *req;
+       struct smb2_write_rsp *rsp, *rsp_org;
+       struct ksmbd_file *fp = NULL;
+       loff_t offset;
+       size_t length;
+       ssize_t nbytes;
+       char *data_buf;
+       bool writethrough = false;
+       int err = 0;
+
+       rsp_org = work->response_buf;
+       WORK_BUFFERS(work, req, rsp);
+
+       if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_PIPE)) {
+               ksmbd_debug(SMB, "IPC pipe write request\n");
+               return smb2_write_pipe(work);
+       }
+
+       if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+               ksmbd_debug(SMB, "User does not have write permission\n");
+               err = -EACCES;
+               goto out;
+       }
+
+       fp = ksmbd_lookup_fd_slow(work, le64_to_cpu(req->VolatileFileId),
+                                 le64_to_cpu(req->PersistentFileId));
+       if (!fp) {
+               err = -ENOENT;
+               goto out;
+       }
+
+       if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_READ_ATTRIBUTES_LE))) {
+               pr_err("Not permitted to write : 0x%x\n", fp->daccess);
+               err = -EACCES;
+               goto out;
+       }
+
+       offset = le64_to_cpu(req->Offset);
+       length = le32_to_cpu(req->Length);
+
+       if (length > work->conn->vals->max_write_size) {
+               ksmbd_debug(SMB, "write size exceeds max size(%u)\n",
+                           work->conn->vals->max_write_size);
+               err = -EINVAL;
+               goto out;
+       }
+
+       if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
+               writethrough = true;
+
+       if (req->Channel != SMB2_CHANNEL_RDMA_V1 &&
+           req->Channel != SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
+               if (le16_to_cpu(req->DataOffset) ==
+                   (offsetof(struct smb2_write_req, Buffer) - 4)) {
+                       data_buf = (char *)&req->Buffer[0];
+               } else {
+                       if ((le16_to_cpu(req->DataOffset) > get_rfc1002_len(req)) ||
+                           (le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req))) {
+                               pr_err("invalid write data offset %u, smb_len %u\n",
+                                      le16_to_cpu(req->DataOffset),
+                                      get_rfc1002_len(req));
+                               err = -EINVAL;
+                               goto out;
+                       }
+
+                       data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
+                                       le16_to_cpu(req->DataOffset));
+               }
+
+               ksmbd_debug(SMB, "flags %u\n", le32_to_cpu(req->Flags));
+               if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
+                       writethrough = true;
+
+               ksmbd_debug(SMB, "filename %pd, offset %lld, len %zu\n",
+                           fp->filp->f_path.dentry, offset, length);
+               err = ksmbd_vfs_write(work, fp, data_buf, length, &offset,
+                                     writethrough, &nbytes);
+               if (err < 0)
+                       goto out;
+       } else {
+               /* read data from the client using rdma channel, and
+                * write the data.
+                */
+               nbytes = smb2_write_rdma_channel(work, req, fp, offset,
+                                                le32_to_cpu(req->RemainingBytes),
+                                                writethrough);
+               if (nbytes < 0) {
+                       err = (int)nbytes;
+                       goto out;
+               }
+       }
+
+       rsp->StructureSize = cpu_to_le16(17);
+       rsp->DataOffset = 0;
+       rsp->Reserved = 0;
+       rsp->DataLength = cpu_to_le32(nbytes);
+       rsp->DataRemaining = 0;
+       rsp->Reserved2 = 0;
+       inc_rfc1001_len(rsp_org, 16);
+       ksmbd_fd_put(work, fp);
+       return 0;
+
+out:
+       if (err == -EAGAIN)
+               rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
+       else if (err == -ENOSPC || err == -EFBIG)
+               rsp->hdr.Status = STATUS_DISK_FULL;
+       else if (err == -ENOENT)
+               rsp->hdr.Status = STATUS_FILE_CLOSED;
+       else if (err == -EACCES)
+               rsp->hdr.Status = STATUS_ACCESS_DENIED;
+       else if (err == -ESHARE)
+               rsp->hdr.Status = STATUS_SHARING_VIOLATION;
+       else if (err == -EINVAL)
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+       else
+               rsp->hdr.Status = STATUS_INVALID_HANDLE;
+
+       smb2_set_err_rsp(work);
+       ksmbd_fd_put(work, fp);
+       return err;
+}
+
+/**
+ * smb2_flush() - handler for smb2 flush file - fsync
+ * @work:      smb work containing flush command buffer
+ *
+ * Return:     0 on success, otherwise error
+ */
+int smb2_flush(struct ksmbd_work *work)
+{
+       struct smb2_flush_req *req;
+       struct smb2_flush_rsp *rsp, *rsp_org;
+       int err;
+
+       rsp_org = work->response_buf;
+       WORK_BUFFERS(work, req, rsp);
+
+       ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n",
+                   le64_to_cpu(req->VolatileFileId));
+
+       err = ksmbd_vfs_fsync(work,
+                             le64_to_cpu(req->VolatileFileId),
+                             le64_to_cpu(req->PersistentFileId));
+       if (err)
+               goto out;
+
+       rsp->StructureSize = cpu_to_le16(4);
+       rsp->Reserved = 0;
+       inc_rfc1001_len(rsp_org, 4);
+       return 0;
+
+out:
+       if (err) {
+               rsp->hdr.Status = STATUS_INVALID_HANDLE;
+               smb2_set_err_rsp(work);
+       }
+
+       return err;
+}
+
+/**
+ * smb2_cancel() - handler for smb2 cancel command
+ * @work:      smb work containing cancel command buffer
+ *
+ * Return:     0 on success, otherwise error
+ */
+int smb2_cancel(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_hdr *hdr = work->request_buf;
+       struct smb2_hdr *chdr;
+       struct ksmbd_work *cancel_work = NULL;
+       int canceled = 0;
+       struct list_head *command_list;
+
+       ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
+                   hdr->MessageId, hdr->Flags);
+
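+       /*
+        * An asynchronous request is matched by AsyncId on the async list;
+        * a synchronous one is matched by MessageId on the request list.
+        */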
+       if (hdr->Flags & SMB2_FLAGS_ASYNC_COMMAND) {
+               command_list = &conn->async_requests;
+
+               spin_lock(&conn->request_lock);
+               list_for_each_entry(cancel_work, command_list,
+                                   async_request_entry) {
+                       chdr = cancel_work->request_buf;
+
+                       if (cancel_work->async_id !=
+                           le64_to_cpu(hdr->Id.AsyncId))
+                               continue;
+
+                       ksmbd_debug(SMB,
+                                   "smb2 with AsyncId %llu cancelled command = 0x%x\n",
+                                   le64_to_cpu(hdr->Id.AsyncId),
+                                   le16_to_cpu(chdr->Command));
+                       canceled = 1;
+                       break;
+               }
+               spin_unlock(&conn->request_lock);
+       } else {
+               command_list = &conn->requests;
+
+               spin_lock(&conn->request_lock);
+               list_for_each_entry(cancel_work, command_list, request_entry) {
+                       chdr = cancel_work->request_buf;
+
+                       if (chdr->MessageId != hdr->MessageId ||
+                           cancel_work == work)
+                               continue;
+
+                       ksmbd_debug(SMB,
+                                   "smb2 with mid %llu cancelled command = 0x%x\n",
+                                   le64_to_cpu(hdr->MessageId),
+                                   le16_to_cpu(chdr->Command));
+                       canceled = 1;
+                       break;
+               }
+               spin_unlock(&conn->request_lock);
+       }
+
+       if (canceled) {
+               cancel_work->state = KSMBD_WORK_CANCELLED;
+               if (cancel_work->cancel_fn)
+                       cancel_work->cancel_fn(cancel_work->cancel_argv);
+       }
+
+       /* Send no response for the SMB2_CANCEL command itself */
+       work->send_no_response = 1;
+       return 0;
+}
+
+struct file_lock *smb_flock_init(struct file *f)
+{
+       struct file_lock *fl;
+
+       fl = locks_alloc_lock();
+       if (!fl)
+               goto out;
+
+       locks_init_lock(fl);
+
+       fl->fl_owner = f;
+       fl->fl_pid = current->tgid;
+       fl->fl_file = f;
+       fl->fl_flags = FL_POSIX;
+       fl->fl_ops = NULL;
+       fl->fl_lmops = NULL;
+
+out:
+       return fl;
+}
+
+static int smb2_set_flock_flags(struct file_lock *flock, int flags)
+{
+       int cmd = -EINVAL;
+
+       /* Check for an invalid flag combination in the lock request */
+       switch (flags) {
+       case SMB2_LOCKFLAG_SHARED:
+               ksmbd_debug(SMB, "received shared request\n");
+               cmd = F_SETLKW;
+               flock->fl_type = F_RDLCK;
+               flock->fl_flags |= FL_SLEEP;
+               break;
+       case SMB2_LOCKFLAG_EXCLUSIVE:
+               ksmbd_debug(SMB, "received exclusive request\n");
+               cmd = F_SETLKW;
+               flock->fl_type = F_WRLCK;
+               flock->fl_flags |= FL_SLEEP;
+               break;
+       case SMB2_LOCKFLAG_SHARED | SMB2_LOCKFLAG_FAIL_IMMEDIATELY:
+               ksmbd_debug(SMB,
+                           "received shared & fail immediately request\n");
+               cmd = F_SETLK;
+               flock->fl_type = F_RDLCK;
+               break;
+       case SMB2_LOCKFLAG_EXCLUSIVE | SMB2_LOCKFLAG_FAIL_IMMEDIATELY:
+               ksmbd_debug(SMB,
+                           "received exclusive & fail immediately request\n");
+               cmd = F_SETLK;
+               flock->fl_type = F_WRLCK;
+               break;
+       case SMB2_LOCKFLAG_UNLOCK:
+               ksmbd_debug(SMB, "received unlock request\n");
+               flock->fl_type = F_UNLCK;
+               cmd = 0;
+               break;
+       }
+
+       return cmd;
+}
+
+static struct ksmbd_lock *smb2_lock_init(struct file_lock *flock,
+                                        unsigned int cmd, int flags,
+                                        struct list_head *lock_list)
+{
+       struct ksmbd_lock *lock;
+
+       lock = kzalloc(sizeof(struct ksmbd_lock), GFP_KERNEL);
+       if (!lock)
+               return NULL;
+
+       lock->cmd = cmd;
+       lock->fl = flock;
+       lock->start = flock->fl_start;
+       lock->end = flock->fl_end;
+       lock->flags = flags;
+       if (lock->start == lock->end)
+               lock->zero_len = 1;
+       INIT_LIST_HEAD(&lock->clist);
+       INIT_LIST_HEAD(&lock->flist);
+       INIT_LIST_HEAD(&lock->llist);
+       list_add_tail(&lock->llist, lock_list);
+
+       return lock;
+}
+
+static void smb2_remove_blocked_lock(void **argv)
+{
+       struct file_lock *flock = (struct file_lock *)argv[0];
+
+       ksmbd_vfs_posix_lock_unblock(flock);
+       wake_up(&flock->fl_wait);
+}
+
+static inline bool lock_defer_pending(struct file_lock *fl)
+{
+       /* check pending lock waiters */
+       return waitqueue_active(&fl->fl_wait);
+}
+
+/**
+ * smb2_lock() - handler for smb2 file lock command
+ * @work:      smb work containing lock command buffer
+ *
+ * Return:     0 on success, otherwise error
+ */
+int smb2_lock(struct ksmbd_work *work)
+{
+       struct smb2_lock_req *req = work->request_buf;
+       struct smb2_lock_rsp *rsp = work->response_buf;
+       struct smb2_lock_element *lock_ele;
+       struct ksmbd_file *fp = NULL;
+       struct file_lock *flock = NULL;
+       struct file *filp = NULL;
+       int lock_count;
+       int flags = 0;
+       int cmd = 0;
+       int err = -EIO, i, rc = 0;
+       u64 lock_start, lock_length;
+       struct ksmbd_lock *smb_lock = NULL, *cmp_lock, *tmp, *tmp2;
+       struct ksmbd_conn *conn;
+       int nolock = 0;
+       LIST_HEAD(lock_list);
+       LIST_HEAD(rollback_list);
+       int prior_lock = 0;
+
+       ksmbd_debug(SMB, "Received lock request\n");
+       fp = ksmbd_lookup_fd_slow(work,
+                                 le64_to_cpu(req->VolatileFileId),
+                                 le64_to_cpu(req->PersistentFileId));
+       if (!fp) {
+               ksmbd_debug(SMB, "Invalid file id for lock : %llu\n",
+                           le64_to_cpu(req->VolatileFileId));
+               err = -ENOENT;
+               goto out2;
+       }
+
+       filp = fp->filp;
+       lock_count = le16_to_cpu(req->LockCount);
+       lock_ele = req->locks;
+
+       ksmbd_debug(SMB, "lock count is %d\n", lock_count);
+       if (!lock_count) {
+               err = -EINVAL;
+               goto out2;
+       }
+
+       for (i = 0; i < lock_count; i++) {
+               flags = le32_to_cpu(lock_ele[i].Flags);
+
+               flock = smb_flock_init(filp);
+               if (!flock)
+                       goto out;
+
+               cmd = smb2_set_flock_flags(flock, flags);
+
+               lock_start = le64_to_cpu(lock_ele[i].Offset);
+               lock_length = le64_to_cpu(lock_ele[i].Length);
+               if (lock_start > U64_MAX - lock_length) {
+                       pr_err("Invalid lock range requested\n");
+                       rsp->hdr.Status = STATUS_INVALID_LOCK_RANGE;
+                       goto out;
+               }
+
+               if (lock_start > OFFSET_MAX)
+                       flock->fl_start = OFFSET_MAX;
+               else
+                       flock->fl_start = lock_start;
+
+               lock_length = le64_to_cpu(lock_ele[i].Length);
+               if (lock_length > OFFSET_MAX - flock->fl_start)
+                       lock_length = OFFSET_MAX - flock->fl_start;
+
+               flock->fl_end = flock->fl_start + lock_length;
+
+               if (flock->fl_end < flock->fl_start) {
+                       ksmbd_debug(SMB,
+                                   "the end offset(%llx) is smaller than the start offset(%llx)\n",
+                                   flock->fl_end, flock->fl_start);
+                       rsp->hdr.Status = STATUS_INVALID_LOCK_RANGE;
+                       goto out;
+               }
+
+               /* Check conflict locks in one request */
+               list_for_each_entry(cmp_lock, &lock_list, llist) {
+                       if (cmp_lock->fl->fl_start <= flock->fl_start &&
+                           cmp_lock->fl->fl_end >= flock->fl_end) {
+                               if (cmp_lock->fl->fl_type != F_UNLCK &&
+                                   flock->fl_type != F_UNLCK) {
+                                       pr_err("conflict two locks in one request\n");
+                                       err = -EINVAL;
+                                       goto out;
+                               }
+                       }
+               }
+
+               smb_lock = smb2_lock_init(flock, cmd, flags, &lock_list);
+               if (!smb_lock) {
+                       err = -EINVAL;
+                       goto out;
+               }
+       }
+
+       list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) {
+               if (smb_lock->cmd < 0) {
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               if (!(smb_lock->flags & SMB2_LOCKFLAG_MASK)) {
+                       err = -EINVAL;
+                       goto out;
+               }
+
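+               /* A single lock request must not mix lock and unlock elements. */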
+               if ((prior_lock & (SMB2_LOCKFLAG_EXCLUSIVE | SMB2_LOCKFLAG_SHARED) &&
+                    smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) ||
+                   (prior_lock == SMB2_LOCKFLAG_UNLOCK &&
+                    !(smb_lock->flags & SMB2_LOCKFLAG_UNLOCK))) {
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               prior_lock = smb_lock->flags;
+
+               if (!(smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) &&
+                   !(smb_lock->flags & SMB2_LOCKFLAG_FAIL_IMMEDIATELY))
+                       goto no_check_cl;
+
+               nolock = 1;
+               /* check locks in connection list */
+               read_lock(&conn_list_lock);
+               list_for_each_entry(conn, &conn_list, conns_list) {
+                       spin_lock(&conn->llist_lock);
+                       list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
+                               if (file_inode(cmp_lock->fl->fl_file) !=
+                                   file_inode(smb_lock->fl->fl_file))
+                                       continue;
+
+                               if (smb_lock->fl->fl_type == F_UNLCK) {
+                                       if (cmp_lock->fl->fl_file == smb_lock->fl->fl_file &&
+                                           cmp_lock->start == smb_lock->start &&
+                                           cmp_lock->end == smb_lock->end &&
+                                           !lock_defer_pending(cmp_lock->fl)) {
+                                               nolock = 0;
+                                               list_del(&cmp_lock->flist);
+                                               list_del(&cmp_lock->clist);
+                                               spin_unlock(&conn->llist_lock);
+                                               read_unlock(&conn_list_lock);
+
+                                               locks_free_lock(cmp_lock->fl);
+                                               kfree(cmp_lock);
+                                               goto out_check_cl;
+                                       }
+                                       continue;
+                               }
+
+                               if (cmp_lock->fl->fl_file == smb_lock->fl->fl_file) {
+                                       if (smb_lock->flags & SMB2_LOCKFLAG_SHARED)
+                                               continue;
+                               } else {
+                                       if (cmp_lock->flags & SMB2_LOCKFLAG_SHARED)
+                                               continue;
+                               }
+
+                               /* check zero byte lock range */
+                               if (cmp_lock->zero_len && !smb_lock->zero_len &&
+                                   cmp_lock->start > smb_lock->start &&
+                                   cmp_lock->start < smb_lock->end) {
+                                       spin_unlock(&conn->llist_lock);
+                                       read_unlock(&conn_list_lock);
+                                       pr_err("previous lock conflict with zero byte lock range\n");
+                                       goto out;
+                               }
+
+                               if (smb_lock->zero_len && !cmp_lock->zero_len &&
+                                   smb_lock->start > cmp_lock->start &&
+                                   smb_lock->start < cmp_lock->end) {
+                                       spin_unlock(&conn->llist_lock);
+                                       read_unlock(&conn_list_lock);
+                                       pr_err("current lock conflict with zero byte lock range\n");
+                                       goto out;
+                               }
+
+                               if (((cmp_lock->start <= smb_lock->start &&
+                                     cmp_lock->end > smb_lock->start) ||
+                                    (cmp_lock->start < smb_lock->end &&
+                                     cmp_lock->end >= smb_lock->end)) &&
+                                   !cmp_lock->zero_len && !smb_lock->zero_len) {
+                                       spin_unlock(&conn->llist_lock);
+                                       read_unlock(&conn_list_lock);
+                                       pr_err("lock operation on an exclusively locked range is not allowed\n");
+                                       goto out;
+                               }
+                       }
+                       spin_unlock(&conn->llist_lock);
+               }
+               read_unlock(&conn_list_lock);
+out_check_cl:
+               if (smb_lock->fl->fl_type == F_UNLCK && nolock) {
+                       pr_err("Attempt to unlock a range that is not locked\n");
+                       rsp->hdr.Status = STATUS_RANGE_NOT_LOCKED;
+                       goto out;
+               }
+
+no_check_cl:
+               if (smb_lock->zero_len) {
+                       err = 0;
+                       goto skip;
+               }
+
+               flock = smb_lock->fl;
+               list_del(&smb_lock->llist);
+retry:
+               rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL);
+skip:
+               if (flags & SMB2_LOCKFLAG_UNLOCK) {
+                       if (!rc) {
+                               ksmbd_debug(SMB, "File unlocked\n");
+                       } else if (rc == -ENOENT) {
+                               rsp->hdr.Status = STATUS_NOT_LOCKED;
+                               goto out;
+                       }
+                       locks_free_lock(flock);
+                       kfree(smb_lock);
+               } else {
+                       if (rc == FILE_LOCK_DEFERRED) {
+                               void **argv;
+
+                               ksmbd_debug(SMB,
+                                           "would have to wait to acquire lock\n");
+                               spin_lock(&work->conn->llist_lock);
+                               list_add_tail(&smb_lock->clist,
+                                             &work->conn->lock_list);
+                               spin_unlock(&work->conn->llist_lock);
+                               list_add(&smb_lock->llist, &rollback_list);
+
+                               argv = kmalloc(sizeof(void *), GFP_KERNEL);
+                               if (!argv) {
+                                       err = -ENOMEM;
+                                       goto out;
+                               }
+                               argv[0] = flock;
+
+                               rc = setup_async_work(work,
+                                                     smb2_remove_blocked_lock,
+                                                     argv);
+                               if (rc) {
+                                       err = -ENOMEM;
+                                       goto out;
+                               }
+                               spin_lock(&fp->f_lock);
+                               list_add(&work->fp_entry, &fp->blocked_works);
+                               spin_unlock(&fp->f_lock);
+
+                               smb2_send_interim_resp(work, STATUS_PENDING);
+
+                               ksmbd_vfs_posix_lock_wait(flock);
+
+                               if (work->state != KSMBD_WORK_ACTIVE) {
+                                       list_del(&smb_lock->llist);
+                                       spin_lock(&work->conn->llist_lock);
+                                       list_del(&smb_lock->clist);
+                                       spin_unlock(&work->conn->llist_lock);
+                                       locks_free_lock(flock);
+
+                                       if (work->state == KSMBD_WORK_CANCELLED) {
+                                               spin_lock(&fp->f_lock);
+                                               list_del(&work->fp_entry);
+                                               spin_unlock(&fp->f_lock);
+                                               rsp->hdr.Status =
+                                                       STATUS_CANCELLED;
+                                               kfree(smb_lock);
+                                               smb2_send_interim_resp(work,
+                                                                      STATUS_CANCELLED);
+                                               work->send_no_response = 1;
+                                               goto out;
+                                       }
+                                       init_smb2_rsp_hdr(work);
+                                       smb2_set_err_rsp(work);
+                                       rsp->hdr.Status =
+                                               STATUS_RANGE_NOT_LOCKED;
+                                       kfree(smb_lock);
+                                       goto out2;
+                               }
+
+                               list_del(&smb_lock->llist);
+                               spin_lock(&work->conn->llist_lock);
+                               list_del(&smb_lock->clist);
+                               spin_unlock(&work->conn->llist_lock);
+
+                               spin_lock(&fp->f_lock);
+                               list_del(&work->fp_entry);
+                               spin_unlock(&fp->f_lock);
+                               goto retry;
+                       } else if (!rc) {
+                               spin_lock(&work->conn->llist_lock);
+                               list_add_tail(&smb_lock->clist,
+                                             &work->conn->lock_list);
+                               list_add_tail(&smb_lock->flist,
+                                             &fp->lock_list);
+                               spin_unlock(&work->conn->llist_lock);
+                               list_add(&smb_lock->llist, &rollback_list);
+                               ksmbd_debug(SMB, "successful in taking lock\n");
+                       } else {
+                               goto out;
+                       }
+               }
+       }
+
+       if (atomic_read(&fp->f_ci->op_count) > 1)
+               smb_break_all_oplock(work, fp);
+
+       rsp->StructureSize = cpu_to_le16(4);
+       ksmbd_debug(SMB, "successful in taking lock\n");
+       rsp->hdr.Status = STATUS_SUCCESS;
+       rsp->Reserved = 0;
+       inc_rfc1001_len(rsp, 4);
+       ksmbd_fd_put(work, fp);
+       return 0;
+
+out:
+       list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) {
+               locks_free_lock(smb_lock->fl);
+               list_del(&smb_lock->llist);
+               kfree(smb_lock);
+       }
+
+       list_for_each_entry_safe(smb_lock, tmp, &rollback_list, llist) {
+               struct file_lock *rlock = NULL;
+
+               rlock = smb_flock_init(filp);
+               rlock->fl_type = F_UNLCK;
+               rlock->fl_start = smb_lock->start;
+               rlock->fl_end = smb_lock->end;
+
+               rc = vfs_lock_file(filp, 0, rlock, NULL);
+               if (rc)
+                       pr_err("rollback unlock fail : %d\n", rc);
+
+               list_del(&smb_lock->llist);
+               spin_lock(&work->conn->llist_lock);
+               if (!list_empty(&smb_lock->flist))
+                       list_del(&smb_lock->flist);
+               list_del(&smb_lock->clist);
+               spin_unlock(&work->conn->llist_lock);
+
+               locks_free_lock(smb_lock->fl);
+               locks_free_lock(rlock);
+               kfree(smb_lock);
+       }
+out2:
+       ksmbd_debug(SMB, "failed to take lock (flags: %x), err: %d\n", flags, err);
+
+       if (!rsp->hdr.Status) {
+               if (err == -EINVAL)
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               else if (err == -ENOMEM)
+                       rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+               else if (err == -ENOENT)
+                       rsp->hdr.Status = STATUS_FILE_CLOSED;
+               else
+                       rsp->hdr.Status = STATUS_LOCK_NOT_GRANTED;
+       }
+
+       smb2_set_err_rsp(work);
+       ksmbd_fd_put(work, fp);
+       return err;
+}
+
+static int fsctl_copychunk(struct ksmbd_work *work, struct smb2_ioctl_req *req,
+                          struct smb2_ioctl_rsp *rsp)
+{
+       struct copychunk_ioctl_req *ci_req;
+       struct copychunk_ioctl_rsp *ci_rsp;
+       struct ksmbd_file *src_fp = NULL, *dst_fp = NULL;
+       struct srv_copychunk *chunks;
+       unsigned int i, chunk_count, chunk_count_written = 0;
+       unsigned int chunk_size_written = 0;
+       loff_t total_size_written = 0;
+       int ret, cnt_code;
+
+       cnt_code = le32_to_cpu(req->CntCode);
+       ci_req = (struct copychunk_ioctl_req *)&req->Buffer[0];
+       ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0];
+
+       rsp->VolatileFileId = req->VolatileFileId;
+       rsp->PersistentFileId = req->PersistentFileId;
+       ci_rsp->ChunksWritten =
+               cpu_to_le32(ksmbd_server_side_copy_max_chunk_count());
+       ci_rsp->ChunkBytesWritten =
+               cpu_to_le32(ksmbd_server_side_copy_max_chunk_size());
+       ci_rsp->TotalBytesWritten =
+               cpu_to_le32(ksmbd_server_side_copy_max_total_size());
+
+       chunks = (struct srv_copychunk *)&ci_req->Chunks[0];
+       chunk_count = le32_to_cpu(ci_req->ChunkCount);
+       total_size_written = 0;
+
+       /* verify the SRV_COPYCHUNK_COPY packet */
+       if (chunk_count > ksmbd_server_side_copy_max_chunk_count() ||
+           le32_to_cpu(req->InputCount) <
+            offsetof(struct copychunk_ioctl_req, Chunks) +
+            chunk_count * sizeof(struct srv_copychunk)) {
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               return -EINVAL;
+       }
+
+       for (i = 0; i < chunk_count; i++) {
+               if (le32_to_cpu(chunks[i].Length) == 0 ||
+                   le32_to_cpu(chunks[i].Length) > ksmbd_server_side_copy_max_chunk_size())
+                       break;
+               total_size_written += le32_to_cpu(chunks[i].Length);
+       }
+
+       if (i < chunk_count ||
+           total_size_written > ksmbd_server_side_copy_max_total_size()) {
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               return -EINVAL;
+       }
+
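+       /*
+        * The resume key identifies the source file: ResumeKey[0] holds its
+        * volatile id and ResumeKey[1] its persistent id.
+        */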
+       src_fp = ksmbd_lookup_foreign_fd(work,
+                                        le64_to_cpu(ci_req->ResumeKey[0]));
+       dst_fp = ksmbd_lookup_fd_slow(work,
+                                     le64_to_cpu(req->VolatileFileId),
+                                     le64_to_cpu(req->PersistentFileId));
+       ret = -EINVAL;
+       if (!src_fp ||
+           src_fp->persistent_id != le64_to_cpu(ci_req->ResumeKey[1])) {
+               rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
+               goto out;
+       }
+
+       if (!dst_fp) {
+               rsp->hdr.Status = STATUS_FILE_CLOSED;
+               goto out;
+       }
+
+       /*
+        * FILE_READ_DATA should only be included in
+        * the FSCTL_COPYCHUNK case
+        */
+       if (cnt_code == FSCTL_COPYCHUNK &&
+           !(dst_fp->daccess & (FILE_READ_DATA_LE | FILE_GENERIC_READ_LE))) {
+               rsp->hdr.Status = STATUS_ACCESS_DENIED;
+               goto out;
+       }
+
+       ret = ksmbd_vfs_copy_file_ranges(work, src_fp, dst_fp,
+                                        chunks, chunk_count,
+                                        &chunk_count_written,
+                                        &chunk_size_written,
+                                        &total_size_written);
+       if (ret < 0) {
+               if (ret == -EACCES)
+                       rsp->hdr.Status = STATUS_ACCESS_DENIED;
+               else if (ret == -EAGAIN)
+                       rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
+               else if (ret == -EBADF)
+                       rsp->hdr.Status = STATUS_INVALID_HANDLE;
+               else if (ret == -EFBIG || ret == -ENOSPC)
+                       rsp->hdr.Status = STATUS_DISK_FULL;
+               else if (ret == -EINVAL)
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               else if (ret == -EISDIR)
+                       rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY;
+               else if (ret == -E2BIG)
+                       rsp->hdr.Status = STATUS_INVALID_VIEW_SIZE;
+               else
+                       rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+       }
+
+       ci_rsp->ChunksWritten = cpu_to_le32(chunk_count_written);
+       ci_rsp->ChunkBytesWritten = cpu_to_le32(chunk_size_written);
+       ci_rsp->TotalBytesWritten = cpu_to_le32(total_size_written);
+out:
+       ksmbd_fd_put(work, src_fp);
+       ksmbd_fd_put(work, dst_fp);
+       return ret;
+}
+
+static __be32 idev_ipv4_address(struct in_device *idev)
+{
+       __be32 addr = 0;
+       struct in_ifaddr *ifa;
+
+       rcu_read_lock();
+       in_dev_for_each_ifa_rcu(ifa, idev) {
+               if (ifa->ifa_flags & IFA_F_SECONDARY)
+                       continue;
+
+               addr = ifa->ifa_address;
+               break;
+       }
+       rcu_read_unlock();
+       return addr;
+}
+
+static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
+                                       struct smb2_ioctl_req *req,
+                                       struct smb2_ioctl_rsp *rsp)
+{
+       struct network_interface_info_ioctl_rsp *nii_rsp = NULL;
+       int nbytes = 0;
+       struct net_device *netdev;
+       struct sockaddr_storage_rsp *sockaddr_storage;
+       unsigned int flags;
+       unsigned long long speed;
+       struct sockaddr_in6 *csin6 = (struct sockaddr_in6 *)&conn->peer_addr;
+
+       rtnl_lock();
+       for_each_netdev(&init_net, netdev) {
+               if (netdev->type == ARPHRD_LOOPBACK)
+                       continue;
+
+               flags = dev_get_flags(netdev);
+               if (!(flags & IFF_RUNNING))
+                       continue;
+
+               nii_rsp = (struct network_interface_info_ioctl_rsp *)
+                               &rsp->Buffer[nbytes];
+               nii_rsp->IfIndex = cpu_to_le32(netdev->ifindex);
+
+               nii_rsp->Capability = 0;
+               if (ksmbd_rdma_capable_netdev(netdev))
+                       nii_rsp->Capability |= cpu_to_le32(RDMA_CAPABLE);
+
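+               /*
+                * Next is the byte offset to the following entry; 152 is the
+                * size of one NETWORK_INTERFACE_INFO entry. The last entry's
+                * Next is cleared after the loop.
+                */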
+               nii_rsp->Next = cpu_to_le32(152);
+               nii_rsp->Reserved = 0;
+
+               if (netdev->ethtool_ops->get_link_ksettings) {
+                       struct ethtool_link_ksettings cmd;
+
+                       netdev->ethtool_ops->get_link_ksettings(netdev, &cmd);
+                       speed = cmd.base.speed;
+               } else {
+                       pr_err("%s %s\n", netdev->name,
+                              "speed is unknown, defaulting to 1Gb/sec");
+                       speed = SPEED_1000;
+               }
+
+               speed *= 1000000;
+               nii_rsp->LinkSpeed = cpu_to_le64(speed);
+
+               sockaddr_storage = (struct sockaddr_storage_rsp *)
+                                       nii_rsp->SockAddr_Storage;
+               memset(sockaddr_storage, 0, 128);
+
+               if (conn->peer_addr.ss_family == PF_INET ||
+                   ipv6_addr_v4mapped(&csin6->sin6_addr)) {
+                       struct in_device *idev;
+
+                       sockaddr_storage->Family = cpu_to_le16(INTERNETWORK);
+                       sockaddr_storage->addr4.Port = 0;
+
+                       idev = __in_dev_get_rtnl(netdev);
+                       if (!idev)
+                               continue;
+                       sockaddr_storage->addr4.IPv4address =
+                                               idev_ipv4_address(idev);
+               } else {
+                       struct inet6_dev *idev6;
+                       struct inet6_ifaddr *ifa;
+                       __u8 *ipv6_addr = sockaddr_storage->addr6.IPv6address;
+
+                       sockaddr_storage->Family = cpu_to_le16(INTERNETWORKV6);
+                       sockaddr_storage->addr6.Port = 0;
+                       sockaddr_storage->addr6.FlowInfo = 0;
+
+                       idev6 = __in6_dev_get(netdev);
+                       if (!idev6)
+                               continue;
+
+                       list_for_each_entry(ifa, &idev6->addr_list, if_list) {
+                               if (ifa->flags & (IFA_F_TENTATIVE |
+                                                       IFA_F_DEPRECATED))
+                                       continue;
+                               memcpy(ipv6_addr, ifa->addr.s6_addr, 16);
+                               break;
+                       }
+                       sockaddr_storage->addr6.ScopeId = 0;
+               }
+
+               nbytes += sizeof(struct network_interface_info_ioctl_rsp);
+       }
+       rtnl_unlock();
+
+       /* the last entry terminates the list: clear its Next field */
+       if (nii_rsp)
+               nii_rsp->Next = 0;
+
+       if (!nbytes) {
+               rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL;
+               return -EINVAL;
+       }
+
+       rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID);
+       rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
+       return nbytes;
+}
+
+static int fsctl_validate_negotiate_info(struct ksmbd_conn *conn,
+                                        struct validate_negotiate_info_req *neg_req,
+                                        struct validate_negotiate_info_rsp *neg_rsp)
+{
+       int ret = 0;
+       int dialect;
+
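+       /*
+        * Every field the client echoes back must match what was established
+        * during the original SMB2 NEGOTIATE exchange; any mismatch fails the
+        * validation.
+        */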
+       dialect = ksmbd_lookup_dialect_by_id(neg_req->Dialects,
+                                            neg_req->DialectCount);
+       if (dialect == BAD_PROT_ID || dialect != conn->dialect) {
+               ret = -EINVAL;
+               goto err_out;
+       }
+
+       if (strncmp(neg_req->Guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE)) {
+               ret = -EINVAL;
+               goto err_out;
+       }
+
+       if (le16_to_cpu(neg_req->SecurityMode) != conn->cli_sec_mode) {
+               ret = -EINVAL;
+               goto err_out;
+       }
+
+       if (le32_to_cpu(neg_req->Capabilities) != conn->cli_cap) {
+               ret = -EINVAL;
+               goto err_out;
+       }
+
+       neg_rsp->Capabilities = cpu_to_le32(conn->vals->capabilities);
+       memset(neg_rsp->Guid, 0, SMB2_CLIENT_GUID_SIZE);
+       neg_rsp->SecurityMode = cpu_to_le16(conn->srv_sec_mode);
+       neg_rsp->Dialect = cpu_to_le16(conn->dialect);
+err_out:
+       return ret;
+}
+
+static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
+                                       struct file_allocated_range_buffer *qar_req,
+                                       struct file_allocated_range_buffer *qar_rsp,
+                                       int in_count, int *out_count)
+{
+       struct ksmbd_file *fp;
+       loff_t start, length;
+       int ret = 0;
+
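+       /* each returned range describes an allocated (non-hole) region of the file */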
+       *out_count = 0;
+       if (in_count == 0)
+               return -EINVAL;
+
+       fp = ksmbd_lookup_fd_fast(work, id);
+       if (!fp)
+               return -ENOENT;
+
+       start = le64_to_cpu(qar_req->file_offset);
+       length = le64_to_cpu(qar_req->length);
+
+       ret = ksmbd_vfs_fqar_lseek(fp, start, length,
+                                  qar_rsp, in_count, out_count);
+       if (ret && ret != -E2BIG)
+               *out_count = 0;
+
+       ksmbd_fd_put(work, fp);
+       return ret;
+}
+
+static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id,
+                                int out_buf_len, struct smb2_ioctl_req *req,
+                                struct smb2_ioctl_rsp *rsp)
+{
+       struct ksmbd_rpc_command *rpc_resp;
+       char *data_buf = (char *)&req->Buffer[0];
+       int nbytes = 0;
+
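+       /* forward the named-pipe payload to the RPC layer over ksmbd IPC */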
+       rpc_resp = ksmbd_rpc_ioctl(work->sess, id, data_buf,
+                                  le32_to_cpu(req->InputCount));
+       if (rpc_resp) {
+               if (rpc_resp->flags == KSMBD_RPC_SOME_NOT_MAPPED) {
+                       /*
+                        * set STATUS_SOME_NOT_MAPPED response
+                        * for unknown domain sid.
+                        */
+                       rsp->hdr.Status = STATUS_SOME_NOT_MAPPED;
+               } else if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
+                       rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+                       goto out;
+               } else if (rpc_resp->flags != KSMBD_RPC_OK) {
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+                       goto out;
+               }
+
+               nbytes = rpc_resp->payload_sz;
+               if (rpc_resp->payload_sz > out_buf_len) {
+                       rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
+                       nbytes = out_buf_len;
+               }
+
+               if (!rpc_resp->payload_sz) {
+                       rsp->hdr.Status =
+                               STATUS_UNEXPECTED_IO_ERROR;
+                       goto out;
+               }
+
+               memcpy((char *)rsp->Buffer, rpc_resp->payload, nbytes);
+       }
+out:
+       kvfree(rpc_resp);
+       return nbytes;
+}
+
+static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id,
+                                  struct file_sparse *sparse)
+{
+       struct ksmbd_file *fp;
+       struct user_namespace *user_ns;
+       int ret = 0;
+       __le32 old_fattr;
+
+       fp = ksmbd_lookup_fd_fast(work, id);
+       if (!fp)
+               return -ENOENT;
+       user_ns = file_mnt_user_ns(fp->filp);
+
+       old_fattr = fp->f_ci->m_fattr;
+       if (sparse->SetSparse)
+               fp->f_ci->m_fattr |= ATTR_SPARSE_FILE_LE;
+       else
+               fp->f_ci->m_fattr &= ~ATTR_SPARSE_FILE_LE;
+
+       if (fp->f_ci->m_fattr != old_fattr &&
+           test_share_config_flag(work->tcon->share_conf,
+                                  KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+               struct xattr_dos_attrib da;
+
+               ret = ksmbd_vfs_get_dos_attrib_xattr(user_ns,
+                                                    fp->filp->f_path.dentry, &da);
+               if (ret <= 0)
+                       goto out;
+
+               da.attr = le32_to_cpu(fp->f_ci->m_fattr);
+               ret = ksmbd_vfs_set_dos_attrib_xattr(user_ns,
+                                                    fp->filp->f_path.dentry, &da);
+               if (ret)
+                       fp->f_ci->m_fattr = old_fattr;
+       }
+
+out:
+       ksmbd_fd_put(work, fp);
+       return ret;
+}
+
+static int fsctl_request_resume_key(struct ksmbd_work *work,
+                                   struct smb2_ioctl_req *req,
+                                   struct resume_key_ioctl_rsp *key_rsp)
+{
+       struct ksmbd_file *fp;
+
+       fp = ksmbd_lookup_fd_slow(work,
+                                 le64_to_cpu(req->VolatileFileId),
+                                 le64_to_cpu(req->PersistentFileId));
+       if (!fp)
+               return -ENOENT;
+
+       memset(key_rsp, 0, sizeof(*key_rsp));
+       key_rsp->ResumeKey[0] = req->VolatileFileId;
+       key_rsp->ResumeKey[1] = req->PersistentFileId;
+       ksmbd_fd_put(work, fp);
+
+       return 0;
+}
+
+/**
+ * smb2_ioctl() - handler for smb2 ioctl command
+ * @work:      smb work containing ioctl command buffer
+ *
+ * Return:     0 on success, otherwise error
+ */
+int smb2_ioctl(struct ksmbd_work *work)
+{
+       struct smb2_ioctl_req *req;
+       struct smb2_ioctl_rsp *rsp, *rsp_org;
+       int cnt_code, nbytes = 0;
+       int out_buf_len;
+       u64 id = KSMBD_NO_FID;
+       struct ksmbd_conn *conn = work->conn;
+       int ret = 0;
+
+       rsp_org = work->response_buf;
+       if (work->next_smb2_rcv_hdr_off) {
+               req = ksmbd_req_buf_next(work);
+               rsp = ksmbd_resp_buf_next(work);
+               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+                       ksmbd_debug(SMB, "Compound request set FID = %llu\n",
+                                   work->compound_fid);
+                       id = work->compound_fid;
+               }
+       } else {
+               req = work->request_buf;
+               rsp = work->response_buf;
+       }
+
+       if (!has_file_id(id))
+               id = le64_to_cpu(req->VolatileFileId);
+
+       if (req->Flags != cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL)) {
+               rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+               goto out;
+       }
+
+       cnt_code = le32_to_cpu(req->CntCode);
+       out_buf_len = le32_to_cpu(req->MaxOutputResponse);
+       out_buf_len = min(KSMBD_IPC_MAX_PAYLOAD, out_buf_len);
+
+       switch (cnt_code) {
+       case FSCTL_DFS_GET_REFERRALS:
+       case FSCTL_DFS_GET_REFERRALS_EX:
+               /* DFS is not supported yet */
+               rsp->hdr.Status = STATUS_FS_DRIVER_REQUIRED;
+               goto out;
+       case FSCTL_CREATE_OR_GET_OBJECT_ID:
+       {
+               struct file_object_buf_type1_ioctl_rsp *obj_buf;
+
+               nbytes = sizeof(struct file_object_buf_type1_ioctl_rsp);
+               obj_buf = (struct file_object_buf_type1_ioctl_rsp *)
+                       &rsp->Buffer[0];
+
+               /*
+                * TODO: This is a dummy implementation to pass smbtorture.
+                * The correct response needs to be checked later.
+                */
+               memset(obj_buf->ObjectId, 0x0, 16);
+               memset(obj_buf->BirthVolumeId, 0x0, 16);
+               memset(obj_buf->BirthObjectId, 0x0, 16);
+               memset(obj_buf->DomainId, 0x0, 16);
+
+               break;
+       }
+       case FSCTL_PIPE_TRANSCEIVE:
+               nbytes = fsctl_pipe_transceive(work, id, out_buf_len, req, rsp);
+               break;
+       case FSCTL_VALIDATE_NEGOTIATE_INFO:
+               if (conn->dialect < SMB30_PROT_ID) {
+                       ret = -EOPNOTSUPP;
+                       goto out;
+               }
+
+               ret = fsctl_validate_negotiate_info(conn,
+                       (struct validate_negotiate_info_req *)&req->Buffer[0],
+                       (struct validate_negotiate_info_rsp *)&rsp->Buffer[0]);
+               if (ret < 0)
+                       goto out;
+
+               nbytes = sizeof(struct validate_negotiate_info_rsp);
+               rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID);
+               rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
+               break;
+       case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
+               nbytes = fsctl_query_iface_info_ioctl(conn, req, rsp);
+               if (nbytes < 0)
+                       goto out;
+               break;
+       case FSCTL_REQUEST_RESUME_KEY:
+               if (out_buf_len < sizeof(struct resume_key_ioctl_rsp)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               ret = fsctl_request_resume_key(work, req,
+                                              (struct resume_key_ioctl_rsp *)&rsp->Buffer[0]);
+               if (ret < 0)
+                       goto out;
+               rsp->PersistentFileId = req->PersistentFileId;
+               rsp->VolatileFileId = req->VolatileFileId;
+               nbytes = sizeof(struct resume_key_ioctl_rsp);
+               break;
+       case FSCTL_COPYCHUNK:
+       case FSCTL_COPYCHUNK_WRITE:
+               if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+                       ksmbd_debug(SMB,
+                                   "User does not have write permission\n");
+                       ret = -EACCES;
+                       goto out;
+               }
+
+               if (out_buf_len < sizeof(struct copychunk_ioctl_rsp)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               nbytes = sizeof(struct copychunk_ioctl_rsp);
+               fsctl_copychunk(work, req, rsp);
+               break;
+       case FSCTL_SET_SPARSE:
+               ret = fsctl_set_sparse(work, id,
+                                      (struct file_sparse *)&req->Buffer[0]);
+               if (ret < 0)
+                       goto out;
+               break;
+       case FSCTL_SET_ZERO_DATA:
+       {
+               struct file_zero_data_information *zero_data;
+               struct ksmbd_file *fp;
+               loff_t off, len;
+
+               if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+                       ksmbd_debug(SMB,
+                                   "User does not have write permission\n");
+                       ret = -EACCES;
+                       goto out;
+               }
+
+               zero_data =
+                       (struct file_zero_data_information *)&req->Buffer[0];
+
+               fp = ksmbd_lookup_fd_fast(work, id);
+               if (!fp) {
+                       ret = -ENOENT;
+                       goto out;
+               }
+
+               off = le64_to_cpu(zero_data->FileOffset);
+               len = le64_to_cpu(zero_data->BeyondFinalZero) - off;
+
+               ret = ksmbd_vfs_zero_data(work, fp, off, len);
+               ksmbd_fd_put(work, fp);
+               if (ret < 0)
+                       goto out;
+               break;
+       }
+       case FSCTL_QUERY_ALLOCATED_RANGES:
+               ret = fsctl_query_allocated_ranges(work, id,
+                       (struct file_allocated_range_buffer *)&req->Buffer[0],
+                       (struct file_allocated_range_buffer *)&rsp->Buffer[0],
+                       out_buf_len /
+                       sizeof(struct file_allocated_range_buffer), &nbytes);
+               if (ret == -E2BIG) {
+                       rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
+               } else if (ret < 0) {
+                       nbytes = 0;
+                       goto out;
+               }
+
+               nbytes *= sizeof(struct file_allocated_range_buffer);
+               break;
+       case FSCTL_GET_REPARSE_POINT:
+       {
+               struct reparse_data_buffer *reparse_ptr;
+               struct ksmbd_file *fp;
+
+               reparse_ptr = (struct reparse_data_buffer *)&rsp->Buffer[0];
+               fp = ksmbd_lookup_fd_fast(work, id);
+               if (!fp) {
+                       pr_err("not found fp!!\n");
+                       ret = -ENOENT;
+                       goto out;
+               }
+
+               reparse_ptr->ReparseTag =
+                       smb2_get_reparse_tag_special_file(file_inode(fp->filp)->i_mode);
+               reparse_ptr->ReparseDataLength = 0;
+               ksmbd_fd_put(work, fp);
+               nbytes = sizeof(struct reparse_data_buffer);
+               break;
+       }
+       case FSCTL_DUPLICATE_EXTENTS_TO_FILE:
+       {
+               struct ksmbd_file *fp_in, *fp_out = NULL;
+               struct duplicate_extents_to_file *dup_ext;
+               loff_t src_off, dst_off, length, cloned;
+
+               dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0];
+
+               fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle,
+                                            dup_ext->PersistentFileHandle);
+               if (!fp_in) {
+                       pr_err("not found file handle in duplicate extent to file\n");
+                       ret = -ENOENT;
+                       goto out;
+               }
+
+               fp_out = ksmbd_lookup_fd_fast(work, id);
+               if (!fp_out) {
+                       pr_err("not found fp\n");
+                       ret = -ENOENT;
+                       goto dup_ext_out;
+               }
+
+               src_off = le64_to_cpu(dup_ext->SourceFileOffset);
+               dst_off = le64_to_cpu(dup_ext->TargetFileOffset);
+               length = le64_to_cpu(dup_ext->ByteCount);
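+               /* try a block clone (reflink) first, then fall back to a byte copy */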
+               cloned = vfs_clone_file_range(fp_in->filp, src_off, fp_out->filp,
+                                             dst_off, length, 0);
+               if (cloned == -EXDEV || cloned == -EOPNOTSUPP) {
+                       ret = -EOPNOTSUPP;
+                       goto dup_ext_out;
+               } else if (cloned != length) {
+                       cloned = vfs_copy_file_range(fp_in->filp, src_off,
+                                                    fp_out->filp, dst_off, length, 0);
+                       if (cloned != length) {
+                               if (cloned < 0)
+                                       ret = cloned;
+                               else
+                                       ret = -EINVAL;
+                       }
+               }
+
+dup_ext_out:
+               ksmbd_fd_put(work, fp_in);
+               ksmbd_fd_put(work, fp_out);
+               if (ret < 0)
+                       goto out;
+               break;
+       }
+       default:
+               ksmbd_debug(SMB, "not implemented yet ioctl command 0x%x\n",
+                           cnt_code);
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
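+       /*
+        * Input/Output offsets are relative to the start of the SMB2 header:
+        * 64-byte header + 48-byte fixed ioctl response body = 112.
+        */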
+       rsp->CntCode = cpu_to_le32(cnt_code);
+       rsp->InputCount = cpu_to_le32(0);
+       rsp->InputOffset = cpu_to_le32(112);
+       rsp->OutputOffset = cpu_to_le32(112);
+       rsp->OutputCount = cpu_to_le32(nbytes);
+       rsp->StructureSize = cpu_to_le16(49);
+       rsp->Reserved = cpu_to_le16(0);
+       rsp->Flags = cpu_to_le32(0);
+       rsp->Reserved2 = cpu_to_le32(0);
+       inc_rfc1001_len(rsp_org, 48 + nbytes);
+
+       return 0;
+
+out:
+       if (ret == -EACCES)
+               rsp->hdr.Status = STATUS_ACCESS_DENIED;
+       else if (ret == -ENOENT)
+               rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
+       else if (ret == -EOPNOTSUPP)
+               rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+       else if (ret < 0 || rsp->hdr.Status == 0)
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+       smb2_set_err_rsp(work);
+       return 0;
+}
+
+/**
+ * smb20_oplock_break_ack() - handler for smb2.0 oplock break command
+ * @work:      smb work containing oplock break command buffer
+ */
+static void smb20_oplock_break_ack(struct ksmbd_work *work)
+{
+       struct smb2_oplock_break *req = work->request_buf;
+       struct smb2_oplock_break *rsp = work->response_buf;
+       struct ksmbd_file *fp;
+       struct oplock_info *opinfo = NULL;
+       __le32 err = 0;
+       int ret = 0;
+       u64 volatile_id, persistent_id;
+       char req_oplevel = 0, rsp_oplevel = 0;
+       unsigned int oplock_change_type;
+
+       volatile_id = le64_to_cpu(req->VolatileFid);
+       persistent_id = le64_to_cpu(req->PersistentFid);
+       req_oplevel = req->OplockLevel;
+       ksmbd_debug(OPLOCK, "v_id %llu, p_id %llu request oplock level %d\n",
+                   volatile_id, persistent_id, req_oplevel);
+
+       fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
+       if (!fp) {
+               rsp->hdr.Status = STATUS_FILE_CLOSED;
+               smb2_set_err_rsp(work);
+               return;
+       }
+
+       opinfo = opinfo_get(fp);
+       if (!opinfo) {
+               pr_err("unexpected null oplock_info\n");
+               rsp->hdr.Status = STATUS_INVALID_OPLOCK_PROTOCOL;
+               smb2_set_err_rsp(work);
+               ksmbd_fd_put(work, fp);
+               return;
+       }
+
+       if (opinfo->level == SMB2_OPLOCK_LEVEL_NONE) {
+               rsp->hdr.Status = STATUS_INVALID_OPLOCK_PROTOCOL;
+               goto err_out;
+       }
+
+       if (opinfo->op_state == OPLOCK_STATE_NONE) {
+               ksmbd_debug(SMB, "unexpected oplock state 0x%x\n", opinfo->op_state);
+               rsp->hdr.Status = STATUS_UNSUCCESSFUL;
+               goto err_out;
+       }
+
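+       /*
+        * An acknowledgement may only lower the oplock level:
+        * exclusive/batch -> level II or none, and level II -> none.
+        * Any other combination is answered with an error status.
+        */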
+       if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE ||
+            opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) &&
+           (req_oplevel != SMB2_OPLOCK_LEVEL_II &&
+            req_oplevel != SMB2_OPLOCK_LEVEL_NONE)) {
+               err = STATUS_INVALID_OPLOCK_PROTOCOL;
+               oplock_change_type = OPLOCK_WRITE_TO_NONE;
+       } else if (opinfo->level == SMB2_OPLOCK_LEVEL_II &&
+                  req_oplevel != SMB2_OPLOCK_LEVEL_NONE) {
+               err = STATUS_INVALID_OPLOCK_PROTOCOL;
+               oplock_change_type = OPLOCK_READ_TO_NONE;
+       } else if (req_oplevel == SMB2_OPLOCK_LEVEL_II ||
+                  req_oplevel == SMB2_OPLOCK_LEVEL_NONE) {
+               err = STATUS_INVALID_DEVICE_STATE;
+               if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE ||
+                    opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) &&
+                   req_oplevel == SMB2_OPLOCK_LEVEL_II) {
+                       oplock_change_type = OPLOCK_WRITE_TO_READ;
+               } else if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE ||
+                           opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) &&
+                          req_oplevel == SMB2_OPLOCK_LEVEL_NONE) {
+                       oplock_change_type = OPLOCK_WRITE_TO_NONE;
+               } else if (opinfo->level == SMB2_OPLOCK_LEVEL_II &&
+                          req_oplevel == SMB2_OPLOCK_LEVEL_NONE) {
+                       oplock_change_type = OPLOCK_READ_TO_NONE;
+               } else {
+                       oplock_change_type = 0;
+               }
+       } else {
+               oplock_change_type = 0;
+       }
+
+       switch (oplock_change_type) {
+       case OPLOCK_WRITE_TO_READ:
+               ret = opinfo_write_to_read(opinfo);
+               rsp_oplevel = SMB2_OPLOCK_LEVEL_II;
+               break;
+       case OPLOCK_WRITE_TO_NONE:
+               ret = opinfo_write_to_none(opinfo);
+               rsp_oplevel = SMB2_OPLOCK_LEVEL_NONE;
+               break;
+       case OPLOCK_READ_TO_NONE:
+               ret = opinfo_read_to_none(opinfo);
+               rsp_oplevel = SMB2_OPLOCK_LEVEL_NONE;
+               break;
+       default:
+               pr_err("unknown oplock change 0x%x -> 0x%x\n",
+                      opinfo->level, rsp_oplevel);
+       }
+
+       if (ret < 0) {
+               rsp->hdr.Status = err;
+               goto err_out;
+       }
+
+       opinfo->op_state = OPLOCK_STATE_NONE;
+       wake_up_interruptible_all(&opinfo->oplock_q);
+       opinfo_put(opinfo);
+       ksmbd_fd_put(work, fp);
+
+       rsp->StructureSize = cpu_to_le16(24);
+       rsp->OplockLevel = rsp_oplevel;
+       rsp->Reserved = 0;
+       rsp->Reserved2 = 0;
+       rsp->VolatileFid = cpu_to_le64(volatile_id);
+       rsp->PersistentFid = cpu_to_le64(persistent_id);
+       inc_rfc1001_len(rsp, 24);
+       return;
+
+err_out:
+       opinfo->op_state = OPLOCK_STATE_NONE;
+       wake_up_interruptible_all(&opinfo->oplock_q);
+
+       opinfo_put(opinfo);
+       ksmbd_fd_put(work, fp);
+       smb2_set_err_rsp(work);
+}
+
+static int check_lease_state(struct lease *lease, __le32 req_state)
+{
+       if ((lease->new_state ==
+            (SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE)) &&
+           !(req_state & SMB2_LEASE_WRITE_CACHING_LE)) {
+               lease->new_state = req_state;
+               return 0;
+       }
+
+       if (lease->new_state == req_state)
+               return 0;
+
+       return 1;
+}
+
+/**
+ * smb21_lease_break_ack() - handler for smb2.1 lease break command
+ * @work:      smb work containing lease break command buffer
+ */
+static void smb21_lease_break_ack(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_lease_ack *req = work->request_buf;
+       struct smb2_lease_ack *rsp = work->response_buf;
+       struct oplock_info *opinfo;
+       __le32 err = 0;
+       int ret = 0;
+       unsigned int lease_change_type;
+       __le32 lease_state;
+       struct lease *lease;
+
+       ksmbd_debug(OPLOCK, "smb21 lease break, lease state(0x%x)\n",
+                   le32_to_cpu(req->LeaseState));
+       opinfo = lookup_lease_in_table(conn, req->LeaseKey);
+       if (!opinfo) {
+               ksmbd_debug(OPLOCK, "file not opened\n");
+               smb2_set_err_rsp(work);
+               rsp->hdr.Status = STATUS_UNSUCCESSFUL;
+               return;
+       }
+       lease = opinfo->o_lease;
+
+       if (opinfo->op_state == OPLOCK_STATE_NONE) {
+               pr_err("unexpected lease break state 0x%x\n",
+                      opinfo->op_state);
+               rsp->hdr.Status = STATUS_UNSUCCESSFUL;
+               goto err_out;
+       }
+
+       if (check_lease_state(lease, req->LeaseState)) {
+               rsp->hdr.Status = STATUS_REQUEST_NOT_ACCEPTED;
+               ksmbd_debug(OPLOCK,
+                           "req lease state: 0x%x, expected state: 0x%x\n",
+                           req->LeaseState, lease->new_state);
+               goto err_out;
+       }
+
+       if (!atomic_read(&opinfo->breaking_cnt)) {
+               rsp->hdr.Status = STATUS_UNSUCCESSFUL;
+               goto err_out;
+       }
+
+       /* check for bad lease state */
+       if (req->LeaseState &
+           (~(SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE))) {
+               err = STATUS_INVALID_OPLOCK_PROTOCOL;
+               if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
+                       lease_change_type = OPLOCK_WRITE_TO_NONE;
+               else
+                       lease_change_type = OPLOCK_READ_TO_NONE;
+               ksmbd_debug(OPLOCK, "handle bad lease state 0x%x -> 0x%x\n",
+                           le32_to_cpu(lease->state),
+                           le32_to_cpu(req->LeaseState));
+       } else if (lease->state == SMB2_LEASE_READ_CACHING_LE &&
+                  req->LeaseState != SMB2_LEASE_NONE_LE) {
+               err = STATUS_INVALID_OPLOCK_PROTOCOL;
+               lease_change_type = OPLOCK_READ_TO_NONE;
+               ksmbd_debug(OPLOCK, "handle bad lease state 0x%x -> 0x%x\n",
+                           le32_to_cpu(lease->state),
+                           le32_to_cpu(req->LeaseState));
+       } else {
+               /* valid lease state changes */
+               err = STATUS_INVALID_DEVICE_STATE;
+               if (req->LeaseState == SMB2_LEASE_NONE_LE) {
+                       if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
+                               lease_change_type = OPLOCK_WRITE_TO_NONE;
+                       else
+                               lease_change_type = OPLOCK_READ_TO_NONE;
+               } else if (req->LeaseState & SMB2_LEASE_READ_CACHING_LE) {
+                       if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
+                               lease_change_type = OPLOCK_WRITE_TO_READ;
+                       else
+                               lease_change_type = OPLOCK_READ_HANDLE_TO_READ;
+               } else {
+                       lease_change_type = 0;
+               }
+       }
+
+       switch (lease_change_type) {
+       case OPLOCK_WRITE_TO_READ:
+               ret = opinfo_write_to_read(opinfo);
+               break;
+       case OPLOCK_READ_HANDLE_TO_READ:
+               ret = opinfo_read_handle_to_read(opinfo);
+               break;
+       case OPLOCK_WRITE_TO_NONE:
+               ret = opinfo_write_to_none(opinfo);
+               break;
+       case OPLOCK_READ_TO_NONE:
+               ret = opinfo_read_to_none(opinfo);
+               break;
+       default:
+               ksmbd_debug(OPLOCK, "unknown lease change 0x%x -> 0x%x\n",
+                           le32_to_cpu(lease->state),
+                           le32_to_cpu(req->LeaseState));
+       }
+
+       lease_state = lease->state;
+       opinfo->op_state = OPLOCK_STATE_NONE;
+       wake_up_interruptible_all(&opinfo->oplock_q);
+       atomic_dec(&opinfo->breaking_cnt);
+       wake_up_interruptible_all(&opinfo->oplock_brk);
+       opinfo_put(opinfo);
+
+       if (ret < 0) {
+               rsp->hdr.Status = err;
+               goto err_out;
+       }
+
+       rsp->StructureSize = cpu_to_le16(36);
+       rsp->Reserved = 0;
+       rsp->Flags = 0;
+       memcpy(rsp->LeaseKey, req->LeaseKey, 16);
+       rsp->LeaseState = lease_state;
+       rsp->LeaseDuration = 0;
+       inc_rfc1001_len(rsp, 36);
+       return;
+
+err_out:
+       opinfo->op_state = OPLOCK_STATE_NONE;
+       wake_up_interruptible_all(&opinfo->oplock_q);
+       atomic_dec(&opinfo->breaking_cnt);
+       wake_up_interruptible_all(&opinfo->oplock_brk);
+
+       opinfo_put(opinfo);
+       smb2_set_err_rsp(work);
+}
+
+/**
+ * smb2_oplock_break() - dispatcher for smb2.0 and 2.1 oplock/lease break
+ * @work:      smb work containing oplock/lease break command buffer
+ *
+ * Return:     0
+ */
+int smb2_oplock_break(struct ksmbd_work *work)
+{
+       struct smb2_oplock_break *req = work->request_buf;
+       struct smb2_oplock_break *rsp = work->response_buf;
+
+       switch (le16_to_cpu(req->StructureSize)) {
+       case OP_BREAK_STRUCT_SIZE_20:
+               smb20_oplock_break_ack(work);
+               break;
+       case OP_BREAK_STRUCT_SIZE_21:
+               smb21_lease_break_ack(work);
+               break;
+       default:
+               ksmbd_debug(OPLOCK, "invalid break cmd %d\n",
+                           le16_to_cpu(req->StructureSize));
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               smb2_set_err_rsp(work);
+       }
+
+       return 0;
+}
+
+/**
+ * smb2_notify() - handler for smb2 notify request
+ * @work:   smb work containing notify command buffer
+ *
+ * Return:      0
+ */
+int smb2_notify(struct ksmbd_work *work)
+{
+       struct smb2_notify_req *req;
+       struct smb2_notify_rsp *rsp;
+
+       WORK_BUFFERS(work, req, rsp);
+
+       if (work->next_smb2_rcv_hdr_off && req->hdr.NextCommand) {
+               rsp->hdr.Status = STATUS_INTERNAL_ERROR;
+               smb2_set_err_rsp(work);
+               return 0;
+       }
+
+       smb2_set_err_rsp(work);
+       rsp->hdr.Status = STATUS_NOT_IMPLEMENTED;
+       return 0;
+}
+
+/**
+ * smb2_is_sign_req() - handler for checking packet signing status
+ * @work:      smb work containing the request buffer
+ * @command:   SMB2 command id
+ *
+ * Return:     true if the packet is signed, false otherwise
+ */
+bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command)
+{
+       struct smb2_hdr *rcv_hdr2 = work->request_buf;
+
+       if ((rcv_hdr2->Flags & SMB2_FLAGS_SIGNED) &&
+           command != SMB2_NEGOTIATE_HE &&
+           command != SMB2_SESSION_SETUP_HE &&
+           command != SMB2_OPLOCK_BREAK_HE)
+               return true;
+
+       return false;
+}
+
+/**
+ * smb2_check_sign_req() - handler for req packet sign processing
+ * @work:   smb work containing the request buffer
+ *
+ * Return:     1 on success, 0 otherwise
+ */
+int smb2_check_sign_req(struct ksmbd_work *work)
+{
+       struct smb2_hdr *hdr, *hdr_org;
+       char signature_req[SMB2_SIGNATURE_SIZE];
+       char signature[SMB2_HMACSHA256_SIZE];
+       struct kvec iov[1];
+       size_t len;
+
+       hdr_org = hdr = work->request_buf;
+       if (work->next_smb2_rcv_hdr_off)
+               hdr = ksmbd_req_buf_next(work);
+
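+       /*
+        * Each PDU of a compound request is signed separately: the length is
+        * NextCommand when another PDU follows, otherwise whatever remains of
+        * the receive buffer.
+        */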
+       if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
+               len = be32_to_cpu(hdr_org->smb2_buf_length);
+       else if (hdr->NextCommand)
+               len = le32_to_cpu(hdr->NextCommand);
+       else
+               len = be32_to_cpu(hdr_org->smb2_buf_length) -
+                       work->next_smb2_rcv_hdr_off;
+
+       memcpy(signature_req, hdr->Signature, SMB2_SIGNATURE_SIZE);
+       memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
+
+       iov[0].iov_base = (char *)&hdr->ProtocolId;
+       iov[0].iov_len = len;
+
+       if (ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, 1,
+                               signature))
+               return 0;
+
+       if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
+               pr_err("bad smb2 signature\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+/**
+ * smb2_set_sign_rsp() - handler for rsp packet sign processing
+ * @work:   smb work containing the response buffer
+ *
+ */
+void smb2_set_sign_rsp(struct ksmbd_work *work)
+{
+       struct smb2_hdr *hdr, *hdr_org;
+       struct smb2_hdr *req_hdr;
+       char signature[SMB2_HMACSHA256_SIZE];
+       struct kvec iov[2];
+       size_t len;
+       int n_vec = 1;
+
+       hdr_org = hdr = work->response_buf;
+       if (work->next_smb2_rsp_hdr_off)
+               hdr = ksmbd_resp_buf_next(work);
+
+       req_hdr = ksmbd_req_buf_next(work);
+
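+       /* chained responses are padded to an 8-byte boundary before signing */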
+       if (!work->next_smb2_rsp_hdr_off) {
+               len = get_rfc1002_len(hdr_org);
+               if (req_hdr->NextCommand)
+                       len = ALIGN(len, 8);
+       } else {
+               len = get_rfc1002_len(hdr_org) - work->next_smb2_rsp_hdr_off;
+               len = ALIGN(len, 8);
+       }
+
+       if (req_hdr->NextCommand)
+               hdr->NextCommand = cpu_to_le32(len);
+
+       hdr->Flags |= SMB2_FLAGS_SIGNED;
+       memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
+
+       iov[0].iov_base = (char *)&hdr->ProtocolId;
+       iov[0].iov_len = len;
+
+       if (work->aux_payload_sz) {
+               iov[0].iov_len -= work->aux_payload_sz;
+
+               iov[1].iov_base = work->aux_payload_buf;
+               iov[1].iov_len = work->aux_payload_sz;
+               n_vec++;
+       }
+
+       if (!ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, n_vec,
+                                signature))
+               memcpy(hdr->Signature, signature, SMB2_SIGNATURE_SIZE);
+}
+
+/**
+ * smb3_check_sign_req() - handler for req packet sign processing
+ * @work:   smb work containing the request buffer
+ *
+ * Return:     1 on success, 0 otherwise
+ */
+int smb3_check_sign_req(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       char *signing_key;
+       struct smb2_hdr *hdr, *hdr_org;
+       struct channel *chann;
+       char signature_req[SMB2_SIGNATURE_SIZE];
+       char signature[SMB2_CMACAES_SIZE];
+       struct kvec iov[1];
+       size_t len;
+
+       hdr_org = hdr = work->request_buf;
+       if (work->next_smb2_rcv_hdr_off)
+               hdr = ksmbd_req_buf_next(work);
+
+       if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
+               len = be32_to_cpu(hdr_org->smb2_buf_length);
+       else if (hdr->NextCommand)
+               len = le32_to_cpu(hdr->NextCommand);
+       else
+               len = be32_to_cpu(hdr_org->smb2_buf_length) -
+                       work->next_smb2_rcv_hdr_off;
+
+       if (le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+               signing_key = work->sess->smb3signingkey;
+       } else {
+               chann = lookup_chann_list(work->sess, conn);
+               if (!chann)
+                       return 0;
+               signing_key = chann->smb3signingkey;
+       }
+
+       if (!signing_key) {
+               pr_err("SMB3 signing key is not generated\n");
+               return 0;
+       }
+
+       memcpy(signature_req, hdr->Signature, SMB2_SIGNATURE_SIZE);
+       memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
+       iov[0].iov_base = (char *)&hdr->ProtocolId;
+       iov[0].iov_len = len;
+
+       if (ksmbd_sign_smb3_pdu(conn, signing_key, iov, 1, signature))
+               return 0;
+
+       if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
+               pr_err("bad smb2 signature\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+/**
+ * smb3_set_sign_rsp() - handler for rsp packet sign processing
+ * @work:   smb work containing the response buffer
+ *
+ */
+void smb3_set_sign_rsp(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_hdr *req_hdr;
+       struct smb2_hdr *hdr, *hdr_org;
+       struct channel *chann;
+       char signature[SMB2_CMACAES_SIZE];
+       struct kvec iov[2];
+       int n_vec = 1;
+       size_t len;
+       char *signing_key;
+
+       hdr_org = hdr = work->response_buf;
+       if (work->next_smb2_rsp_hdr_off)
+               hdr = ksmbd_resp_buf_next(work);
+
+       req_hdr = ksmbd_req_buf_next(work);
+
+       if (!work->next_smb2_rsp_hdr_off) {
+               len = get_rfc1002_len(hdr_org);
+               if (req_hdr->NextCommand)
+                       len = ALIGN(len, 8);
+       } else {
+               len = get_rfc1002_len(hdr_org) - work->next_smb2_rsp_hdr_off;
+               len = ALIGN(len, 8);
+       }
+
+       if (!conn->binding &&
+           le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+               signing_key = work->sess->smb3signingkey;
+       } else {
+               chann = lookup_chann_list(work->sess, work->conn);
+               if (!chann)
+                       return;
+               signing_key = chann->smb3signingkey;
+       }
+
+       if (!signing_key)
+               return;
+
+       if (req_hdr->NextCommand)
+               hdr->NextCommand = cpu_to_le32(len);
+
+       hdr->Flags |= SMB2_FLAGS_SIGNED;
+       memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
+       iov[0].iov_base = (char *)&hdr->ProtocolId;
+       iov[0].iov_len = len;
+       if (work->aux_payload_sz) {
+               iov[0].iov_len -= work->aux_payload_sz;
+               iov[1].iov_base = work->aux_payload_buf;
+               iov[1].iov_len = work->aux_payload_sz;
+               n_vec++;
+       }
+
+       if (!ksmbd_sign_smb3_pdu(conn, signing_key, iov, n_vec, signature))
+               memcpy(hdr->Signature, signature, SMB2_SIGNATURE_SIZE);
+}
+
+/**
+ * smb3_preauth_hash_rsp() - handler for computing preauth hash on response
+ * @work:   smb work containing response buffer
+ *
+ */
+void smb3_preauth_hash_rsp(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct ksmbd_session *sess = work->sess;
+       struct smb2_hdr *req, *rsp;
+
+       if (conn->dialect != SMB311_PROT_ID)
+               return;
+
+       WORK_BUFFERS(work, req, rsp);
+
+       if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE)
+               ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,
+                                                conn->preauth_info->Preauth_HashValue);
+
+       if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE && sess) {
+               __u8 *hash_value;
+
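+               /*
+                * For a multichannel session binding the preauth hash is
+                * tracked per connection in the preauth session rather than
+                * in the already established session.
+                */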
+               if (conn->binding) {
+                       struct preauth_session *preauth_sess;
+
+                       preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id);
+                       if (!preauth_sess)
+                               return;
+                       hash_value = preauth_sess->Preauth_HashValue;
+               } else {
+                       hash_value = sess->Preauth_HashValue;
+                       if (!hash_value)
+                               return;
+               }
+               ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,
+                                                hash_value);
+       }
+}
+
+static void fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, char *old_buf,
+                              __le16 cipher_type)
+{
+       struct smb2_hdr *hdr = (struct smb2_hdr *)old_buf;
+       unsigned int orig_len = get_rfc1002_len(old_buf);
+
+       memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
+       tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
+       tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
+       tr_hdr->Flags = cpu_to_le16(0x01);
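+       /* GCM ciphers take a 12-byte nonce, CCM ciphers an 11-byte one */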
+       if (cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
+           cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+               get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
+       else
+               get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
+       memcpy(&tr_hdr->SessionId, &hdr->SessionId, 8);
+       inc_rfc1001_len(tr_hdr, sizeof(struct smb2_transform_hdr) - 4);
+       inc_rfc1001_len(tr_hdr, orig_len);
+}
+
+int smb3_encrypt_resp(struct ksmbd_work *work)
+{
+       char *buf = work->response_buf;
+       struct smb2_transform_hdr *tr_hdr;
+       struct kvec iov[3];
+       int rc = -ENOMEM;
+       int buf_size = 0, rq_nvec = 2 + (work->aux_payload_sz ? 1 : 0);
+
+       if (ARRAY_SIZE(iov) < rq_nvec)
+               return -ENOMEM;
+
+       tr_hdr = kzalloc(sizeof(struct smb2_transform_hdr), GFP_KERNEL);
+       if (!tr_hdr)
+               return rc;
+
+       /* fill transform header */
+       fill_transform_hdr(tr_hdr, buf, work->conn->cipher_type);
+
+       iov[0].iov_base = tr_hdr;
+       iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+       buf_size += iov[0].iov_len - 4;
+
+       iov[1].iov_base = buf + 4;
+       iov[1].iov_len = get_rfc1002_len(buf);
+       if (work->aux_payload_sz) {
+               iov[1].iov_len = work->resp_hdr_sz - 4;
+
+               iov[2].iov_base = work->aux_payload_buf;
+               iov[2].iov_len = work->aux_payload_sz;
+               buf_size += iov[2].iov_len;
+       }
+       buf_size += iov[1].iov_len;
+       work->resp_hdr_sz = iov[1].iov_len;
+
+       rc = ksmbd_crypt_message(work->conn, iov, rq_nvec, 1);
+       if (rc)
+               return rc;
+
+       memmove(buf, iov[1].iov_base, iov[1].iov_len);
+       tr_hdr->smb2_buf_length = cpu_to_be32(buf_size);
+       work->tr_buf = tr_hdr;
+
+       return rc;
+}
+
+bool smb3_is_transform_hdr(void *buf)
+{
+       struct smb2_transform_hdr *trhdr = buf;
+
+       return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
+}
+
+int smb3_decrypt_req(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct ksmbd_session *sess;
+       char *buf = work->request_buf;
+       struct smb2_hdr *hdr;
+       unsigned int pdu_length = get_rfc1002_len(buf);
+       struct kvec iov[2];
+       unsigned int buf_data_size = pdu_length + 4 -
+               sizeof(struct smb2_transform_hdr);
+       struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
+       unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
+       int rc = 0;
+
+       sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));
+       if (!sess) {
+               pr_err("invalid session id(%llx) in transform header\n",
+                      le64_to_cpu(tr_hdr->SessionId));
+               return -ECONNABORTED;
+       }
+
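+       /*
+        * pdu_length from the RFC1001 header excludes the 4-byte length field,
+        * while sizeof(struct smb2_transform_hdr) includes it, hence the +4
+        * adjustments in this function.
+        */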
+       if (pdu_length + 4 <
+           sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) {
+               pr_err("Transform message is too small (%u)\n",
+                      pdu_length);
+               return -ECONNABORTED;
+       }
+
+       if (pdu_length + 4 < orig_len + sizeof(struct smb2_transform_hdr)) {
+               pr_err("Transform message is broken\n");
+               return -ECONNABORTED;
+       }
+
+       iov[0].iov_base = buf;
+       iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+       iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
+       iov[1].iov_len = buf_data_size;
+       rc = ksmbd_crypt_message(conn, iov, 2, 0);
+       if (rc)
+               return rc;
+
+       memmove(buf + 4, iov[1].iov_base, buf_data_size);
+       hdr = (struct smb2_hdr *)buf;
+       hdr->smb2_buf_length = cpu_to_be32(buf_data_size);
+
+       return rc;
+}
+
+bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_hdr *rsp = work->response_buf;
+
+       if (conn->dialect < SMB30_PROT_ID)
+               return false;
+
+       if (work->next_smb2_rcv_hdr_off)
+               rsp = ksmbd_resp_buf_next(work);
+
+       if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE &&
+           rsp->Status == STATUS_SUCCESS)
+               return true;
+       return false;
+}
diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
new file mode 100644 (file)
index 0000000..bcec845
--- /dev/null
@@ -0,0 +1,1698 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef _SMB2PDU_H
+#define _SMB2PDU_H
+
+#include "ntlmssp.h"
+#include "smbacl.h"
+
+/*
+ * Note that, due to trying to use names similar to the protocol specifications,
+ * there are many mixed case field names in the structures below.  Although
+ * this does not match typical Linux kernel style, it is necessary to be
+ * able to match against the protocol specification.
+ *
+ * SMB2 commands
+ * Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
+ * (ie no useful data other than the SMB error code itself) and are marked as such.
+ * Knowing this helps avoid response buffer allocations and copy in some cases.
+ */
+
+/* List of commands in host endian */
+#define SMB2_NEGOTIATE_HE      0x0000
+#define SMB2_SESSION_SETUP_HE  0x0001
+#define SMB2_LOGOFF_HE         0x0002 /* trivial request/resp */
+#define SMB2_TREE_CONNECT_HE   0x0003
+#define SMB2_TREE_DISCONNECT_HE        0x0004 /* trivial req/resp */
+#define SMB2_CREATE_HE         0x0005
+#define SMB2_CLOSE_HE          0x0006
+#define SMB2_FLUSH_HE          0x0007 /* trivial resp */
+#define SMB2_READ_HE           0x0008
+#define SMB2_WRITE_HE          0x0009
+#define SMB2_LOCK_HE           0x000A
+#define SMB2_IOCTL_HE          0x000B
+#define SMB2_CANCEL_HE         0x000C
+#define SMB2_ECHO_HE           0x000D
+#define SMB2_QUERY_DIRECTORY_HE        0x000E
+#define SMB2_CHANGE_NOTIFY_HE  0x000F
+#define SMB2_QUERY_INFO_HE     0x0010
+#define SMB2_SET_INFO_HE       0x0011
+#define SMB2_OPLOCK_BREAK_HE   0x0012
+
+/* The same list in little endian */
+#define SMB2_NEGOTIATE         cpu_to_le16(SMB2_NEGOTIATE_HE)
+#define SMB2_SESSION_SETUP     cpu_to_le16(SMB2_SESSION_SETUP_HE)
+#define SMB2_LOGOFF            cpu_to_le16(SMB2_LOGOFF_HE)
+#define SMB2_TREE_CONNECT      cpu_to_le16(SMB2_TREE_CONNECT_HE)
+#define SMB2_TREE_DISCONNECT   cpu_to_le16(SMB2_TREE_DISCONNECT_HE)
+#define SMB2_CREATE            cpu_to_le16(SMB2_CREATE_HE)
+#define SMB2_CLOSE             cpu_to_le16(SMB2_CLOSE_HE)
+#define SMB2_FLUSH             cpu_to_le16(SMB2_FLUSH_HE)
+#define SMB2_READ              cpu_to_le16(SMB2_READ_HE)
+#define SMB2_WRITE             cpu_to_le16(SMB2_WRITE_HE)
+#define SMB2_LOCK              cpu_to_le16(SMB2_LOCK_HE)
+#define SMB2_IOCTL             cpu_to_le16(SMB2_IOCTL_HE)
+#define SMB2_CANCEL            cpu_to_le16(SMB2_CANCEL_HE)
+#define SMB2_ECHO              cpu_to_le16(SMB2_ECHO_HE)
+#define SMB2_QUERY_DIRECTORY   cpu_to_le16(SMB2_QUERY_DIRECTORY_HE)
+#define SMB2_CHANGE_NOTIFY     cpu_to_le16(SMB2_CHANGE_NOTIFY_HE)
+#define SMB2_QUERY_INFO                cpu_to_le16(SMB2_QUERY_INFO_HE)
+#define SMB2_SET_INFO          cpu_to_le16(SMB2_SET_INFO_HE)
+#define SMB2_OPLOCK_BREAK      cpu_to_le16(SMB2_OPLOCK_BREAK_HE)
+
+/*Create Action Flags*/
+#define FILE_SUPERSEDED                0x00000000
+#define FILE_OPENED            0x00000001
+#define FILE_CREATED           0x00000002
+#define FILE_OVERWRITTEN       0x00000003
+
+/*
+ * Size of the session key (crypto key encrypted with the password)
+ */
+#define SMB2_NTLMV2_SESSKEY_SIZE       16
+#define SMB2_SIGNATURE_SIZE            16
+#define SMB2_HMACSHA256_SIZE           32
+#define SMB2_CMACAES_SIZE              16
+#define SMB3_GCM128_CRYPTKEY_SIZE      16
+#define SMB3_GCM256_CRYPTKEY_SIZE      32
+
+/*
+ * Size of the smb3 encryption/decryption keys
+ */
+#define SMB3_ENC_DEC_KEY_SIZE          32
+
+/*
+ * Size of the smb3 signing key
+ */
+#define SMB3_SIGN_KEY_SIZE             16
+
+#define CIFS_CLIENT_CHALLENGE_SIZE     8
+#define SMB_SERVER_CHALLENGE_SIZE      8
+
+/* SMB2 Max Credits */
+#define SMB2_MAX_CREDITS               8192
+
+#define SMB2_CLIENT_GUID_SIZE          16
+#define SMB2_CREATE_GUID_SIZE          16
+
+/* Maximum buffer size value we can send with 1 credit */
+#define SMB2_MAX_BUFFER_SIZE 65536
+
+#define NUMBER_OF_SMB2_COMMANDS        0x0013
+
+/* BB FIXME - analyze following length BB */
+#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
+
+#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) /* 'B''M''S' */
+#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
+
+#define SMB21_DEFAULT_IOSIZE   (1024 * 1024)
+#define SMB3_DEFAULT_IOSIZE    (4 * 1024 * 1024)
+#define SMB3_DEFAULT_TRANS_SIZE        (1024 * 1024)
+
+/*
+ * SMB2 Header Definition
+ *
+ * "MBZ" :  Must be Zero
+ * "BB"  :  BugBug, Something to check/review/analyze later
+ * "PDU" :  "Protocol Data Unit" (ie a network "frame")
+ *
+ */
+
+#define __SMB2_HEADER_STRUCTURE_SIZE   64
+#define SMB2_HEADER_STRUCTURE_SIZE                             \
+       cpu_to_le16(__SMB2_HEADER_STRUCTURE_SIZE)
+
+struct smb2_hdr {
+       __be32 smb2_buf_length; /* big endian on wire */
+                               /*
+                                * length is only two or three bytes - with
+                                * one or two byte type preceding it that MBZ
+                                */
+       __le32 ProtocolId;      /* 0xFE 'S' 'M' 'B' */
+       __le16 StructureSize;   /* 64 */
+       __le16 CreditCharge;    /* MBZ */
+       __le32 Status;          /* Error from server */
+       __le16 Command;
+       __le16 CreditRequest;   /* CreditResponse */
+       __le32 Flags;
+       __le32 NextCommand;
+       __le64 MessageId;
+       union {
+               struct {
+                       __le32 ProcessId;
+                       __le32  TreeId;
+               } __packed SyncId;
+               __le64  AsyncId;
+       } __packed Id;
+       __le64  SessionId;
+       __u8   Signature[16];
+} __packed;
+
+struct smb2_pdu {
+       struct smb2_hdr hdr;
+       __le16 StructureSize2; /* size of wct area (varies, request specific) */
+} __packed;
+
+#define SMB3_AES_CCM_NONCE 11
+#define SMB3_AES_GCM_NONCE 12
+
+struct smb2_transform_hdr {
+       __be32 smb2_buf_length; /* big endian on wire */
+       /*
+        * length is only two or three bytes - with
+        * one or two byte type preceding it that MBZ
+        */
+       __le32 ProtocolId;      /* 0xFD 'S' 'M' 'B' */
+       __u8   Signature[16];
+       __u8   Nonce[16];
+       __le32 OriginalMessageSize;
+       __u16  Reserved1;
+       __le16 Flags; /* EncryptionAlgorithm */
+       __le64  SessionId;
+} __packed;
+
+/*
+ *     SMB2 flag definitions
+ */
+#define SMB2_FLAGS_SERVER_TO_REDIR     cpu_to_le32(0x00000001)
+#define SMB2_FLAGS_ASYNC_COMMAND       cpu_to_le32(0x00000002)
+#define SMB2_FLAGS_RELATED_OPERATIONS  cpu_to_le32(0x00000004)
+#define SMB2_FLAGS_SIGNED              cpu_to_le32(0x00000008)
+#define SMB2_FLAGS_DFS_OPERATIONS      cpu_to_le32(0x10000000)
+#define SMB2_FLAGS_REPLAY_OPERATIONS   cpu_to_le32(0x20000000)
+
+/*
+ *     Definitions for SMB2 Protocol Data Units (network frames)
+ *
+ *  See MS-SMB2.PDF specification for protocol details.
+ *  The Naming convention is the lower case version of the SMB2
+ *  command code name for the struct. Note that structures must be packed.
+ *
+ */
+
+#define SMB2_ERROR_STRUCTURE_SIZE2     9
+#define SMB2_ERROR_STRUCTURE_SIZE2_LE  cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2)
+
+struct smb2_err_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;
+       __u8   ErrorContextCount;
+       __u8   Reserved;
+       __le32 ByteCount;  /* even if zero, at least one byte follows */
+       __u8   ErrorData[1];  /* variable length */
+} __packed;
+
+struct smb2_negotiate_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 36 */
+       __le16 DialectCount;
+       __le16 SecurityMode;
+       __le16 Reserved;        /* MBZ */
+       __le32 Capabilities;
+       __u8   ClientGUID[SMB2_CLIENT_GUID_SIZE];
+       /* In SMB3.02 and earlier next three were MBZ le64 ClientStartTime */
+       __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
+       __le16 NegotiateContextCount;  /* SMB3.1.1 only. MBZ earlier */
+       __le16 Reserved2;
+       __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
+} __packed;
+
+/* SecurityMode flags */
+#define SMB2_NEGOTIATE_SIGNING_ENABLED_LE      cpu_to_le16(0x0001)
+#define SMB2_NEGOTIATE_SIGNING_REQUIRED                0x0002
+#define SMB2_NEGOTIATE_SIGNING_REQUIRED_LE     cpu_to_le16(0x0002)
+/* Capabilities flags */
+#define SMB2_GLOBAL_CAP_DFS            0x00000001
+#define SMB2_GLOBAL_CAP_LEASING                0x00000002 /* Resp only New to SMB2.1 */
+#define SMB2_GLOBAL_CAP_LARGE_MTU      0X00000004 /* Resp only New to SMB2.1 */
+#define SMB2_GLOBAL_CAP_MULTI_CHANNEL  0x00000008 /* New to SMB3 */
+#define SMB2_GLOBAL_CAP_PERSISTENT_HANDLES 0x00000010 /* New to SMB3 */
+#define SMB2_GLOBAL_CAP_DIRECTORY_LEASING  0x00000020 /* New to SMB3 */
+#define SMB2_GLOBAL_CAP_ENCRYPTION     0x00000040 /* New to SMB3 */
+/* Internal types */
+#define SMB2_NT_FIND                   0x00100000
+#define SMB2_LARGE_FILES               0x00200000
+
+#define SMB311_SALT_SIZE                       32
+/* Hash Algorithm Types */
+#define SMB2_PREAUTH_INTEGRITY_SHA512  cpu_to_le16(0x0001)
+
+#define PREAUTH_HASHVALUE_SIZE         64
+
+struct preauth_integrity_info {
+       /* PreAuth integrity Hash ID */
+       __le16                  Preauth_HashId;
+       /* PreAuth integrity Hash Value */
+       __u8                    Preauth_HashValue[PREAUTH_HASHVALUE_SIZE];
+};
+
+/* offset is sizeof smb2_negotiate_rsp - 4 but rounded up to 8 bytes. */
+#ifdef CONFIG_SMB_SERVER_KERBEROS5
+/* sizeof(struct smb2_negotiate_rsp) - 4 =
+ * header(64) + response(64) + GSS_LENGTH(96) + GSS_PADDING(0)
+ */
+#define OFFSET_OF_NEG_CONTEXT  0xe0
+#else
+/* sizeof(struct smb2_negotiate_rsp) - 4 =
+ * header(64) + response(64) + GSS_LENGTH(74) + GSS_PADDING(6)
+ */
+#define OFFSET_OF_NEG_CONTEXT  0xd0
+#endif
+
+#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES    cpu_to_le16(1)
+#define SMB2_ENCRYPTION_CAPABILITIES           cpu_to_le16(2)
+#define SMB2_COMPRESSION_CAPABILITIES          cpu_to_le16(3)
+#define SMB2_NETNAME_NEGOTIATE_CONTEXT_ID      cpu_to_le16(5)
+#define SMB2_SIGNING_CAPABILITIES              cpu_to_le16(8)
+#define SMB2_POSIX_EXTENSIONS_AVAILABLE                cpu_to_le16(0x100)
+
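+/*
+ * Negotiate contexts follow the fixed portion of the negotiate
+ * request/response; each context starts on an 8-byte aligned boundary
+ * on the wire (hence the rounding noted above).
+ */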
+struct smb2_neg_context {
+       __le16  ContextType;
+       __le16  DataLength;
+       __le32  Reserved;
+       /* Followed by array of data */
+} __packed;
+
+struct smb2_preauth_neg_context {
+       __le16  ContextType; /* 1 */
+       __le16  DataLength;
+       __le32  Reserved;
+       __le16  HashAlgorithmCount; /* 1 */
+       __le16  SaltLength;
+       __le16  HashAlgorithms; /* HashAlgorithms[0] since only one defined */
+       __u8    Salt[SMB311_SALT_SIZE];
+} __packed;
+
+/* Encryption Algorithms Ciphers */
+#define SMB2_ENCRYPTION_AES128_CCM     cpu_to_le16(0x0001)
+#define SMB2_ENCRYPTION_AES128_GCM     cpu_to_le16(0x0002)
+#define SMB2_ENCRYPTION_AES256_CCM     cpu_to_le16(0x0003)
+#define SMB2_ENCRYPTION_AES256_GCM     cpu_to_le16(0x0004)
+
+struct smb2_encryption_neg_context {
+       __le16  ContextType; /* 2 */
+       __le16  DataLength;
+       __le32  Reserved;
+       /* CipherCount usually 2, but can be 3 when AES256-GCM enabled */
+       __le16  CipherCount; /* AES-128-GCM and AES-128-CCM by default */
+       __le16  Ciphers[];
+} __packed;
+
+#define SMB3_COMPRESS_NONE     cpu_to_le16(0x0000)
+#define SMB3_COMPRESS_LZNT1    cpu_to_le16(0x0001)
+#define SMB3_COMPRESS_LZ77     cpu_to_le16(0x0002)
+#define SMB3_COMPRESS_LZ77_HUFF        cpu_to_le16(0x0003)
+
+struct smb2_compression_ctx {
+       __le16  ContextType; /* 3 */
+       __le16  DataLength;
+       __le32  Reserved;
+       __le16  CompressionAlgorithmCount;
+       __u16   Padding;
+       __le32  Reserved1;
+       __le16  CompressionAlgorithms[];
+} __packed;
+
+#define POSIX_CTXT_DATA_LEN     16
+struct smb2_posix_neg_context {
+       __le16  ContextType; /* 0x100 */
+       __le16  DataLength;
+       __le32  Reserved;
+       __u8    Name[16]; /* POSIX ctxt GUID 93AD25509CB411E7B42383DE968BCD7C */
+} __packed;
+
+struct smb2_netname_neg_context {
+       __le16  ContextType; /* 5 */
+       __le16  DataLength;
+       __le32  Reserved;
+       __le16  NetName[]; /* hostname of target converted to UCS-2 */
+} __packed;
+
+/* Signing algorithms */
+#define SIGNING_ALG_HMAC_SHA256                cpu_to_le16(0)
+#define SIGNING_ALG_AES_CMAC           cpu_to_le16(1)
+#define SIGNING_ALG_AES_GMAC           cpu_to_le16(2)
+
+struct smb2_signing_capabilities {
+       __le16  ContextType; /* 8 */
+       __le16  DataLength;
+       __le32  Reserved;
+       __le16  SigningAlgorithmCount;
+       __le16  SigningAlgorithms[];
+} __packed;
+
+struct smb2_negotiate_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 65 */
+       __le16 SecurityMode;
+       __le16 DialectRevision;
+       __le16 NegotiateContextCount; /* Prior to SMB3.1.1 was Reserved & MBZ */
+       __u8   ServerGUID[16];
+       __le32 Capabilities;
+       __le32 MaxTransactSize;
+       __le32 MaxReadSize;
+       __le32 MaxWriteSize;
+       __le64 SystemTime;      /* MBZ */
+       __le64 ServerStartTime;
+       __le16 SecurityBufferOffset;
+       __le16 SecurityBufferLength;
+       __le32 NegotiateContextOffset;  /* Pre:SMB3.1.1 was reserved/ignored */
+       __u8   Buffer[1];       /* variable length GSS security buffer */
+} __packed;
+
+/* Flags */
+#define SMB2_SESSION_REQ_FLAG_BINDING          0x01
+#define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA     0x04
+
+#define SMB2_SESSION_EXPIRED           (0)
+#define SMB2_SESSION_IN_PROGRESS       BIT(0)
+#define SMB2_SESSION_VALID             BIT(1)
+
+struct smb2_sess_setup_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 25 */
+       __u8   Flags;
+       __u8   SecurityMode;
+       __le32 Capabilities;
+       __le32 Channel;
+       __le16 SecurityBufferOffset;
+       __le16 SecurityBufferLength;
+       __le64 PreviousSessionId;
+       __u8   Buffer[1];       /* variable length GSS security buffer */
+} __packed;
+
+/* Flags/Reserved for SMB3.1.1 */
+#define SMB2_SHAREFLAG_CLUSTER_RECONNECT       0x0001
+
+/* Currently defined SessionFlags */
+#define SMB2_SESSION_FLAG_IS_GUEST_LE          cpu_to_le16(0x0001)
+#define SMB2_SESSION_FLAG_IS_NULL_LE           cpu_to_le16(0x0002)
+#define SMB2_SESSION_FLAG_ENCRYPT_DATA_LE      cpu_to_le16(0x0004)
+struct smb2_sess_setup_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 9 */
+       __le16 SessionFlags;
+       __le16 SecurityBufferOffset;
+       __le16 SecurityBufferLength;
+       __u8   Buffer[1];       /* variable length GSS security buffer */
+} __packed;
+
+struct smb2_logoff_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 4 */
+       __le16 Reserved;
+} __packed;
+
+struct smb2_logoff_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 4 */
+       __le16 Reserved;
+} __packed;
+
+struct smb2_tree_connect_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 9 */
+       __le16 Reserved;        /* Flags in SMB3.1.1 */
+       __le16 PathOffset;
+       __le16 PathLength;
+       __u8   Buffer[1];       /* variable length */
+} __packed;
+
+struct smb2_tree_connect_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 16 */
+       __u8   ShareType;  /* see below */
+       __u8   Reserved;
+       __le32 ShareFlags; /* see below */
+       __le32 Capabilities; /* see below */
+       __le32 MaximalAccess;
+} __packed;
+
+/* Possible ShareType values */
+#define SMB2_SHARE_TYPE_DISK   0x01
+#define SMB2_SHARE_TYPE_PIPE   0x02
+#define        SMB2_SHARE_TYPE_PRINT   0x03
+
+/*
+ * Possible ShareFlags - exactly one of the first 4 caching flags must be
+ * set (any of the remaining, SHI1005, flags may be set individually or in
+ * combination).
+ */
+#define SMB2_SHAREFLAG_MANUAL_CACHING                  0x00000000
+#define SMB2_SHAREFLAG_AUTO_CACHING                    0x00000010
+#define SMB2_SHAREFLAG_VDO_CACHING                     0x00000020
+#define SMB2_SHAREFLAG_NO_CACHING                      0x00000030
+#define SHI1005_FLAGS_DFS                              0x00000001
+#define SHI1005_FLAGS_DFS_ROOT                         0x00000002
+#define SHI1005_FLAGS_RESTRICT_EXCLUSIVE_OPENS         0x00000100
+#define SHI1005_FLAGS_FORCE_SHARED_DELETE              0x00000200
+#define SHI1005_FLAGS_ALLOW_NAMESPACE_CACHING          0x00000400
+#define SHI1005_FLAGS_ACCESS_BASED_DIRECTORY_ENUM      0x00000800
+#define SHI1005_FLAGS_FORCE_LEVELII_OPLOCK             0x00001000
+#define SHI1005_FLAGS_ENABLE_HASH                      0x00002000
+
+/* Possible share capabilities */
+#define SMB2_SHARE_CAP_DFS     cpu_to_le32(0x00000008)
+
+struct smb2_tree_disconnect_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 4 */
+       __le16 Reserved;
+} __packed;
+
+struct smb2_tree_disconnect_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 4 */
+       __le16 Reserved;
+} __packed;
+
+#define ATTR_READONLY_LE       cpu_to_le32(ATTR_READONLY)
+#define ATTR_HIDDEN_LE         cpu_to_le32(ATTR_HIDDEN)
+#define ATTR_SYSTEM_LE         cpu_to_le32(ATTR_SYSTEM)
+#define ATTR_DIRECTORY_LE      cpu_to_le32(ATTR_DIRECTORY)
+#define ATTR_ARCHIVE_LE                cpu_to_le32(ATTR_ARCHIVE)
+#define ATTR_NORMAL_LE         cpu_to_le32(ATTR_NORMAL)
+#define ATTR_TEMPORARY_LE      cpu_to_le32(ATTR_TEMPORARY)
+#define ATTR_SPARSE_FILE_LE    cpu_to_le32(ATTR_SPARSE)
+#define ATTR_REPARSE_POINT_LE  cpu_to_le32(ATTR_REPARSE)
+#define ATTR_COMPRESSED_LE     cpu_to_le32(ATTR_COMPRESSED)
+#define ATTR_OFFLINE_LE                cpu_to_le32(ATTR_OFFLINE)
+#define ATTR_NOT_CONTENT_INDEXED_LE    cpu_to_le32(ATTR_NOT_CONTENT_INDEXED)
+#define ATTR_ENCRYPTED_LE      cpu_to_le32(ATTR_ENCRYPTED)
+#define ATTR_INTEGRITY_STREAML_LE      cpu_to_le32(0x00008000)
+#define ATTR_NO_SCRUB_DATA_LE  cpu_to_le32(0x00020000)
+#define ATTR_MASK_LE           cpu_to_le32(0x00007FB7)
+
+/* Oplock levels */
+#define SMB2_OPLOCK_LEVEL_NONE         0x00
+#define SMB2_OPLOCK_LEVEL_II           0x01
+#define SMB2_OPLOCK_LEVEL_EXCLUSIVE    0x08
+#define SMB2_OPLOCK_LEVEL_BATCH                0x09
+#define SMB2_OPLOCK_LEVEL_LEASE                0xFF
+/* Non-spec internal type */
+#define SMB2_OPLOCK_LEVEL_NOCHANGE     0x99
+
+/* Desired Access Flags */
+#define FILE_READ_DATA_LE              cpu_to_le32(0x00000001)
+#define FILE_LIST_DIRECTORY_LE         cpu_to_le32(0x00000001)
+#define FILE_WRITE_DATA_LE             cpu_to_le32(0x00000002)
+#define FILE_ADD_FILE_LE               cpu_to_le32(0x00000002)
+#define FILE_APPEND_DATA_LE            cpu_to_le32(0x00000004)
+#define FILE_ADD_SUBDIRECTORY_LE       cpu_to_le32(0x00000004)
+#define FILE_READ_EA_LE                        cpu_to_le32(0x00000008)
+#define FILE_WRITE_EA_LE               cpu_to_le32(0x00000010)
+#define FILE_EXECUTE_LE                        cpu_to_le32(0x00000020)
+#define FILE_TRAVERSE_LE               cpu_to_le32(0x00000020)
+#define FILE_DELETE_CHILD_LE           cpu_to_le32(0x00000040)
+#define FILE_READ_ATTRIBUTES_LE                cpu_to_le32(0x00000080)
+#define FILE_WRITE_ATTRIBUTES_LE       cpu_to_le32(0x00000100)
+#define FILE_DELETE_LE                 cpu_to_le32(0x00010000)
+#define FILE_READ_CONTROL_LE           cpu_to_le32(0x00020000)
+#define FILE_WRITE_DAC_LE              cpu_to_le32(0x00040000)
+#define FILE_WRITE_OWNER_LE            cpu_to_le32(0x00080000)
+#define FILE_SYNCHRONIZE_LE            cpu_to_le32(0x00100000)
+#define FILE_ACCESS_SYSTEM_SECURITY_LE cpu_to_le32(0x01000000)
+#define FILE_MAXIMAL_ACCESS_LE         cpu_to_le32(0x02000000)
+#define FILE_GENERIC_ALL_LE            cpu_to_le32(0x10000000)
+#define FILE_GENERIC_EXECUTE_LE                cpu_to_le32(0x20000000)
+#define FILE_GENERIC_WRITE_LE          cpu_to_le32(0x40000000)
+#define FILE_GENERIC_READ_LE           cpu_to_le32(0x80000000)
+#define DESIRED_ACCESS_MASK            cpu_to_le32(0xF21F01FF)
+
+/* ShareAccess Flags */
+#define FILE_SHARE_READ_LE             cpu_to_le32(0x00000001)
+#define FILE_SHARE_WRITE_LE            cpu_to_le32(0x00000002)
+#define FILE_SHARE_DELETE_LE           cpu_to_le32(0x00000004)
+#define FILE_SHARE_ALL_LE              cpu_to_le32(0x00000007)
+
+/* CreateDisposition Flags */
+#define FILE_SUPERSEDE_LE              cpu_to_le32(0x00000000)
+#define FILE_OPEN_LE                   cpu_to_le32(0x00000001)
+#define FILE_CREATE_LE                 cpu_to_le32(0x00000002)
+#define        FILE_OPEN_IF_LE                 cpu_to_le32(0x00000003)
+#define FILE_OVERWRITE_LE              cpu_to_le32(0x00000004)
+#define FILE_OVERWRITE_IF_LE           cpu_to_le32(0x00000005)
+#define FILE_CREATE_MASK_LE            cpu_to_le32(0x00000007)
+
+#define FILE_READ_DESIRED_ACCESS_LE    (FILE_READ_DATA_LE |            \
+                                       FILE_READ_EA_LE |               \
+                                       FILE_GENERIC_READ_LE)
+#define FILE_WRITE_DESIRE_ACCESS_LE    (FILE_WRITE_DATA_LE |           \
+                                       FILE_APPEND_DATA_LE |           \
+                                       FILE_WRITE_EA_LE |              \
+                                       FILE_WRITE_ATTRIBUTES_LE |      \
+                                       FILE_GENERIC_WRITE_LE)
+
+/* Impersonation Levels */
+#define IL_ANONYMOUS_LE                cpu_to_le32(0x00000000)
+#define IL_IDENTIFICATION_LE   cpu_to_le32(0x00000001)
+#define IL_IMPERSONATION_LE    cpu_to_le32(0x00000002)
+#define IL_DELEGATE_LE         cpu_to_le32(0x00000003)
+
+/* Create Context Values */
+#define SMB2_CREATE_EA_BUFFER                  "ExtA" /* extended attributes */
+#define SMB2_CREATE_SD_BUFFER                  "SecD" /* security descriptor */
+#define SMB2_CREATE_DURABLE_HANDLE_REQUEST     "DHnQ"
+#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT   "DHnC"
+#define SMB2_CREATE_ALLOCATION_SIZE            "AlSi"
+#define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
+#define SMB2_CREATE_TIMEWARP_REQUEST           "TWrp"
+#define SMB2_CREATE_QUERY_ON_DISK_ID           "QFid"
+#define SMB2_CREATE_REQUEST_LEASE              "RqLs"
+#define SMB2_CREATE_DURABLE_HANDLE_REQUEST_V2   "DH2Q"
+#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 "DH2C"
+#define SMB2_CREATE_APP_INSTANCE_ID     "\x45\xBC\xA6\x6A\xEF\xA7\xF7\x4A\x90\x08\xFA\x46\x2E\x14\x4D\x74"
+#define SMB2_CREATE_APP_INSTANCE_VERSION       "\xB9\x82\xD0\xB7\x3B\x56\x07\x4F\xA0\x7B\x52\x4A\x81\x16\xA0\x10"
+#define SVHDX_OPEN_DEVICE_CONTEXT       0x83CE6F1AD851E0986E34401CC9BCFCE9
+#define SMB2_CREATE_TAG_POSIX          "\x93\xAD\x25\x50\x9C\xB4\x11\xE7\xB4\x23\x83\xDE\x96\x8B\xCD\x7C"
+
+struct smb2_create_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 57 */
+       __u8   SecurityFlags;
+       __u8   RequestedOplockLevel;
+       __le32 ImpersonationLevel;
+       __le64 SmbCreateFlags;
+       __le64 Reserved;
+       __le32 DesiredAccess;
+       __le32 FileAttributes;
+       __le32 ShareAccess;
+       __le32 CreateDisposition;
+       __le32 CreateOptions;
+       __le16 NameOffset;
+       __le16 NameLength;
+       __le32 CreateContextsOffset;
+       __le32 CreateContextsLength;
+       __u8   Buffer[0];
+} __packed;
+
+struct smb2_create_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 89 */
+       __u8   OplockLevel;
+       __u8   Reserved;
+       __le32 CreateAction;
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 AllocationSize;
+       __le64 EndofFile;
+       __le32 FileAttributes;
+       __le32 Reserved2;
+       __le64  PersistentFileId;
+       __le64  VolatileFileId;
+       __le32 CreateContextsOffset;
+       __le32 CreateContextsLength;
+       __u8   Buffer[1];
+} __packed;
+
+struct create_context {
+       __le32 Next;
+       __le16 NameOffset;
+       __le16 NameLength;
+       __le16 Reserved;
+       __le16 DataOffset;
+       __le32 DataLength;
+       __u8 Buffer[0];
+} __packed;
+
+struct create_durable_req_v2 {
+       struct create_context ccontext;
+       __u8   Name[8];
+       __le32 Timeout;
+       __le32 Flags;
+       __u8 Reserved[8];
+       __u8 CreateGuid[16];
+} __packed;
+
+struct create_durable_reconn_req {
+       struct create_context ccontext;
+       __u8   Name[8];
+       union {
+               __u8  Reserved[16];
+               struct {
+                       __le64 PersistentFileId;
+                       __le64 VolatileFileId;
+               } Fid;
+       } Data;
+} __packed;
+
+struct create_durable_reconn_v2_req {
+       struct create_context ccontext;
+       __u8   Name[8];
+       struct {
+               __le64 PersistentFileId;
+               __le64 VolatileFileId;
+       } Fid;
+       __u8 CreateGuid[16];
+       __le32 Flags;
+} __packed;
+
+struct create_app_inst_id {
+       struct create_context ccontext;
+       __u8 Name[8];
+       __u8 Reserved[8];
+       __u8 AppInstanceId[16];
+} __packed;
+
+struct create_app_inst_id_vers {
+       struct create_context ccontext;
+       __u8 Name[8];
+       __u8 Reserved[2];
+       __u8 Padding[4];
+       __le64 AppInstanceVersionHigh;
+       __le64 AppInstanceVersionLow;
+} __packed;
+
+struct create_mxac_req {
+       struct create_context ccontext;
+       __u8   Name[8];
+       __le64 Timestamp;
+} __packed;
+
+struct create_alloc_size_req {
+       struct create_context ccontext;
+       __u8   Name[8];
+       __le64 AllocationSize;
+} __packed;
+
+struct create_posix {
+       struct create_context ccontext;
+       __u8    Name[16];
+       __le32  Mode;
+       __u32   Reserved;
+} __packed;
+
+struct create_durable_rsp {
+       struct create_context ccontext;
+       __u8   Name[8];
+       union {
+               __u8  Reserved[8];
+               __u64 data;
+       } Data;
+} __packed;
+
+struct create_durable_v2_rsp {
+       struct create_context ccontext;
+       __u8   Name[8];
+       __le32 Timeout;
+       __le32 Flags;
+} __packed;
+
+struct create_mxac_rsp {
+       struct create_context ccontext;
+       __u8   Name[8];
+       __le32 QueryStatus;
+       __le32 MaximalAccess;
+} __packed;
+
+struct create_disk_id_rsp {
+       struct create_context ccontext;
+       __u8   Name[8];
+       __le64 DiskFileId;
+       __le64 VolumeId;
+       __u8  Reserved[16];
+} __packed;
+
+/* equivalent of the contents of SMB3.1.1 POSIX open context response */
+struct create_posix_rsp {
+       struct create_context ccontext;
+       __u8    Name[16];
+       __le32 nlink;
+       __le32 reparse_tag;
+       __le32 mode;
+       u8 SidBuffer[40];
+} __packed;
+
+#define SMB2_LEASE_NONE_LE                     cpu_to_le32(0x00)
+#define SMB2_LEASE_READ_CACHING_LE             cpu_to_le32(0x01)
+#define SMB2_LEASE_HANDLE_CACHING_LE           cpu_to_le32(0x02)
+#define SMB2_LEASE_WRITE_CACHING_LE            cpu_to_le32(0x04)
+
+#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE   cpu_to_le32(0x02)
+
+struct lease_context {
+       __le64 LeaseKeyLow;
+       __le64 LeaseKeyHigh;
+       __le32 LeaseState;
+       __le32 LeaseFlags;
+       __le64 LeaseDuration;
+} __packed;
+
+struct lease_context_v2 {
+       __le64 LeaseKeyLow;
+       __le64 LeaseKeyHigh;
+       __le32 LeaseState;
+       __le32 LeaseFlags;
+       __le64 LeaseDuration;
+       __le64 ParentLeaseKeyLow;
+       __le64 ParentLeaseKeyHigh;
+       __le16 Epoch;
+       __le16 Reserved;
+} __packed;
+
+struct create_lease {
+       struct create_context ccontext;
+       __u8   Name[8];
+       struct lease_context lcontext;
+} __packed;
+
+struct create_lease_v2 {
+       struct create_context ccontext;
+       __u8   Name[8];
+       struct lease_context_v2 lcontext;
+       __u8   Pad[4];
+} __packed;
+
+/* Currently defined values for close flags */
+#define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB       cpu_to_le16(0x0001)
+struct smb2_close_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 24 */
+       __le16 Flags;
+       __le32 Reserved;
+       __le64  PersistentFileId;
+       __le64  VolatileFileId;
+} __packed;
+
+struct smb2_close_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* 60 */
+       __le16 Flags;
+       __le32 Reserved;
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 AllocationSize;  /* Beginning of FILE_STANDARD_INFO equivalent */
+       __le64 EndOfFile;
+       __le32 Attributes;
+} __packed;
+
+struct smb2_flush_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 24 */
+       __le16 Reserved1;
+       __le32 Reserved2;
+       __le64  PersistentFileId;
+       __le64  VolatileFileId;
+} __packed;
+
+struct smb2_flush_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;
+       __le16 Reserved;
+} __packed;
+
+struct smb2_buffer_desc_v1 {
+       __le64 offset;
+       __le32 token;
+       __le32 length;
+} __packed;
+
+#define SMB2_CHANNEL_NONE              cpu_to_le32(0x00000000)
+#define SMB2_CHANNEL_RDMA_V1           cpu_to_le32(0x00000001)
+#define SMB2_CHANNEL_RDMA_V1_INVALIDATE cpu_to_le32(0x00000002)
+
+struct smb2_read_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 49 */
+       __u8   Padding; /* offset from start of SMB2 header to place read */
+       __u8   Reserved;
+       __le32 Length;
+       __le64 Offset;
+       __le64  PersistentFileId;
+       __le64  VolatileFileId;
+       __le32 MinimumCount;
+       __le32 Channel; /* Reserved MBZ */
+       __le32 RemainingBytes;
+       __le16 ReadChannelInfoOffset; /* Reserved MBZ */
+       __le16 ReadChannelInfoLength; /* Reserved MBZ */
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_read_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 17 */
+       __u8   DataOffset;
+       __u8   Reserved;
+       __le32 DataLength;
+       __le32 DataRemaining;
+       __u32  Reserved2;
+       __u8   Buffer[1];
+} __packed;
+
+/* For write request Flags field below the following flag is defined: */
+#define SMB2_WRITEFLAG_WRITE_THROUGH 0x00000001
+
+struct smb2_write_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 49 */
+       __le16 DataOffset; /* offset from start of SMB2 header to write data */
+       __le32 Length;
+       __le64 Offset;
+       __le64  PersistentFileId;
+       __le64  VolatileFileId;
+       __le32 Channel; /* Reserved MBZ */
+       __le32 RemainingBytes;
+       __le16 WriteChannelInfoOffset; /* Reserved MBZ */
+       __le16 WriteChannelInfoLength; /* Reserved MBZ */
+       __le32 Flags;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_write_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 17 */
+       __u8   DataOffset;
+       __u8   Reserved;
+       __le32 DataLength;
+       __le32 DataRemaining;
+       __u32  Reserved2;
+       __u8   Buffer[1];
+} __packed;
+
+#define SMB2_0_IOCTL_IS_FSCTL 0x00000001
+
+struct duplicate_extents_to_file {
+       __u64 PersistentFileHandle; /* source file handle, opaque endianness */
+       __u64 VolatileFileHandle;
+       __le64 SourceFileOffset;
+       __le64 TargetFileOffset;
+       __le64 ByteCount;  /* Bytes to be copied */
+} __packed;
+
+struct smb2_ioctl_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 57 */
+       __le16 Reserved; /* offset from start of SMB2 header to write data */
+       __le32 CntCode;
+       __le64  PersistentFileId;
+       __le64  VolatileFileId;
+       __le32 InputOffset; /* Reserved MBZ */
+       __le32 InputCount;
+       __le32 MaxInputResponse;
+       __le32 OutputOffset;
+       __le32 OutputCount;
+       __le32 MaxOutputResponse;
+       __le32 Flags;
+       __le32 Reserved2;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_ioctl_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 49 */
+       __le16 Reserved; /* offset from start of SMB2 header to write data */
+       __le32 CntCode;
+       __le64  PersistentFileId;
+       __le64  VolatileFileId;
+       __le32 InputOffset; /* Reserved MBZ */
+       __le32 InputCount;
+       __le32 OutputOffset;
+       __le32 OutputCount;
+       __le32 Flags;
+       __le32 Reserved2;
+       __u8   Buffer[1];
+} __packed;
+
+struct validate_negotiate_info_req {
+       __le32 Capabilities;
+       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
+       __le16 SecurityMode;
+       __le16 DialectCount;
+       __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
+} __packed;
+
+struct validate_negotiate_info_rsp {
+       __le32 Capabilities;
+       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
+       __le16 SecurityMode;
+       __le16 Dialect; /* Dialect in use for the connection */
+} __packed;
+
+struct smb_sockaddr_in {
+       __be16 Port;
+       __be32 IPv4address;
+       __u8 Reserved[8];
+} __packed;
+
+struct smb_sockaddr_in6 {
+       __be16 Port;
+       __be32 FlowInfo;
+       __u8 IPv6address[16];
+       __be32 ScopeId;
+} __packed;
+
+#define INTERNETWORK   0x0002
+#define INTERNETWORKV6 0x0017
+
+struct sockaddr_storage_rsp {
+       __le16 Family;
+       union {
+               struct smb_sockaddr_in addr4;
+               struct smb_sockaddr_in6 addr6;
+       };
+} __packed;
+
+#define RSS_CAPABLE    0x00000001
+#define RDMA_CAPABLE   0x00000002
+
+struct network_interface_info_ioctl_rsp {
+       __le32 Next; /* next interface. zero if this is last one */
+       __le32 IfIndex;
+       __le32 Capability; /* RSS or RDMA Capable */
+       __le32 Reserved;
+       __le64 LinkSpeed;
+       char    SockAddr_Storage[128];
+} __packed;
+
+struct file_object_buf_type1_ioctl_rsp {
+       __u8 ObjectId[16];
+       __u8 BirthVolumeId[16];
+       __u8 BirthObjectId[16];
+       __u8 DomainId[16];
+} __packed;
+
+struct resume_key_ioctl_rsp {
+       __le64 ResumeKey[3];
+       __le32 ContextLength;
+       __u8 Context[4]; /* ignored, Windows sets to 4 bytes of zero */
+} __packed;
+
+struct copychunk_ioctl_req {
+       __le64 ResumeKey[3];
+       __le32 ChunkCount;
+       __le32 Reserved;
+       __u8 Chunks[1]; /* array of srv_copychunk */
+} __packed;
+
+struct srv_copychunk {
+       __le64 SourceOffset;
+       __le64 TargetOffset;
+       __le32 Length;
+       __le32 Reserved;
+} __packed;
+
+struct copychunk_ioctl_rsp {
+       __le32 ChunksWritten;
+       __le32 ChunkBytesWritten;
+       __le32 TotalBytesWritten;
+} __packed;
+
+struct file_sparse {
+       __u8    SetSparse;
+} __packed;
+
+struct file_zero_data_information {
+       __le64  FileOffset;
+       __le64  BeyondFinalZero;
+} __packed;
+
+struct file_allocated_range_buffer {
+       __le64  file_offset;
+       __le64  length;
+} __packed;
+
+struct reparse_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __u8    DataBuffer[]; /* Variable Length */
+} __packed;
+
+/* Completion Filter flags for Notify */
+#define FILE_NOTIFY_CHANGE_FILE_NAME   0x00000001
+#define FILE_NOTIFY_CHANGE_DIR_NAME    0x00000002
+#define FILE_NOTIFY_CHANGE_NAME                0x00000003
+#define FILE_NOTIFY_CHANGE_ATTRIBUTES  0x00000004
+#define FILE_NOTIFY_CHANGE_SIZE                0x00000008
+#define FILE_NOTIFY_CHANGE_LAST_WRITE  0x00000010
+#define FILE_NOTIFY_CHANGE_LAST_ACCESS 0x00000020
+#define FILE_NOTIFY_CHANGE_CREATION    0x00000040
+#define FILE_NOTIFY_CHANGE_EA          0x00000080
+#define FILE_NOTIFY_CHANGE_SECURITY    0x00000100
+#define FILE_NOTIFY_CHANGE_STREAM_NAME 0x00000200
+#define FILE_NOTIFY_CHANGE_STREAM_SIZE 0x00000400
+#define FILE_NOTIFY_CHANGE_STREAM_WRITE        0x00000800
+
+/* Flags */
+#define SMB2_WATCH_TREE        0x0001
+
+struct smb2_notify_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 32 */
+       __le16 Flags;
+       __le32 OutputBufferLength;
+       __le64 PersistentFileId;
+       __le64 VolatileFileId;
+       __u32 CompletionFileter;
+       __u32 Reserved;
+} __packed;
+
+struct smb2_notify_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 9 */
+       __le16 OutputBufferOffset;
+       __le32 OutputBufferLength;
+       __u8 Buffer[1];
+} __packed;
+
+/* SMB2 Notify Action Flags */
+#define FILE_ACTION_ADDED              0x00000001
+#define FILE_ACTION_REMOVED            0x00000002
+#define FILE_ACTION_MODIFIED           0x00000003
+#define FILE_ACTION_RENAMED_OLD_NAME   0x00000004
+#define FILE_ACTION_RENAMED_NEW_NAME   0x00000005
+#define FILE_ACTION_ADDED_STREAM       0x00000006
+#define FILE_ACTION_REMOVED_STREAM     0x00000007
+#define FILE_ACTION_MODIFIED_STREAM    0x00000008
+#define FILE_ACTION_REMOVED_BY_DELETE  0x00000009
+
+#define SMB2_LOCKFLAG_SHARED           0x0001
+#define SMB2_LOCKFLAG_EXCLUSIVE                0x0002
+#define SMB2_LOCKFLAG_UNLOCK           0x0004
+#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
+#define SMB2_LOCKFLAG_MASK             0x0007
+
+struct smb2_lock_element {
+       __le64 Offset;
+       __le64 Length;
+       __le32 Flags;
+       __le32 Reserved;
+} __packed;
+
+struct smb2_lock_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 48 */
+       __le16 LockCount;
+       __le32 Reserved;
+       __le64  PersistentFileId;
+       __le64  VolatileFileId;
+       /* Followed by at least one */
+       struct smb2_lock_element locks[1];
+} __packed;
+
+struct smb2_lock_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 4 */
+       __le16 Reserved;
+} __packed;
+
+struct smb2_echo_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 4 */
+       __u16  Reserved;
+} __packed;
+
+struct smb2_echo_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 4 */
+       __u16  Reserved;
+} __packed;
+
+/* search (query_directory) Flags field */
+#define SMB2_RESTART_SCANS             0x01
+#define SMB2_RETURN_SINGLE_ENTRY       0x02
+#define SMB2_INDEX_SPECIFIED           0x04
+#define SMB2_REOPEN                    0x10
+
+struct smb2_query_directory_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 33 */
+       __u8   FileInformationClass;
+       __u8   Flags;
+       __le32 FileIndex;
+       __le64  PersistentFileId;
+       __le64  VolatileFileId;
+       __le16 FileNameOffset;
+       __le16 FileNameLength;
+       __le32 OutputBufferLength;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_query_directory_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 9 */
+       __le16 OutputBufferOffset;
+       __le32 OutputBufferLength;
+       __u8   Buffer[1];
+} __packed;
+
+/* Possible InfoType values */
+#define SMB2_O_INFO_FILE       0x01
+#define SMB2_O_INFO_FILESYSTEM 0x02
+#define SMB2_O_INFO_SECURITY   0x03
+#define SMB2_O_INFO_QUOTA      0x04
+
+/* Security info type additionalinfo flags. See MS-SMB2 (2.2.37) or MS-DTYP */
+#define OWNER_SECINFO   0x00000001
+#define GROUP_SECINFO   0x00000002
+#define DACL_SECINFO   0x00000004
+#define SACL_SECINFO   0x00000008
+#define LABEL_SECINFO   0x00000010
+#define ATTRIBUTE_SECINFO   0x00000020
+#define SCOPE_SECINFO   0x00000040
+#define BACKUP_SECINFO   0x00010000
+#define UNPROTECTED_SACL_SECINFO   0x10000000
+#define UNPROTECTED_DACL_SECINFO   0x20000000
+#define PROTECTED_SACL_SECINFO   0x40000000
+#define PROTECTED_DACL_SECINFO   0x80000000
+
+struct smb2_query_info_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 41 */
+       __u8   InfoType;
+       __u8   FileInfoClass;
+       __le32 OutputBufferLength;
+       __le16 InputBufferOffset;
+       __u16  Reserved;
+       __le32 InputBufferLength;
+       __le32 AdditionalInformation;
+       __le32 Flags;
+       __le64  PersistentFileId;
+       __le64  VolatileFileId;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_query_info_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 9 */
+       __le16 OutputBufferOffset;
+       __le32 OutputBufferLength;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_set_info_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 33 */
+       __u8   InfoType;
+       __u8   FileInfoClass;
+       __le32 BufferLength;
+       __le16 BufferOffset;
+       __u16  Reserved;
+       __le32 AdditionalInformation;
+       __le64  PersistentFileId;
+       __le64  VolatileFileId;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_set_info_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 2 */
+} __packed;
+
+/* FILE Info response size */
+#define FILE_DIRECTORY_INFORMATION_SIZE       1
+#define FILE_FULL_DIRECTORY_INFORMATION_SIZE  2
+#define FILE_BOTH_DIRECTORY_INFORMATION_SIZE  3
+#define FILE_BASIC_INFORMATION_SIZE           40
+#define FILE_STANDARD_INFORMATION_SIZE        24
+#define FILE_INTERNAL_INFORMATION_SIZE        8
+#define FILE_EA_INFORMATION_SIZE              4
+#define FILE_ACCESS_INFORMATION_SIZE          4
+#define FILE_NAME_INFORMATION_SIZE            9
+#define FILE_RENAME_INFORMATION_SIZE          10
+#define FILE_LINK_INFORMATION_SIZE            11
+#define FILE_NAMES_INFORMATION_SIZE           12
+#define FILE_DISPOSITION_INFORMATION_SIZE     13
+#define FILE_POSITION_INFORMATION_SIZE        14
+#define FILE_FULL_EA_INFORMATION_SIZE         15
+#define FILE_MODE_INFORMATION_SIZE            4
+#define FILE_ALIGNMENT_INFORMATION_SIZE       4
+#define FILE_ALL_INFORMATION_SIZE             104
+#define FILE_ALLOCATION_INFORMATION_SIZE      19
+#define FILE_END_OF_FILE_INFORMATION_SIZE     20
+#define FILE_ALTERNATE_NAME_INFORMATION_SIZE  8
+#define FILE_STREAM_INFORMATION_SIZE          32
+#define FILE_PIPE_INFORMATION_SIZE            23
+#define FILE_PIPE_LOCAL_INFORMATION_SIZE      24
+#define FILE_PIPE_REMOTE_INFORMATION_SIZE     25
+#define FILE_MAILSLOT_QUERY_INFORMATION_SIZE  26
+#define FILE_MAILSLOT_SET_INFORMATION_SIZE    27
+#define FILE_COMPRESSION_INFORMATION_SIZE     16
+#define FILE_OBJECT_ID_INFORMATION_SIZE       29
+/* Number 30 not defined in documents */
+#define FILE_MOVE_CLUSTER_INFORMATION_SIZE    31
+#define FILE_QUOTA_INFORMATION_SIZE           32
+#define FILE_REPARSE_POINT_INFORMATION_SIZE   33
+#define FILE_NETWORK_OPEN_INFORMATION_SIZE    56
+#define FILE_ATTRIBUTE_TAG_INFORMATION_SIZE   8
+
+/* FS Info response  size */
+#define FS_DEVICE_INFORMATION_SIZE     8
+#define FS_ATTRIBUTE_INFORMATION_SIZE  16
+#define FS_VOLUME_INFORMATION_SIZE     24
+#define FS_SIZE_INFORMATION_SIZE       24
+#define FS_FULL_SIZE_INFORMATION_SIZE  32
+#define FS_SECTOR_SIZE_INFORMATION_SIZE 28
+#define FS_OBJECT_ID_INFORMATION_SIZE 64
+#define FS_CONTROL_INFORMATION_SIZE 48
+#define FS_POSIX_INFORMATION_SIZE 56
+
+/* FS_ATTRIBUTE_File_System_Name */
+#define FS_TYPE_SUPPORT_SIZE   44
+struct fs_type_info {
+       char            *fs_name;
+       long            magic_number;
+} __packed;
+
+struct smb2_oplock_break {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 24 */
+       __u8   OplockLevel;
+       __u8   Reserved;
+       __le32 Reserved2;
+       __le64  PersistentFid;
+       __le64  VolatileFid;
+} __packed;
+
+#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
+
+struct smb2_lease_break {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 44 */
+       __le16 Epoch;
+       __le32 Flags;
+       __u8   LeaseKey[16];
+       __le32 CurrentLeaseState;
+       __le32 NewLeaseState;
+       __le32 BreakReason;
+       __le32 AccessMaskHint;
+       __le32 ShareMaskHint;
+} __packed;
+
+struct smb2_lease_ack {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 36 */
+       __le16 Reserved;
+       __le32 Flags;
+       __u8   LeaseKey[16];
+       __le32 LeaseState;
+       __le64 LeaseDuration;
+} __packed;
+
+/*
+ *     PDU infolevel structure definitions
+ *     BB consider moving to a different header
+ */
+
+/* File System Information Classes */
+#define FS_VOLUME_INFORMATION          1 /* Query */
+#define FS_LABEL_INFORMATION           2 /* Set */
+#define FS_SIZE_INFORMATION            3 /* Query */
+#define FS_DEVICE_INFORMATION          4 /* Query */
+#define FS_ATTRIBUTE_INFORMATION       5 /* Query */
+#define FS_CONTROL_INFORMATION         6 /* Query, Set */
+#define FS_FULL_SIZE_INFORMATION       7 /* Query */
+#define FS_OBJECT_ID_INFORMATION       8 /* Query, Set */
+#define FS_DRIVER_PATH_INFORMATION     9 /* Query */
+#define FS_SECTOR_SIZE_INFORMATION     11 /* SMB3 or later. Query */
+#define FS_POSIX_INFORMATION           100 /* SMB3.1.1 POSIX. Query */
+
+struct smb2_fs_full_size_info {
+       __le64 TotalAllocationUnits;
+       __le64 CallerAvailableAllocationUnits;
+       __le64 ActualAvailableAllocationUnits;
+       __le32 SectorsPerAllocationUnit;
+       __le32 BytesPerSector;
+} __packed;
+
+#define SSINFO_FLAGS_ALIGNED_DEVICE            0x00000001
+#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
+#define SSINFO_FLAGS_NO_SEEK_PENALTY           0x00000004
+#define SSINFO_FLAGS_TRIM_ENABLED              0x00000008
+
+/* sector size info struct */
+struct smb3_fs_ss_info {
+       __le32 LogicalBytesPerSector;
+       __le32 PhysicalBytesPerSectorForAtomicity;
+       __le32 PhysicalBytesPerSectorForPerf;
+       __le32 FSEffPhysicalBytesPerSectorForAtomicity;
+       __le32 Flags;
+       __le32 ByteOffsetForSectorAlignment;
+       __le32 ByteOffsetForPartitionAlignment;
+} __packed;
+
+/* File System Control Information */
+struct smb2_fs_control_info {
+       __le64 FreeSpaceStartFiltering;
+       __le64 FreeSpaceThreshold;
+       __le64 FreeSpaceStopFiltering;
+       __le64 DefaultQuotaThreshold;
+       __le64 DefaultQuotaLimit;
+       __le32 FileSystemControlFlags;
+       __le32 Padding;
+} __packed;
+
+/* partial list of QUERY INFO levels */
+#define FILE_DIRECTORY_INFORMATION     1
+#define FILE_FULL_DIRECTORY_INFORMATION 2
+#define FILE_BOTH_DIRECTORY_INFORMATION 3
+#define FILE_BASIC_INFORMATION         4
+#define FILE_STANDARD_INFORMATION      5
+#define FILE_INTERNAL_INFORMATION      6
+#define FILE_EA_INFORMATION            7
+#define FILE_ACCESS_INFORMATION                8
+#define FILE_NAME_INFORMATION          9
+#define FILE_RENAME_INFORMATION                10
+#define FILE_LINK_INFORMATION          11
+#define FILE_NAMES_INFORMATION         12
+#define FILE_DISPOSITION_INFORMATION   13
+#define FILE_POSITION_INFORMATION      14
+#define FILE_FULL_EA_INFORMATION       15
+#define FILE_MODE_INFORMATION          16
+#define FILE_ALIGNMENT_INFORMATION     17
+#define FILE_ALL_INFORMATION           18
+#define FILE_ALLOCATION_INFORMATION    19
+#define FILE_END_OF_FILE_INFORMATION   20
+#define FILE_ALTERNATE_NAME_INFORMATION 21
+#define FILE_STREAM_INFORMATION                22
+#define FILE_PIPE_INFORMATION          23
+#define FILE_PIPE_LOCAL_INFORMATION    24
+#define FILE_PIPE_REMOTE_INFORMATION   25
+#define FILE_MAILSLOT_QUERY_INFORMATION 26
+#define FILE_MAILSLOT_SET_INFORMATION  27
+#define FILE_COMPRESSION_INFORMATION   28
+#define FILE_OBJECT_ID_INFORMATION     29
+/* Number 30 not defined in documents */
+#define FILE_MOVE_CLUSTER_INFORMATION  31
+#define FILE_QUOTA_INFORMATION         32
+#define FILE_REPARSE_POINT_INFORMATION 33
+#define FILE_NETWORK_OPEN_INFORMATION  34
+#define FILE_ATTRIBUTE_TAG_INFORMATION 35
+#define FILE_TRACKING_INFORMATION      36
+#define FILEID_BOTH_DIRECTORY_INFORMATION 37
+#define FILEID_FULL_DIRECTORY_INFORMATION 38
+#define FILE_VALID_DATA_LENGTH_INFORMATION 39
+#define FILE_SHORT_NAME_INFORMATION    40
+#define FILE_SFIO_RESERVE_INFORMATION  44
+#define FILE_SFIO_VOLUME_INFORMATION   45
+#define FILE_HARD_LINK_INFORMATION     46
+#define FILE_NORMALIZED_NAME_INFORMATION 48
+#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
+#define FILE_STANDARD_LINK_INFORMATION 54
+
+#define OP_BREAK_STRUCT_SIZE_20                24
+#define OP_BREAK_STRUCT_SIZE_21                36
+
+struct smb2_file_access_info {
+       __le32 AccessFlags;
+} __packed;
+
+struct smb2_file_alignment_info {
+       __le32 AlignmentRequirement;
+} __packed;
+
+struct smb2_file_internal_info {
+       __le64 IndexNumber;
+} __packed; /* level 6 Query */
+
+struct smb2_file_rename_info { /* encoding of request for level 10 */
+       __u8   ReplaceIfExists; /* 1 = replace existing target with new */
+                               /* 0 = fail if target already exists */
+       __u8   Reserved[7];
+       __u64  RootDirectory;  /* MBZ for network operations (unclear why, but the spec says so) */
+       __le32 FileNameLength;
+       char   FileName[0];     /* New name to be assigned */
+} __packed; /* level 10 Set */
+
+struct smb2_file_link_info { /* encoding of request for level 11 */
+       __u8   ReplaceIfExists; /* 1 = replace existing link with new */
+                               /* 0 = fail if link already exists */
+       __u8   Reserved[7];
+       __u64  RootDirectory;  /* MBZ for network operations (unclear why, but the spec says so) */
+       __le32 FileNameLength;
+       char   FileName[0];     /* Name to be assigned to new link */
+} __packed; /* level 11 Set */
+
+/*
+ * This level 18 struct, although it has the same name, is different from the
+ * cifs level 0x107 struct. Level 0x107 has an extra u64 between AccessFlags
+ * and CurrentByteOffset.
+ */
+struct smb2_file_all_info { /* data block encoding of response to level 18 */
+       __le64 CreationTime;    /* Beginning of FILE_BASIC_INFO equivalent */
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le32 Attributes;
+       __u32  Pad1;            /* End of FILE_BASIC_INFO_INFO equivalent */
+       __le64 AllocationSize;  /* Beginning of FILE_STANDARD_INFO equivalent */
+       __le64 EndOfFile;       /* size ie offset to first free byte in file */
+       __le32 NumberOfLinks;   /* hard links */
+       __u8   DeletePending;
+       __u8   Directory;
+       __u16  Pad2;            /* End of FILE_STANDARD_INFO equivalent */
+       __le64 IndexNumber;
+       __le32 EASize;
+       __le32 AccessFlags;
+       __le64 CurrentByteOffset;
+       __le32 Mode;
+       __le32 AlignmentRequirement;
+       __le32 FileNameLength;
+       char   FileName[1];
+} __packed; /* level 18 Query */
+
+struct smb2_file_alt_name_info {
+       __le32 FileNameLength;
+       char FileName[0];
+} __packed;
+
+struct smb2_file_stream_info {
+       __le32  NextEntryOffset;
+       __le32  StreamNameLength;
+       __le64 StreamSize;
+       __le64 StreamAllocationSize;
+       char   StreamName[0];
+} __packed;
+
+struct smb2_file_eof_info { /* encoding of request for level 10 */
+       __le64 EndOfFile; /* new end of file value */
+} __packed; /* level 20 Set */
+
+struct smb2_file_ntwrk_info {
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 AllocationSize;
+       __le64 EndOfFile;
+       __le32 Attributes;
+       __le32 Reserved;
+} __packed;
+
+struct smb2_file_standard_info {
+       __le64 AllocationSize;
+       __le64 EndOfFile;
+       __le32 NumberOfLinks;   /* hard links */
+       __u8   DeletePending;
+       __u8   Directory;
+       __le16 Reserved;
+} __packed; /* level 18 Query */
+
+struct smb2_file_ea_info {
+       __le32 EASize;
+} __packed;
+
+struct smb2_file_alloc_info {
+       __le64 AllocationSize;
+} __packed;
+
+struct smb2_file_disposition_info {
+       __u8 DeletePending;
+} __packed;
+
+struct smb2_file_pos_info {
+       __le64 CurrentByteOffset;
+} __packed;
+
+#define FILE_MODE_INFO_MASK cpu_to_le32(0x0000103e)
+
+struct smb2_file_mode_info {
+       __le32 Mode;
+} __packed;
+
+#define COMPRESSION_FORMAT_NONE 0x0000
+#define COMPRESSION_FORMAT_LZNT1 0x0002
+
+struct smb2_file_comp_info {
+       __le64 CompressedFileSize;
+       __le16 CompressionFormat;
+       __u8 CompressionUnitShift;
+       __u8 ChunkShift;
+       __u8 ClusterShift;
+       __u8 Reserved[3];
+} __packed;
+
+struct smb2_file_attr_tag_info {
+       __le32 FileAttributes;
+       __le32 ReparseTag;
+} __packed;
+
+#define SL_RESTART_SCAN        0x00000001
+#define SL_RETURN_SINGLE_ENTRY 0x00000002
+#define SL_INDEX_SPECIFIED     0x00000004
+
+struct smb2_ea_info_req {
+       __le32 NextEntryOffset;
+       __u8   EaNameLength;
+       char name[1];
+} __packed; /* level 15 Query */
+
+struct smb2_ea_info {
+       __le32 NextEntryOffset;
+       __u8   Flags;
+       __u8   EaNameLength;
+       __le16 EaValueLength;
+       char name[1];
+       /* optionally followed by value */
+} __packed; /* level 15 Query */
+
+struct create_ea_buf_req {
+       struct create_context ccontext;
+       __u8   Name[8];
+       struct smb2_ea_info ea;
+} __packed;
+
+struct create_sd_buf_req {
+       struct create_context ccontext;
+       __u8   Name[8];
+       struct smb_ntsd ntsd;
+} __packed;
+
+/* Find File infolevels */
+#define SMB_FIND_FILE_POSIX_INFO       0x064
+
+/* Level 100 query info */
+struct smb311_posix_qinfo {
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 EndOfFile;
+       __le64 AllocationSize;
+       __le32 DosAttributes;
+       __le64 Inode;
+       __le32 DeviceId;
+       __le32 Zero;
+       /* beginning of POSIX Create Context Response */
+       __le32 HardLinks;
+       __le32 ReparseTag;
+       __le32 Mode;
+       u8     Sids[];
+       /*
+        * var sized owner SID
+        * var sized group SID
+        * le32 filenamelength
+        * u8  filename[]
+        */
+} __packed;
+
+struct smb2_posix_info {
+       __le32 NextEntryOffset;
+       __u32 Ignored;
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 EndOfFile;
+       __le64 AllocationSize;
+       __le32 DosAttributes;
+       __le64 Inode;
+       __le32 DeviceId;
+       __le32 Zero;
+       /* beginning of POSIX Create Context Response */
+       __le32 HardLinks;
+       __le32 ReparseTag;
+       __le32 Mode;
+       u8 SidBuffer[40];
+       __le32 name_len;
+       u8 name[1];
+       /*
+        * var sized owner SID
+        * var sized group SID
+        * le32 filenamelength
+        * u8  filename[]
+        */
+} __packed;
+
+/* functions */
+int init_smb2_0_server(struct ksmbd_conn *conn);
+void init_smb2_1_server(struct ksmbd_conn *conn);
+void init_smb3_0_server(struct ksmbd_conn *conn);
+void init_smb3_02_server(struct ksmbd_conn *conn);
+int init_smb3_11_server(struct ksmbd_conn *conn);
+
+void init_smb2_max_read_size(unsigned int sz);
+void init_smb2_max_write_size(unsigned int sz);
+void init_smb2_max_trans_size(unsigned int sz);
+
+bool is_smb2_neg_cmd(struct ksmbd_work *work);
+bool is_smb2_rsp(struct ksmbd_work *work);
+
+u16 get_smb2_cmd_val(struct ksmbd_work *work);
+void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err);
+int init_smb2_rsp_hdr(struct ksmbd_work *work);
+int smb2_allocate_rsp_buf(struct ksmbd_work *work);
+bool is_chained_smb2_message(struct ksmbd_work *work);
+int init_smb2_neg_rsp(struct ksmbd_work *work);
+void smb2_set_err_rsp(struct ksmbd_work *work);
+int smb2_check_user_session(struct ksmbd_work *work);
+int smb2_get_ksmbd_tcon(struct ksmbd_work *work);
+bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command);
+int smb2_check_sign_req(struct ksmbd_work *work);
+void smb2_set_sign_rsp(struct ksmbd_work *work);
+int smb3_check_sign_req(struct ksmbd_work *work);
+void smb3_set_sign_rsp(struct ksmbd_work *work);
+int find_matching_smb2_dialect(int start_index, __le16 *cli_dialects,
+                              __le16 dialects_count);
+struct file_lock *smb_flock_init(struct file *f);
+int setup_async_work(struct ksmbd_work *work, void (*fn)(void **),
+                    void **arg);
+void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status);
+struct channel *lookup_chann_list(struct ksmbd_session *sess,
+                                 struct ksmbd_conn *conn);
+void smb3_preauth_hash_rsp(struct ksmbd_work *work);
+bool smb3_is_transform_hdr(void *buf);
+int smb3_decrypt_req(struct ksmbd_work *work);
+int smb3_encrypt_resp(struct ksmbd_work *work);
+bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work);
+int smb2_set_rsp_credits(struct ksmbd_work *work);
+
+/* smb2 misc functions */
+int ksmbd_smb2_check_message(struct ksmbd_work *work);
+
+/* smb2 command handlers */
+int smb2_handle_negotiate(struct ksmbd_work *work);
+int smb2_negotiate_request(struct ksmbd_work *work);
+int smb2_sess_setup(struct ksmbd_work *work);
+int smb2_tree_connect(struct ksmbd_work *work);
+int smb2_tree_disconnect(struct ksmbd_work *work);
+int smb2_session_logoff(struct ksmbd_work *work);
+int smb2_open(struct ksmbd_work *work);
+int smb2_query_info(struct ksmbd_work *work);
+int smb2_query_dir(struct ksmbd_work *work);
+int smb2_close(struct ksmbd_work *work);
+int smb2_echo(struct ksmbd_work *work);
+int smb2_set_info(struct ksmbd_work *work);
+int smb2_read(struct ksmbd_work *work);
+int smb2_write(struct ksmbd_work *work);
+int smb2_flush(struct ksmbd_work *work);
+int smb2_cancel(struct ksmbd_work *work);
+int smb2_lock(struct ksmbd_work *work);
+int smb2_ioctl(struct ksmbd_work *work);
+int smb2_oplock_break(struct ksmbd_work *work);
+int smb2_notify(struct ksmbd_work *ksmbd_work);
+
+#endif /* _SMB2PDU_H */
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
new file mode 100644 (file)
index 0000000..b108b91
--- /dev/null
@@ -0,0 +1,674 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ *   Copyright (C) 2018 Namjae Jeon <linkinjeon@kernel.org>
+ */
+
+#include "smb_common.h"
+#include "server.h"
+#include "misc.h"
+#include "smbstatus.h"
+#include "connection.h"
+#include "ksmbd_work.h"
+#include "mgmt/user_session.h"
+#include "mgmt/user_config.h"
+#include "mgmt/tree_connect.h"
+#include "mgmt/share_config.h"
+
+/* for shortname implementation */
+static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
+#define MANGLE_BASE (sizeof(basechars) / sizeof(char) - 1)
+#define MAGIC_CHAR '~'
+#define PERIOD '.'
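+/* mangle() maps a value onto the base character set above when building 8.3 short names */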
+#define mangle(V) ((char)(basechars[(V) % MANGLE_BASE]))
+#define KSMBD_MIN_SUPPORTED_HEADER_SIZE        (sizeof(struct smb2_hdr))
+
+struct smb_protocol {
+       int             index;
+       char            *name;
+       char            *prot;
+       __u16           prot_id;
+};
+
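+/* SMB2 dialect strings a client may offer through the legacy SMB1 NEGOTIATE path */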
+static struct smb_protocol smb1_protos[] = {
+       {
+               SMB21_PROT,
+               "\2SMB 2.1",
+               "SMB2_10",
+               SMB21_PROT_ID
+       },
+       {
+               SMB2X_PROT,
+               "\2SMB 2.???",
+               "SMB2_22",
+               SMB2X_PROT_ID
+       },
+};
+
+static struct smb_protocol smb2_protos[] = {
+       {
+               SMB21_PROT,
+               "\2SMB 2.1",
+               "SMB2_10",
+               SMB21_PROT_ID
+       },
+       {
+               SMB30_PROT,
+               "\2SMB 3.0",
+               "SMB3_00",
+               SMB30_PROT_ID
+       },
+       {
+               SMB302_PROT,
+               "\2SMB 3.02",
+               "SMB3_02",
+               SMB302_PROT_ID
+       },
+       {
+               SMB311_PROT,
+               "\2SMB 3.1.1",
+               "SMB3_11",
+               SMB311_PROT_ID
+       },
+};
+
+unsigned int ksmbd_server_side_copy_max_chunk_count(void)
+{
+       return 256;
+}
+
+unsigned int ksmbd_server_side_copy_max_chunk_size(void)
+{
+       return (2U << 30) - 1;
+}
+
+unsigned int ksmbd_server_side_copy_max_total_size(void)
+{
+       return (2U << 30) - 1;
+}
+
+inline int ksmbd_min_protocol(void)
+{
+       return SMB2_PROT;
+}
+
+inline int ksmbd_max_protocol(void)
+{
+       return SMB311_PROT;
+}
+
+int ksmbd_lookup_protocol_idx(char *str)
+{
+       int offt = ARRAY_SIZE(smb1_protos) - 1;
+       int len = strlen(str);
+
+       while (offt >= 0) {
+               if (!strncmp(str, smb1_protos[offt].prot, len)) {
+                       ksmbd_debug(SMB, "selected %s dialect idx = %d\n",
+                                   smb1_protos[offt].prot, offt);
+                       return smb1_protos[offt].index;
+               }
+               offt--;
+       }
+
+       offt = ARRAY_SIZE(smb2_protos) - 1;
+       while (offt >= 0) {
+               if (!strncmp(str, smb2_protos[offt].prot, len)) {
+                       ksmbd_debug(SMB, "selected %s dialect idx = %d\n",
+                                   smb2_protos[offt].prot, offt);
+                       return smb2_protos[offt].index;
+               }
+               offt--;
+       }
+       return -1;
+}
+
+/**
+ * ksmbd_verify_smb_message() - check for valid smb2 request header
+ * @work:      smb work
+ *
+ * check for valid smb signature and packet direction (request/response)
+ *
+ * Return:      0 on success, otherwise 1
+ */
+int ksmbd_verify_smb_message(struct ksmbd_work *work)
+{
+       struct smb2_hdr *smb2_hdr = work->request_buf;
+
+       if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER)
+               return ksmbd_smb2_check_message(work);
+
+       return 0;
+}
+
+/**
+ * ksmbd_smb_request() - check for valid smb request type
+ * @conn:      connection instance
+ *
+ * Return:      true on success, otherwise false
+ */
+bool ksmbd_smb_request(struct ksmbd_conn *conn)
+{
+       int type = *(char *)conn->request_buf;
+
+       switch (type) {
+       case RFC1002_SESSION_MESSAGE:
+               /* Regular SMB request */
+               return true;
+       case RFC1002_SESSION_KEEP_ALIVE:
+               ksmbd_debug(SMB, "RFC 1002 session keep alive\n");
+               break;
+       default:
+               ksmbd_debug(SMB, "RFC 1002 unknown request type 0x%x\n", type);
+       }
+
+       return false;
+}
+
+static bool supported_protocol(int idx)
+{
+       if (idx == SMB2X_PROT &&
+           (server_conf.min_protocol >= SMB21_PROT ||
+            server_conf.max_protocol <= SMB311_PROT))
+               return true;
+
+       return (server_conf.min_protocol <= idx &&
+               idx <= server_conf.max_protocol);
+}
+
+static char *next_dialect(char *dialect, int *next_off)
+{
+       dialect = dialect + *next_off;
+       *next_off = strlen(dialect);
+       return dialect;
+}
+
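+/*
+ * Walk the NUL-terminated dialect strings of an SMB1 NEGOTIATE request and
+ * return the best supported dialect id (or its sequence number for core SMB1).
+ */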
+static int ksmbd_lookup_dialect_by_name(char *cli_dialects, __le16 byte_count)
+{
+       int i, seq_num, bcount, next;
+       char *dialect;
+
+       for (i = ARRAY_SIZE(smb1_protos) - 1; i >= 0; i--) {
+               seq_num = 0;
+               next = 0;
+               dialect = cli_dialects;
+               bcount = le16_to_cpu(byte_count);
+               do {
+                       dialect = next_dialect(dialect, &next);
+                       ksmbd_debug(SMB, "client requested dialect %s\n",
+                                   dialect);
+                       if (!strcmp(dialect, smb1_protos[i].name)) {
+                               if (supported_protocol(smb1_protos[i].index)) {
+                                       ksmbd_debug(SMB,
+                                                   "selected %s dialect\n",
+                                                   smb1_protos[i].name);
+                                       if (smb1_protos[i].index == SMB1_PROT)
+                                               return seq_num;
+                                       return smb1_protos[i].prot_id;
+                               }
+                       }
+                       seq_num++;
+                       bcount -= (++next);
+               } while (bcount > 0);
+       }
+
+       return BAD_PROT_ID;
+}
+
+int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count)
+{
+       int i;
+       int count;
+
+       for (i = ARRAY_SIZE(smb2_protos) - 1; i >= 0; i--) {
+               count = le16_to_cpu(dialects_count);
+               while (--count >= 0) {
+                       ksmbd_debug(SMB, "client requested dialect 0x%x\n",
+                                   le16_to_cpu(cli_dialects[count]));
+                       if (le16_to_cpu(cli_dialects[count]) !=
+                                       smb2_protos[i].prot_id)
+                               continue;
+
+                       if (supported_protocol(smb2_protos[i].index)) {
+                               ksmbd_debug(SMB, "selected %s dialect\n",
+                                           smb2_protos[i].name);
+                               return smb2_protos[i].prot_id;
+                       }
+               }
+       }
+
+       return BAD_PROT_ID;
+}
+
+static int ksmbd_negotiate_smb_dialect(void *buf)
+{
+       __le32 proto;
+
+       proto = ((struct smb2_hdr *)buf)->ProtocolId;
+       if (proto == SMB2_PROTO_NUMBER) {
+               struct smb2_negotiate_req *req;
+
+               req = (struct smb2_negotiate_req *)buf;
+               return ksmbd_lookup_dialect_by_id(req->Dialects,
+                                                 req->DialectCount);
+       }
+
+       proto = *(__le32 *)((struct smb_hdr *)buf)->Protocol;
+       if (proto == SMB1_PROTO_NUMBER) {
+               struct smb_negotiate_req *req;
+
+               req = (struct smb_negotiate_req *)buf;
+               return ksmbd_lookup_dialect_by_name(req->DialectsArray,
+                                                   req->ByteCount);
+       }
+
+       return BAD_PROT_ID;
+}
+
+#define SMB_COM_NEGOTIATE      0x72
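+/*
+ * Start with SMB3.1.1 ops by default; the NEGOTIATE handler is expected to
+ * reinitialize them for whichever dialect is actually agreed.
+ */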
+int ksmbd_init_smb_server(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+
+       if (conn->need_neg == false)
+               return 0;
+
+       init_smb3_11_server(conn);
+
+       if (conn->ops->get_cmd_val(work) != SMB_COM_NEGOTIATE)
+               conn->need_neg = false;
+       return 0;
+}
+
+bool ksmbd_pdu_size_has_room(unsigned int pdu)
+{
+       return (pdu >= KSMBD_MIN_SUPPORTED_HEADER_SIZE - 4);
+}
+
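+/*
+ * Emit the "." and ".." entries at the start of a directory listing.
+ * Each entry is generated at most once per open directory (tracked in
+ * dir->dot_dotdot[]), only when it matches the search pattern, and is
+ * written out through the info_level specific callback @fn.
+ */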
+int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
+                                     struct ksmbd_file *dir,
+                                     struct ksmbd_dir_info *d_info,
+                                     char *search_pattern,
+                                     int (*fn)(struct ksmbd_conn *, int,
+                                               struct ksmbd_dir_info *,
+                                               struct user_namespace *,
+                                               struct ksmbd_kstat *))
+{
+       int i, rc = 0;
+       struct ksmbd_conn *conn = work->conn;
+       struct user_namespace *user_ns = file_mnt_user_ns(dir->filp);
+
+       for (i = 0; i < 2; i++) {
+               struct kstat kstat;
+               struct ksmbd_kstat ksmbd_kstat;
+
+               if (!dir->dot_dotdot[i]) { /* fill dot entry info */
+                       if (i == 0) {
+                               d_info->name = ".";
+                               d_info->name_len = 1;
+                       } else {
+                               d_info->name = "..";
+                               d_info->name_len = 2;
+                       }
+
+                       if (!match_pattern(d_info->name, d_info->name_len,
+                                          search_pattern)) {
+                               dir->dot_dotdot[i] = 1;
+                               continue;
+                       }
+
+                       ksmbd_kstat.kstat = &kstat;
+                       ksmbd_vfs_fill_dentry_attrs(work,
+                                                   user_ns,
+                                                   dir->filp->f_path.dentry->d_parent,
+                                                   &ksmbd_kstat);
+                       rc = fn(conn, info_level, d_info,
+                               user_ns, &ksmbd_kstat);
+                       if (rc)
+                               break;
+                       if (d_info->out_buf_len <= 0)
+                               break;
+
+                       dir->dot_dotdot[i] = 1;
+                       if (d_info->flags & SMB2_RETURN_SINGLE_ENTRY) {
+                               d_info->out_buf_len = 0;
+                               break;
+                       }
+               }
+       }
+
+       return rc;
+}
+
+/**
+ * ksmbd_extract_shortname() - get shortname from long filename
+ * @conn:      connection instance
+ * @longname:  source long filename
+ * @shortname: destination short filename
+ *
+ * Return:     shortname length or 0 when source long name is '.' or '..'
+ * TODO: Although this function conforms to the 8.3 filename restrictions,
+ * the result differs from the one Windows 7 generates; this needs to be
+ * checked.
+ */
+int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
+                           char *shortname)
+{
+       const char *p;
+       char base[9], extension[4];
+       char out[13] = {0};
+       int baselen = 0;
+       int extlen = 0, len = 0;
+       unsigned int csum = 0;
+       const unsigned char *ptr;
+       bool dot_present = true;
+
+       p = longname;
+       if ((*p == '.') || (!(strcmp(p, "..")))) {
+               /* no mangling required */
+               return 0;
+       }
+
+       p = strrchr(longname, '.');
+       if (p == longname) { /* name starts with a dot */
+               /* need room for all three '_' plus the terminating NUL */
+               strscpy(extension, "___", sizeof(extension));
+       } else {
+               if (p) {
+                       p++;
+                       while (*p && extlen < 3) {
+                               if (*p != '.')
+                                       extension[extlen++] = toupper(*p);
+                               p++;
+                       }
+                       extension[extlen] = '\0';
+               } else {
+                       dot_present = false;
+               }
+       }
+
+       p = longname;
+       if (*p == '.') {
+               p++;
+               longname++;
+       }
+       while (*p && (baselen < 5)) {
+               if (*p != '.')
+                       base[baselen++] = toupper(*p);
+               p++;
+       }
+
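+       /*
+        * Assemble the mangled 8.3 name: up to five uppercased base
+        * characters, MAGIC_CHAR, two characters derived from a checksum
+        * of the long name, a period and the (optional) extension.
+        */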
+       base[baselen] = MAGIC_CHAR;
+       memcpy(out, base, baselen + 1);
+
+       ptr = longname;
+       len = strlen(longname);
+       for (; len > 0; len--, ptr++)
+               csum += *ptr;
+
+       csum = csum % (MANGLE_BASE * MANGLE_BASE);
+       out[baselen + 1] = mangle(csum / MANGLE_BASE);
+       out[baselen + 2] = mangle(csum);
+       out[baselen + 3] = PERIOD;
+
+       if (dot_present)
+               memcpy(&out[baselen + 4], extension, 4);
+       else
+               out[baselen + 4] = '\0';
+       smbConvertToUTF16((__le16 *)shortname, out, PATH_MAX,
+                         conn->local_nls, 0);
+       len = strlen(out) * 2;
+       return len;
+}
+
+static int __smb2_negotiate(struct ksmbd_conn *conn)
+{
+       return (conn->dialect >= SMB20_PROT_ID &&
+               conn->dialect <= SMB311_PROT_ID);
+}
+
+static int smb_handle_negotiate(struct ksmbd_work *work)
+{
+       struct smb_negotiate_rsp *neg_rsp = work->response_buf;
+
+       ksmbd_debug(SMB, "Unsupported SMB protocol\n");
+       neg_rsp->hdr.Status.CifsError = STATUS_INVALID_LOGON_TYPE;
+       return -EINVAL;
+}
+
+int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
+{
+       struct ksmbd_conn *conn = work->conn;
+       int ret;
+
+       conn->dialect = ksmbd_negotiate_smb_dialect(work->request_buf);
+       ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
+
+       if (command == SMB2_NEGOTIATE_HE) {
+               struct smb2_hdr *smb2_hdr = work->request_buf;
+
+               if (smb2_hdr->ProtocolId != SMB2_PROTO_NUMBER) {
+                       ksmbd_debug(SMB, "Downgrade to SMB1 negotiation\n");
+                       command = SMB_COM_NEGOTIATE;
+               }
+       }
+
+       if (command == SMB2_NEGOTIATE_HE) {
+               ret = smb2_handle_negotiate(work);
+               init_smb2_neg_rsp(work);
+               return ret;
+       }
+
+       if (command == SMB_COM_NEGOTIATE) {
+               if (__smb2_negotiate(conn)) {
+                       conn->need_neg = true;
+                       init_smb3_11_server(conn);
+                       init_smb2_neg_rsp(work);
+                       ksmbd_debug(SMB, "Upgrade to SMB2 negotiation\n");
+                       return 0;
+               }
+               return smb_handle_negotiate(work);
+       }
+
+       pr_err("Unknown SMB negotiation command: %u\n", command);
+       return -EINVAL;
+}
+
+enum SHARED_MODE_ERRORS {
+       SHARE_DELETE_ERROR,
+       SHARE_READ_ERROR,
+       SHARE_WRITE_ERROR,
+       FILE_READ_ERROR,
+       FILE_WRITE_ERROR,
+       FILE_DELETE_ERROR,
+};
+
+static const char * const shared_mode_errors[] = {
+       "Current access mode does not permit SHARE_DELETE",
+       "Current access mode does not permit SHARE_READ",
+       "Current access mode does not permit SHARE_WRITE",
+       "Desired access mode does not permit FILE_READ",
+       "Desired access mode does not permit FILE_WRITE",
+       "Desired access mode does not permit FILE_DELETE",
+};
+
+static void smb_shared_mode_error(int error, struct ksmbd_file *prev_fp,
+                                 struct ksmbd_file *curr_fp)
+{
+       ksmbd_debug(SMB, "%s\n", shared_mode_errors[error]);
+       ksmbd_debug(SMB, "Current mode: 0x%x Desired mode: 0x%x\n",
+                   prev_fp->saccess, curr_fp->daccess);
+}
+
+int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp)
+{
+       int rc = 0;
+       struct ksmbd_file *prev_fp;
+
+       /*
+        * Lookup fp in master fp list, and check desired access and
+        * shared mode between previous open and current open.
+        */
+       read_lock(&curr_fp->f_ci->m_lock);
+       list_for_each_entry(prev_fp, &curr_fp->f_ci->m_fp_list, node) {
+               if (file_inode(filp) != file_inode(prev_fp->filp))
+                       continue;
+
+               if (filp == prev_fp->filp)
+                       continue;
+
+               if (ksmbd_stream_fd(prev_fp) && ksmbd_stream_fd(curr_fp))
+                       if (strcmp(prev_fp->stream.name, curr_fp->stream.name))
+                               continue;
+
+               if (prev_fp->attrib_only != curr_fp->attrib_only)
+                       continue;
+
+               if (!(prev_fp->saccess & FILE_SHARE_DELETE_LE) &&
+                   curr_fp->daccess & FILE_DELETE_LE) {
+                       smb_shared_mode_error(SHARE_DELETE_ERROR,
+                                             prev_fp,
+                                             curr_fp);
+                       rc = -EPERM;
+                       break;
+               }
+
+               /*
+                * Only check FILE_SHARE_DELETE if stream opened and
+                * normal file opened.
+                */
+               if (ksmbd_stream_fd(prev_fp) && !ksmbd_stream_fd(curr_fp))
+                       continue;
+
+               if (!(prev_fp->saccess & FILE_SHARE_READ_LE) &&
+                   curr_fp->daccess & (FILE_EXECUTE_LE | FILE_READ_DATA_LE)) {
+                       smb_shared_mode_error(SHARE_READ_ERROR,
+                                             prev_fp,
+                                             curr_fp);
+                       rc = -EPERM;
+                       break;
+               }
+
+               if (!(prev_fp->saccess & FILE_SHARE_WRITE_LE) &&
+                   curr_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE)) {
+                       smb_shared_mode_error(SHARE_WRITE_ERROR,
+                                             prev_fp,
+                                             curr_fp);
+                       rc = -EPERM;
+                       break;
+               }
+
+               if (prev_fp->daccess & (FILE_EXECUTE_LE | FILE_READ_DATA_LE) &&
+                   !(curr_fp->saccess & FILE_SHARE_READ_LE)) {
+                       smb_shared_mode_error(FILE_READ_ERROR,
+                                             prev_fp,
+                                             curr_fp);
+                       rc = -EPERM;
+                       break;
+               }
+
+               if (prev_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE) &&
+                   !(curr_fp->saccess & FILE_SHARE_WRITE_LE)) {
+                       smb_shared_mode_error(FILE_WRITE_ERROR,
+                                             prev_fp,
+                                             curr_fp);
+                       rc = -EPERM;
+                       break;
+               }
+
+               if (prev_fp->daccess & FILE_DELETE_LE &&
+                   !(curr_fp->saccess & FILE_SHARE_DELETE_LE)) {
+                       smb_shared_mode_error(FILE_DELETE_ERROR,
+                                             prev_fp,
+                                             curr_fp);
+                       rc = -EPERM;
+                       break;
+               }
+       }
+       read_unlock(&curr_fp->f_ci->m_lock);
+
+       return rc;
+}
+
+bool is_asterisk(char *p)
+{
+       return p && p[0] == '*';
+}
+
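+/*
+ * Temporarily override the worker's fsuid/fsgid with the session user's
+ * ids (or the uid/gid forced on the share), clear the supplementary
+ * groups and, for non-root users, drop the filesystem capabilities so
+ * that subsequent VFS operations run with the client's identity.
+ * ksmbd_revert_fsids() restores the previous credentials.
+ */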
+int ksmbd_override_fsids(struct ksmbd_work *work)
+{
+       struct ksmbd_session *sess = work->sess;
+       struct ksmbd_share_config *share = work->tcon->share_conf;
+       struct cred *cred;
+       struct group_info *gi;
+       unsigned int uid;
+       unsigned int gid;
+
+       uid = user_uid(sess->user);
+       gid = user_gid(sess->user);
+       if (share->force_uid != KSMBD_SHARE_INVALID_UID)
+               uid = share->force_uid;
+       if (share->force_gid != KSMBD_SHARE_INVALID_GID)
+               gid = share->force_gid;
+
+       cred = prepare_kernel_cred(NULL);
+       if (!cred)
+               return -ENOMEM;
+
+       cred->fsuid = make_kuid(current_user_ns(), uid);
+       cred->fsgid = make_kgid(current_user_ns(), gid);
+
+       gi = groups_alloc(0);
+       if (!gi) {
+               abort_creds(cred);
+               return -ENOMEM;
+       }
+       set_groups(cred, gi);
+       put_group_info(gi);
+
+       if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
+               cred->cap_effective = cap_drop_fs_set(cred->cap_effective);
+
+       WARN_ON(work->saved_cred);
+       work->saved_cred = override_creds(cred);
+       if (!work->saved_cred) {
+               abort_creds(cred);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+void ksmbd_revert_fsids(struct ksmbd_work *work)
+{
+       const struct cred *cred;
+
+       WARN_ON(!work->saved_cred);
+
+       cred = current_cred();
+       revert_creds(work->saved_cred);
+       put_cred(cred);
+       work->saved_cred = NULL;
+}
+
+__le32 smb_map_generic_desired_access(__le32 daccess)
+{
+       if (daccess & FILE_GENERIC_READ_LE) {
+               daccess |= cpu_to_le32(GENERIC_READ_FLAGS);
+               daccess &= ~FILE_GENERIC_READ_LE;
+       }
+
+       if (daccess & FILE_GENERIC_WRITE_LE) {
+               daccess |= cpu_to_le32(GENERIC_WRITE_FLAGS);
+               daccess &= ~FILE_GENERIC_WRITE_LE;
+       }
+
+       if (daccess & FILE_GENERIC_EXECUTE_LE) {
+               daccess |= cpu_to_le32(GENERIC_EXECUTE_FLAGS);
+               daccess &= ~FILE_GENERIC_EXECUTE_LE;
+       }
+
+       if (daccess & FILE_GENERIC_ALL_LE) {
+               daccess |= cpu_to_le32(GENERIC_ALL_FLAGS);
+               daccess &= ~FILE_GENERIC_ALL_LE;
+       }
+
+       return daccess;
+}
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
new file mode 100644 (file)
index 0000000..eb667d8
--- /dev/null
@@ -0,0 +1,542 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __SMB_COMMON_H__
+#define __SMB_COMMON_H__
+
+#include <linux/kernel.h>
+
+#include "glob.h"
+#include "nterr.h"
+#include "smb2pdu.h"
+
+/* ksmbd's Specific ERRNO */
+#define ESHARE                 50000
+
+#define SMB1_PROT              0
+#define SMB2_PROT              1
+#define SMB21_PROT             2
+/* multi-protocol negotiate request */
+#define SMB2X_PROT             3
+#define SMB30_PROT             4
+#define SMB302_PROT            5
+#define SMB311_PROT            6
+#define BAD_PROT               0xFFFF
+
+#define SMB1_VERSION_STRING    "1.0"
+#define SMB20_VERSION_STRING   "2.0"
+#define SMB21_VERSION_STRING   "2.1"
+#define SMB30_VERSION_STRING   "3.0"
+#define SMB302_VERSION_STRING  "3.02"
+#define SMB311_VERSION_STRING  "3.1.1"
+
+/* Dialects */
+#define SMB10_PROT_ID          0x00
+#define SMB20_PROT_ID          0x0202
+#define SMB21_PROT_ID          0x0210
+/* multi-protocol negotiate request */
+#define SMB2X_PROT_ID          0x02FF
+#define SMB30_PROT_ID          0x0300
+#define SMB302_PROT_ID         0x0302
+#define SMB311_PROT_ID         0x0311
+#define BAD_PROT_ID            0xFFFF
+
+#define SMB_ECHO_INTERVAL      (60 * HZ)
+
+#define CIFS_DEFAULT_IOSIZE    (64 * 1024)
+#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
+
+/* RFC 1002 session packet types */
+#define RFC1002_SESSION_MESSAGE                        0x00
+#define RFC1002_SESSION_REQUEST                        0x81
+#define RFC1002_POSITIVE_SESSION_RESPONSE      0x82
+#define RFC1002_NEGATIVE_SESSION_RESPONSE      0x83
+#define RFC1002_RETARGET_SESSION_RESPONSE      0x84
+#define RFC1002_SESSION_KEEP_ALIVE             0x85
+
+/* Responses when opening a file. */
+#define F_SUPERSEDED   0
+#define F_OPENED       1
+#define F_CREATED      2
+#define F_OVERWRITTEN  3
+
+/*
+ * File Attribute flags
+ */
+#define ATTR_READONLY                  0x0001
+#define ATTR_HIDDEN                    0x0002
+#define ATTR_SYSTEM                    0x0004
+#define ATTR_VOLUME                    0x0008
+#define ATTR_DIRECTORY                 0x0010
+#define ATTR_ARCHIVE                   0x0020
+#define ATTR_DEVICE                    0x0040
+#define ATTR_NORMAL                    0x0080
+#define ATTR_TEMPORARY                 0x0100
+#define ATTR_SPARSE                    0x0200
+#define ATTR_REPARSE                   0x0400
+#define ATTR_COMPRESSED                        0x0800
+#define ATTR_OFFLINE                   0x1000
+#define ATTR_NOT_CONTENT_INDEXED       0x2000
+#define ATTR_ENCRYPTED                 0x4000
+#define ATTR_POSIX_SEMANTICS           0x01000000
+#define ATTR_BACKUP_SEMANTICS          0x02000000
+#define ATTR_DELETE_ON_CLOSE           0x04000000
+#define ATTR_SEQUENTIAL_SCAN           0x08000000
+#define ATTR_RANDOM_ACCESS             0x10000000
+#define ATTR_NO_BUFFERING              0x20000000
+#define ATTR_WRITE_THROUGH             0x80000000
+
+#define ATTR_READONLY_LE               cpu_to_le32(ATTR_READONLY)
+#define ATTR_HIDDEN_LE                 cpu_to_le32(ATTR_HIDDEN)
+#define ATTR_SYSTEM_LE                 cpu_to_le32(ATTR_SYSTEM)
+#define ATTR_DIRECTORY_LE              cpu_to_le32(ATTR_DIRECTORY)
+#define ATTR_ARCHIVE_LE                        cpu_to_le32(ATTR_ARCHIVE)
+#define ATTR_NORMAL_LE                 cpu_to_le32(ATTR_NORMAL)
+#define ATTR_TEMPORARY_LE              cpu_to_le32(ATTR_TEMPORARY)
+#define ATTR_SPARSE_FILE_LE            cpu_to_le32(ATTR_SPARSE)
+#define ATTR_REPARSE_POINT_LE          cpu_to_le32(ATTR_REPARSE)
+#define ATTR_COMPRESSED_LE             cpu_to_le32(ATTR_COMPRESSED)
+#define ATTR_OFFLINE_LE                        cpu_to_le32(ATTR_OFFLINE)
+#define ATTR_NOT_CONTENT_INDEXED_LE    cpu_to_le32(ATTR_NOT_CONTENT_INDEXED)
+#define ATTR_ENCRYPTED_LE              cpu_to_le32(ATTR_ENCRYPTED)
+#define ATTR_INTEGRITY_STREAML_LE      cpu_to_le32(0x00008000)
+#define ATTR_NO_SCRUB_DATA_LE          cpu_to_le32(0x00020000)
+#define ATTR_MASK_LE                   cpu_to_le32(0x00007FB7)
+
+/* List of FileSystemAttributes - see 2.5.1 of MS-FSCC */
+#define FILE_SUPPORTS_SPARSE_VDL       0x10000000 /* faster nonsparse extend */
+#define FILE_SUPPORTS_BLOCK_REFCOUNTING        0x08000000 /* allow ioctl dup extents */
+#define FILE_SUPPORT_INTEGRITY_STREAMS 0x04000000
+#define FILE_SUPPORTS_USN_JOURNAL      0x02000000
+#define FILE_SUPPORTS_OPEN_BY_FILE_ID  0x01000000
+#define FILE_SUPPORTS_EXTENDED_ATTRIBUTES 0x00800000
+#define FILE_SUPPORTS_HARD_LINKS       0x00400000
+#define FILE_SUPPORTS_TRANSACTIONS     0x00200000
+#define FILE_SEQUENTIAL_WRITE_ONCE     0x00100000
+#define FILE_READ_ONLY_VOLUME          0x00080000
+#define FILE_NAMED_STREAMS             0x00040000
+#define FILE_SUPPORTS_ENCRYPTION       0x00020000
+#define FILE_SUPPORTS_OBJECT_IDS       0x00010000
+#define FILE_VOLUME_IS_COMPRESSED      0x00008000
+#define FILE_SUPPORTS_REMOTE_STORAGE   0x00000100
+#define FILE_SUPPORTS_REPARSE_POINTS   0x00000080
+#define FILE_SUPPORTS_SPARSE_FILES     0x00000040
+#define FILE_VOLUME_QUOTAS             0x00000020
+#define FILE_FILE_COMPRESSION          0x00000010
+#define FILE_PERSISTENT_ACLS           0x00000008
+#define FILE_UNICODE_ON_DISK           0x00000004
+#define FILE_CASE_PRESERVED_NAMES      0x00000002
+#define FILE_CASE_SENSITIVE_SEARCH     0x00000001
+
+#define FILE_READ_DATA        0x00000001  /* Data can be read from the file   */
+#define FILE_WRITE_DATA       0x00000002  /* Data can be written to the file  */
+#define FILE_APPEND_DATA      0x00000004  /* Data can be appended to the file */
+#define FILE_READ_EA          0x00000008  /* Extended attributes associated   */
+/* with the file can be read        */
+#define FILE_WRITE_EA         0x00000010  /* Extended attributes associated   */
+/* with the file can be written     */
+#define FILE_EXECUTE          0x00000020  /*Data can be read into memory from */
+/* the file using system paging I/O */
+#define FILE_DELETE_CHILD     0x00000040
+#define FILE_READ_ATTRIBUTES  0x00000080  /* Attributes associated with the   */
+/* file can be read                 */
+#define FILE_WRITE_ATTRIBUTES 0x00000100  /* Attributes associated with the   */
+/* file can be written              */
+#define DELETE                0x00010000  /* The file can be deleted          */
+#define READ_CONTROL          0x00020000  /* The access control list and      */
+/* ownership associated with the    */
+/* file can be read                 */
+#define WRITE_DAC             0x00040000  /* The access control list and      */
+/* ownership associated with the    */
+/* file can be written.             */
+#define WRITE_OWNER           0x00080000  /* Ownership information associated */
+/* with the file can be written     */
+#define SYNCHRONIZE           0x00100000  /* The file handle can be waited on to */
+/* synchronize with the completion  */
+/* of an input/output request       */
+#define GENERIC_ALL           0x10000000
+#define GENERIC_EXECUTE       0x20000000
+#define GENERIC_WRITE         0x40000000
+#define GENERIC_READ          0x80000000
+/* In summary - Relevant file       */
+/* access flags from CIFS are       */
+/* file_read_data, file_write_data  */
+/* file_execute, file_read_attributes*/
+/* write_dac, and delete.           */
+
+#define FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES)
+#define FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
+               | FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES)
+#define FILE_EXEC_RIGHTS (FILE_EXECUTE)
+
+#define SET_FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA \
+               | FILE_READ_ATTRIBUTES \
+               | DELETE | READ_CONTROL | WRITE_DAC \
+               | WRITE_OWNER | SYNCHRONIZE)
+#define SET_FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
+               | FILE_WRITE_EA \
+               | FILE_DELETE_CHILD \
+               | FILE_WRITE_ATTRIBUTES \
+               | DELETE | READ_CONTROL | WRITE_DAC \
+               | WRITE_OWNER | SYNCHRONIZE)
+#define SET_FILE_EXEC_RIGHTS (FILE_READ_EA | FILE_WRITE_EA | FILE_EXECUTE \
+               | FILE_READ_ATTRIBUTES \
+               | FILE_WRITE_ATTRIBUTES \
+               | DELETE | READ_CONTROL | WRITE_DAC \
+               | WRITE_OWNER | SYNCHRONIZE)
+
+#define SET_MINIMUM_RIGHTS (FILE_READ_EA | FILE_READ_ATTRIBUTES \
+               | READ_CONTROL | SYNCHRONIZE)
+
+/* generic flags for file open */
+#define GENERIC_READ_FLAGS     (READ_CONTROL | FILE_READ_DATA | \
+               FILE_READ_ATTRIBUTES | \
+               FILE_READ_EA | SYNCHRONIZE)
+
+#define GENERIC_WRITE_FLAGS    (READ_CONTROL | FILE_WRITE_DATA | \
+               FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | \
+               FILE_APPEND_DATA | SYNCHRONIZE)
+
+#define GENERIC_EXECUTE_FLAGS  (READ_CONTROL | FILE_EXECUTE | \
+               FILE_READ_ATTRIBUTES | SYNCHRONIZE)
+
+#define GENERIC_ALL_FLAGS      (DELETE | READ_CONTROL | WRITE_DAC | \
+               WRITE_OWNER | SYNCHRONIZE | FILE_READ_DATA | \
+               FILE_WRITE_DATA | FILE_APPEND_DATA | \
+               FILE_READ_EA | FILE_WRITE_EA | \
+               FILE_EXECUTE | FILE_DELETE_CHILD | \
+               FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES)
+
+#define SMB1_PROTO_NUMBER              cpu_to_le32(0x424d53ff)
+
+#define SMB1_CLIENT_GUID_SIZE          (16)
+struct smb_hdr {
+       __be32 smb_buf_length;
+       __u8 Protocol[4];
+       __u8 Command;
+       union {
+               struct {
+                       __u8 ErrorClass;
+                       __u8 Reserved;
+                       __le16 Error;
+               } __packed DosError;
+               __le32 CifsError;
+       } __packed Status;
+       __u8 Flags;
+       __le16 Flags2;          /* note: le */
+       __le16 PidHigh;
+       union {
+               struct {
+                       __le32 SequenceNumber;  /* le */
+                       __u32 Reserved; /* zero */
+               } __packed Sequence;
+               __u8 SecuritySignature[8];      /* le */
+       } __packed Signature;
+       __u8 pad[2];
+       __le16 Tid;
+       __le16 Pid;
+       __le16 Uid;
+       __le16 Mid;
+       __u8 WordCount;
+} __packed;
+
+struct smb_negotiate_req {
+       struct smb_hdr hdr;     /* wct = 0 */
+       __le16 ByteCount;
+       unsigned char DialectsArray[1];
+} __packed;
+
+struct smb_negotiate_rsp {
+       struct smb_hdr hdr;     /* wct = 17 */
+       __le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
+       __u8 SecurityMode;
+       __le16 MaxMpxCount;
+       __le16 MaxNumberVcs;
+       __le32 MaxBufferSize;
+       __le32 MaxRawSize;
+       __le32 SessionKey;
+       __le32 Capabilities;    /* see below */
+       __le32 SystemTimeLow;
+       __le32 SystemTimeHigh;
+       __le16 ServerTimeZone;
+       __u8 EncryptionKeyLength;
+       __le16 ByteCount;
+       union {
+               unsigned char EncryptionKey[8]; /* cap extended security off */
+               /* followed by Domain name - if extended security is off */
+               /* followed by 16 bytes of server GUID */
+               /* then security blob if cap_extended_security negotiated */
+               struct {
+                       unsigned char GUID[SMB1_CLIENT_GUID_SIZE];
+                       unsigned char SecurityBlob[1];
+               } __packed extended_response;
+       } __packed u;
+} __packed;
+
+struct filesystem_attribute_info {
+       __le32 Attributes;
+       __le32 MaxPathNameComponentLength;
+       __le32 FileSystemNameLen;
+       __le16 FileSystemName[1]; /* do not have to save this - get subset? */
+} __packed;
+
+struct filesystem_device_info {
+       __le32 DeviceType;
+       __le32 DeviceCharacteristics;
+} __packed; /* device info level 0x104 */
+
+struct filesystem_vol_info {
+       __le64 VolumeCreationTime;
+       __le32 SerialNumber;
+       __le32 VolumeLabelSize;
+       __le16 Reserved;
+       __le16 VolumeLabel[1];
+} __packed;
+
+struct filesystem_info {
+       __le64 TotalAllocationUnits;
+       __le64 FreeAllocationUnits;
+       __le32 SectorsPerAllocationUnit;
+       __le32 BytesPerSector;
+} __packed;     /* size info, level 0x103 */
+
+#define EXTENDED_INFO_MAGIC 0x43667364 /* Cfsd */
+#define STRING_LENGTH 28
+
+struct fs_extended_info {
+       __le32 magic;
+       __le32 version;
+       __le32 release;
+       __u64 rel_date;
+       char    version_string[STRING_LENGTH];
+} __packed;
+
+struct object_id_info {
+       char objid[16];
+       struct fs_extended_info extended_info;
+} __packed;
+
+struct file_directory_info {
+       __le32 NextEntryOffset;
+       __u32 FileIndex;
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 EndOfFile;
+       __le64 AllocationSize;
+       __le32 ExtFileAttributes;
+       __le32 FileNameLength;
+       char FileName[1];
+} __packed;   /* level 0x101 FF resp data */
+
+struct file_names_info {
+       __le32 NextEntryOffset;
+       __u32 FileIndex;
+       __le32 FileNameLength;
+       char FileName[1];
+} __packed;   /* level 0xc FF resp data */
+
+struct file_full_directory_info {
+       __le32 NextEntryOffset;
+       __u32 FileIndex;
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 EndOfFile;
+       __le64 AllocationSize;
+       __le32 ExtFileAttributes;
+       __le32 FileNameLength;
+       __le32 EaSize;
+       char FileName[1];
+} __packed; /* level 0x102 FF resp */
+
+struct file_both_directory_info {
+       __le32 NextEntryOffset;
+       __u32 FileIndex;
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 EndOfFile;
+       __le64 AllocationSize;
+       __le32 ExtFileAttributes;
+       __le32 FileNameLength;
+       __le32 EaSize; /* length of the xattrs */
+       __u8   ShortNameLength;
+       __u8   Reserved;
+       __u8   ShortName[24];
+       char FileName[1];
+} __packed; /* level 0x104 FFrsp data */
+
+struct file_id_both_directory_info {
+       __le32 NextEntryOffset;
+       __u32 FileIndex;
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 EndOfFile;
+       __le64 AllocationSize;
+       __le32 ExtFileAttributes;
+       __le32 FileNameLength;
+       __le32 EaSize; /* length of the xattrs */
+       __u8   ShortNameLength;
+       __u8   Reserved;
+       __u8   ShortName[24];
+       __le16 Reserved2;
+       __le64 UniqueId;
+       char FileName[1];
+} __packed;
+
+struct file_id_full_dir_info {
+       __le32 NextEntryOffset;
+       __u32 FileIndex;
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 EndOfFile;
+       __le64 AllocationSize;
+       __le32 ExtFileAttributes;
+       __le32 FileNameLength;
+       __le32 EaSize; /* EA size */
+       __le32 Reserved;
+       __le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/
+       char FileName[1];
+} __packed; /* level 0x105 FF rsp data */
+
+struct smb_version_values {
+       char            *version_string;
+       __u16           protocol_id;
+       __le16          lock_cmd;
+       __u32           capabilities;
+       __u32           max_read_size;
+       __u32           max_write_size;
+       __u32           max_trans_size;
+       __u32           large_lock_type;
+       __u32           exclusive_lock_type;
+       __u32           shared_lock_type;
+       __u32           unlock_lock_type;
+       size_t          header_size;
+       size_t          max_header_size;
+       size_t          read_rsp_size;
+       unsigned int    cap_unix;
+       unsigned int    cap_nt_find;
+       unsigned int    cap_large_files;
+       __u16           signing_enabled;
+       __u16           signing_required;
+       size_t          create_lease_size;
+       size_t          create_durable_size;
+       size_t          create_durable_v2_size;
+       size_t          create_mxac_size;
+       size_t          create_disk_id_size;
+       size_t          create_posix_size;
+};
+
+struct filesystem_posix_info {
+       /* For undefined recommended transfer size return -1 in that field */
+       __le32 OptimalTransferSize;  /* bsize on some os, iosize on other os */
+       __le32 BlockSize;
+       /* The next three fields are in terms of the block size
+        * (above).  If the block size is unknown, 4096 would be a
+        * reasonable block size for a server to report.
+        * Note that returning the blocks/blocks-available removes the need
+        * to make a second call (to QFSInfo level 0x103) to get this info.
+        * UserBlocksAvail is typically less than or equal to BlocksAvail;
+        * if no distinction is made, return the same value in each.
+        */
+       __le64 TotalBlocks;
+       __le64 BlocksAvail;       /* bfree */
+       __le64 UserBlocksAvail;   /* bavail */
+       /* For undefined Node fields or FSID return -1 */
+       __le64 TotalFileNodes;
+       __le64 FreeFileNodes;
+       __le64 FileSysIdentifier;   /* fsid */
+       /* NB Namelen comes from FILE_SYSTEM_ATTRIBUTE_INFO call */
+       /* NB flags can come from FILE_SYSTEM_DEVICE_INFO call   */
+} __packed;
+
+struct smb_version_ops {
+       u16 (*get_cmd_val)(struct ksmbd_work *swork);
+       int (*init_rsp_hdr)(struct ksmbd_work *swork);
+       void (*set_rsp_status)(struct ksmbd_work *swork, __le32 err);
+       int (*allocate_rsp_buf)(struct ksmbd_work *work);
+       int (*set_rsp_credits)(struct ksmbd_work *work);
+       int (*check_user_session)(struct ksmbd_work *work);
+       int (*get_ksmbd_tcon)(struct ksmbd_work *work);
+       bool (*is_sign_req)(struct ksmbd_work *work, unsigned int command);
+       int (*check_sign_req)(struct ksmbd_work *work);
+       void (*set_sign_rsp)(struct ksmbd_work *work);
+       int (*generate_signingkey)(struct ksmbd_session *sess, struct ksmbd_conn *conn);
+       int (*generate_encryptionkey)(struct ksmbd_session *sess);
+       bool (*is_transform_hdr)(void *buf);
+       int (*decrypt_req)(struct ksmbd_work *work);
+       int (*encrypt_resp)(struct ksmbd_work *work);
+};
+
+struct smb_version_cmds {
+       int (*proc)(struct ksmbd_work *swork);
+};
+
+static inline size_t
+smb2_hdr_size_no_buflen(struct smb_version_values *vals)
+{
+       return vals->header_size - 4;
+}
+
+int ksmbd_min_protocol(void);
+int ksmbd_max_protocol(void);
+
+int ksmbd_lookup_protocol_idx(char *str);
+
+int ksmbd_verify_smb_message(struct ksmbd_work *work);
+bool ksmbd_smb_request(struct ksmbd_conn *conn);
+
+int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
+
+int ksmbd_init_smb_server(struct ksmbd_work *work);
+
+bool ksmbd_pdu_size_has_room(unsigned int pdu);
+
+struct ksmbd_kstat;
+int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
+                                     int info_level,
+                                     struct ksmbd_file *dir,
+                                     struct ksmbd_dir_info *d_info,
+                                     char *search_pattern,
+                                     int (*fn)(struct ksmbd_conn *,
+                                               int,
+                                               struct ksmbd_dir_info *,
+                                               struct user_namespace *,
+                                               struct ksmbd_kstat *));
+
+int ksmbd_extract_shortname(struct ksmbd_conn *conn,
+                           const char *longname,
+                           char *shortname);
+
+int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command);
+
+int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp);
+int ksmbd_override_fsids(struct ksmbd_work *work);
+void ksmbd_revert_fsids(struct ksmbd_work *work);
+
+unsigned int ksmbd_server_side_copy_max_chunk_count(void);
+unsigned int ksmbd_server_side_copy_max_chunk_size(void);
+unsigned int ksmbd_server_side_copy_max_total_size(void);
+bool is_asterisk(char *p);
+__le32 smb_map_generic_desired_access(__le32 daccess);
+
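+/*
+ * The RFC 1002 session header is a 4-byte big-endian field whose low
+ * three bytes carry the PDU length; the helpers below read and adjust
+ * that length in place.
+ */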
+static inline unsigned int get_rfc1002_len(void *buf)
+{
+       return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
+}
+
+static inline void inc_rfc1001_len(void *buf, int count)
+{
+       be32_add_cpu((__be32 *)buf, count);
+}
+#endif /* __SMB_COMMON_H__ */
diff --git a/fs/ksmbd/smbacl.c b/fs/ksmbd/smbacl.c
new file mode 100644 (file)
index 0000000..5456e3a
--- /dev/null
@@ -0,0 +1,1366 @@
+// SPDX-License-Identifier: LGPL-2.1+
+/*
+ *   Copyright (C) International Business Machines  Corp., 2007,2008
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *   Copyright (C) 2020 Samsung Electronics Co., Ltd.
+ *   Author(s): Namjae Jeon <linkinjeon@kernel.org>
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "smbacl.h"
+#include "smb_common.h"
+#include "server.h"
+#include "misc.h"
+#include "mgmt/share_config.h"
+
+static const struct smb_sid domain = {1, 4, {0, 0, 0, 0, 0, 5},
+       {cpu_to_le32(21), cpu_to_le32(1), cpu_to_le32(2), cpu_to_le32(3),
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/* security id for creator owner (S-1-3-0) */
+static const struct smb_sid creator_owner = {
+       1, 1, {0, 0, 0, 0, 0, 3}, {0} };
+/* security id for creator group (S-1-3-1) */
+static const struct smb_sid creator_group = {
+       1, 1, {0, 0, 0, 0, 0, 3}, {cpu_to_le32(1)} };
+
+/* security id for everyone/world system group */
+static const struct smb_sid sid_everyone = {
+       1, 1, {0, 0, 0, 0, 0, 1}, {0} };
+/* security id for Authenticated Users system group */
+static const struct smb_sid sid_authusers = {
+       1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} };
+
+/* S-1-22-1 Unmapped Unix users */
+static const struct smb_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
+               {cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/* S-1-22-2 Unmapped Unix groups */
+static const struct smb_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
+               {cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/*
+ * See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
+ */
+
+/* S-1-5-88 MS NFS and Apple style UID/GID/mode */
+
+/* S-1-5-88-1 Unix uid */
+static const struct smb_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5},
+       {cpu_to_le32(88),
+        cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/* S-1-5-88-2 Unix gid */
+static const struct smb_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5},
+       {cpu_to_le32(88),
+        cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/* S-1-5-88-3 Unix mode */
+static const struct smb_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5},
+       {cpu_to_le32(88),
+        cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/*
+ * Compare two SIDs (roughly equivalent to a UUID for a user or group).
+ * Returns zero when they match and a non-zero value (positive or
+ * negative, giving an ordering) when they differ.
+ */
+int compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid)
+{
+       int i;
+       int num_subauth, num_sat, num_saw;
+
+       if (!ctsid || !cwsid)
+               return 1;
+
+       /* compare the revision */
+       if (ctsid->revision != cwsid->revision) {
+               if (ctsid->revision > cwsid->revision)
+                       return 1;
+               else
+                       return -1;
+       }
+
+       /* compare all of the six auth values */
+       for (i = 0; i < NUM_AUTHS; ++i) {
+               if (ctsid->authority[i] != cwsid->authority[i]) {
+                       if (ctsid->authority[i] > cwsid->authority[i])
+                               return 1;
+                       else
+                               return -1;
+               }
+       }
+
+       /* compare all of the subauth values if any */
+       num_sat = ctsid->num_subauth;
+       num_saw = cwsid->num_subauth;
+       num_subauth = num_sat < num_saw ? num_sat : num_saw;
+       if (num_subauth) {
+               for (i = 0; i < num_subauth; ++i) {
+                       if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
+                               if (le32_to_cpu(ctsid->sub_auth[i]) >
+                                   le32_to_cpu(cwsid->sub_auth[i]))
+                                       return 1;
+                               else
+                                       return -1;
+                       }
+               }
+       }
+
+       return 0; /* sids compare/match */
+}
+
+static void smb_copy_sid(struct smb_sid *dst, const struct smb_sid *src)
+{
+       int i;
+
+       dst->revision = src->revision;
+       dst->num_subauth = min_t(u8, src->num_subauth, SID_MAX_SUB_AUTHORITIES);
+       for (i = 0; i < NUM_AUTHS; ++i)
+               dst->authority[i] = src->authority[i];
+       for (i = 0; i < dst->num_subauth; ++i)
+               dst->sub_auth[i] = src->sub_auth[i];
+}
+
+/*
+ * Change the posix mode to reflect the permissions granted by an ACE.
+ * Only part of the existing mode is overwritten; the bits to set can be
+ * S_IRWXU, S_IRWXG or S_IRWXO, i.e. 00700, 00070 or 00007.
+ */
+static umode_t access_flags_to_mode(struct smb_fattr *fattr, __le32 ace_flags,
+                                   int type)
+{
+       __u32 flags = le32_to_cpu(ace_flags);
+       umode_t mode = 0;
+
+       if (flags & GENERIC_ALL) {
+               mode = 0777;
+               ksmbd_debug(SMB, "all perms\n");
+               return mode;
+       }
+
+       if ((flags & GENERIC_READ) || (flags & FILE_READ_RIGHTS))
+               mode = 0444;
+       if ((flags & GENERIC_WRITE) || (flags & FILE_WRITE_RIGHTS)) {
+               mode |= 0222;
+               if (S_ISDIR(fattr->cf_mode))
+                       mode |= 0111;
+       }
+       if ((flags & GENERIC_EXECUTE) || (flags & FILE_EXEC_RIGHTS))
+               mode |= 0111;
+
+       if (type == ACCESS_DENIED_ACE_TYPE || type == ACCESS_DENIED_OBJECT_ACE_TYPE)
+               mode = ~mode;
+
+       ksmbd_debug(SMB, "access flags 0x%x mode now %04o\n", flags, mode);
+
+       return mode;
+}
+
+/*
+ * Generate access flags to reflect the permissions in the given mode
+ * (the existing mode).  This function is called for every ACE in the
+ * DACL whose SID matches the owner, the group or everyone.
+ */
+static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
+                                __u32 *pace_flags)
+{
+       /* reset access mask */
+       *pace_flags = 0x0;
+
+       /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
+       mode &= bits_to_use;
+
+       /*
+        * check for R/W/X UGO since we do not know whose flags
+        * these are, but we have cleared all the bits except RWX for
+        * either user, group or other as per bits_to_use
+        */
+       if (mode & 0444)
+               *pace_flags |= SET_FILE_READ_RIGHTS;
+       if (mode & 0222)
+               *pace_flags |= FILE_WRITE_RIGHTS;
+       if (mode & 0111)
+               *pace_flags |= SET_FILE_EXEC_RIGHTS;
+
+       ksmbd_debug(SMB, "mode: %o, access flags now 0x%x\n",
+                   mode, *pace_flags);
+}
+
+static __u16 fill_ace_for_sid(struct smb_ace *pntace,
+                             const struct smb_sid *psid, int type, int flags,
+                             umode_t mode, umode_t bits)
+{
+       int i;
+       __u16 size = 0;
+       __u32 access_req = 0;
+
+       pntace->type = type;
+       pntace->flags = flags;
+       mode_to_access_flags(mode, bits, &access_req);
+       if (!access_req)
+               access_req = SET_MINIMUM_RIGHTS;
+       pntace->access_req = cpu_to_le32(access_req);
+
+       pntace->sid.revision = psid->revision;
+       pntace->sid.num_subauth = psid->num_subauth;
+       for (i = 0; i < NUM_AUTHS; i++)
+               pntace->sid.authority[i] = psid->authority[i];
+       for (i = 0; i < psid->num_subauth; i++)
+               pntace->sid.sub_auth[i] = psid->sub_auth[i];
+
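+       /*
+        * On-the-wire ACE size: type (1) + flags (1) + size (2) +
+        * access mask (4) + SID header (1 + 1 + 6) + one 4-byte
+        * sub-authority per num_subauth.
+        */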
+       size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
+       pntace->size = cpu_to_le16(size);
+
+       return size;
+}
+
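+/*
+ * Build a SID for the given id: copy the well-known SID template that
+ * matches sidtype and, except for the creator owner/group SIDs, append
+ * the id as the final RID sub-authority.
+ */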
+void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid)
+{
+       switch (sidtype) {
+       case SIDOWNER:
+               smb_copy_sid(ssid, &server_conf.domain_sid);
+               break;
+       case SIDUNIX_USER:
+               smb_copy_sid(ssid, &sid_unix_users);
+               break;
+       case SIDUNIX_GROUP:
+               smb_copy_sid(ssid, &sid_unix_groups);
+               break;
+       case SIDCREATOR_OWNER:
+               smb_copy_sid(ssid, &creator_owner);
+               return;
+       case SIDCREATOR_GROUP:
+               smb_copy_sid(ssid, &creator_group);
+               return;
+       case SIDNFS_USER:
+               smb_copy_sid(ssid, &sid_unix_NFS_users);
+               break;
+       case SIDNFS_GROUP:
+               smb_copy_sid(ssid, &sid_unix_NFS_groups);
+               break;
+       case SIDNFS_MODE:
+               smb_copy_sid(ssid, &sid_unix_NFS_mode);
+               break;
+       default:
+               return;
+       }
+
+       /* RID */
+       ssid->sub_auth[ssid->num_subauth] = cpu_to_le32(cid);
+       ssid->num_subauth++;
+}
+
+static int sid_to_id(struct user_namespace *user_ns,
+                    struct smb_sid *psid, uint sidtype,
+                    struct smb_fattr *fattr)
+{
+       int rc = -EINVAL;
+
+       /*
+        * If we have too many subauthorities, then something is really wrong.
+        * Just return an error.
+        */
+       if (unlikely(psid->num_subauth > SID_MAX_SUB_AUTHORITIES)) {
+               pr_err("%s: %u subauthorities is too many!\n",
+                      __func__, psid->num_subauth);
+               return -EIO;
+       }
+
+       if (sidtype == SIDOWNER) {
+               kuid_t uid;
+               uid_t id;
+
+               id = le32_to_cpu(psid->sub_auth[psid->num_subauth - 1]);
+               if (id > 0) {
+                       uid = make_kuid(user_ns, id);
+                       if (uid_valid(uid) && kuid_has_mapping(user_ns, uid)) {
+                               fattr->cf_uid = uid;
+                               rc = 0;
+                       }
+               }
+       } else {
+               kgid_t gid;
+               gid_t id;
+
+               id = le32_to_cpu(psid->sub_auth[psid->num_subauth - 1]);
+               if (id > 0) {
+                       gid = make_kgid(user_ns, id);
+                       if (gid_valid(gid) && kgid_has_mapping(user_ns, gid)) {
+                               fattr->cf_gid = gid;
+                               rc = 0;
+                       }
+               }
+       }
+
+       return rc;
+}
+
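+/*
+ * Convert the accumulated posix_acl_state into posix_acl entries in the
+ * canonical order: ACL_USER_OBJ, named ACL_USER entries, ACL_GROUP_OBJ,
+ * named ACL_GROUP entries, ACL_MASK (only when named entries exist) and
+ * ACL_OTHER.
+ */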
+void posix_state_to_acl(struct posix_acl_state *state,
+                       struct posix_acl_entry *pace)
+{
+       int i;
+
+       pace->e_tag = ACL_USER_OBJ;
+       pace->e_perm = state->owner.allow;
+       for (i = 0; i < state->users->n; i++) {
+               pace++;
+               pace->e_tag = ACL_USER;
+               pace->e_uid = state->users->aces[i].uid;
+               pace->e_perm = state->users->aces[i].perms.allow;
+       }
+
+       pace++;
+       pace->e_tag = ACL_GROUP_OBJ;
+       pace->e_perm = state->group.allow;
+
+       for (i = 0; i < state->groups->n; i++) {
+               pace++;
+               pace->e_tag = ACL_GROUP;
+               pace->e_gid = state->groups->aces[i].gid;
+               pace->e_perm = state->groups->aces[i].perms.allow;
+       }
+
+       if (state->users->n || state->groups->n) {
+               pace++;
+               pace->e_tag = ACL_MASK;
+               pace->e_perm = state->mask.allow;
+       }
+
+       pace++;
+       pace->e_tag = ACL_OTHER;
+       pace->e_perm = state->other.allow;
+}
+
+int init_acl_state(struct posix_acl_state *state, int cnt)
+{
+       int alloc;
+
+       memset(state, 0, sizeof(struct posix_acl_state));
+       /*
+        * In the worst case, each individual acl could be for a distinct
+        * named user or group, but we don't know which, so we allocate
+        * enough space in both arrays:
+        */
+       alloc = sizeof(struct posix_ace_state_array)
+               + cnt * sizeof(struct posix_user_ace_state);
+       state->users = kzalloc(alloc, GFP_KERNEL);
+       if (!state->users)
+               return -ENOMEM;
+       state->groups = kzalloc(alloc, GFP_KERNEL);
+       if (!state->groups) {
+               kfree(state->users);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void free_acl_state(struct posix_acl_state *state)
+{
+       kfree(state->users);
+       kfree(state->groups);
+}
+
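+/*
+ * Translate an NT DACL into POSIX permissions: owner, group and
+ * everyone ACEs are folded into mode bits, a special S-1-5-88-3
+ * (NFS mode) ACE carries the mode directly, and unmatched user SIDs
+ * become named entries in the POSIX access and default ACLs
+ * (fattr->cf_acls / fattr->cf_dacls).
+ */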
+static void parse_dacl(struct user_namespace *user_ns,
+                      struct smb_acl *pdacl, char *end_of_acl,
+                      struct smb_sid *pownersid, struct smb_sid *pgrpsid,
+                      struct smb_fattr *fattr)
+{
+       int i, ret;
+       int num_aces = 0;
+       int acl_size;
+       char *acl_base;
+       struct smb_ace **ppace;
+       struct posix_acl_entry *cf_pace, *cf_pdace;
+       struct posix_acl_state acl_state, default_acl_state;
+       umode_t mode = 0, acl_mode;
+       bool owner_found = false, group_found = false, others_found = false;
+
+       if (!pdacl)
+               return;
+
+       /* validate that we do not go past end of acl */
+       if (end_of_acl <= (char *)pdacl ||
+           end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
+               pr_err("ACL too small to parse DACL\n");
+               return;
+       }
+
+       ksmbd_debug(SMB, "DACL revision %d size %d num aces %d\n",
+                   le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
+                   le32_to_cpu(pdacl->num_aces));
+
+       acl_base = (char *)pdacl;
+       acl_size = sizeof(struct smb_acl);
+
+       num_aces = le32_to_cpu(pdacl->num_aces);
+       if (num_aces <= 0)
+               return;
+
+       if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
+               return;
+
+       ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL);
+       if (!ppace)
+               return;
+
+       ret = init_acl_state(&acl_state, num_aces);
+       if (ret)
+               return;
+       ret = init_acl_state(&default_acl_state, num_aces);
+       if (ret) {
+               free_acl_state(&acl_state);
+               return;
+       }
+
+       /*
+        * reset rwx permissions for user/group/other.
+        * Also, if num_aces is 0 i.e. DACL has no ACEs,
+        * user/group/other have no permissions
+        */
+       for (i = 0; i < num_aces; ++i) {
+               ppace[i] = (struct smb_ace *)(acl_base + acl_size);
+               acl_base = (char *)ppace[i];
+               acl_size = le16_to_cpu(ppace[i]->size);
+               ppace[i]->access_req =
+                       smb_map_generic_desired_access(ppace[i]->access_req);
+
+               if (!(compare_sids(&ppace[i]->sid, &sid_unix_NFS_mode))) {
+                       fattr->cf_mode =
+                               le32_to_cpu(ppace[i]->sid.sub_auth[2]);
+                       break;
+               } else if (!compare_sids(&ppace[i]->sid, pownersid)) {
+                       acl_mode = access_flags_to_mode(fattr,
+                                                       ppace[i]->access_req,
+                                                       ppace[i]->type);
+                       acl_mode &= 0700;
+
+                       if (!owner_found) {
+                               mode &= ~(0700);
+                               mode |= acl_mode;
+                       }
+                       owner_found = true;
+               } else if (!compare_sids(&ppace[i]->sid, pgrpsid) ||
+                          ppace[i]->sid.sub_auth[ppace[i]->sid.num_subauth - 1] ==
+                           DOMAIN_USER_RID_LE) {
+                       acl_mode = access_flags_to_mode(fattr,
+                                                       ppace[i]->access_req,
+                                                       ppace[i]->type);
+                       acl_mode &= 0070;
+                       if (!group_found) {
+                               mode &= ~(0070);
+                               mode |= acl_mode;
+                       }
+                       group_found = true;
+               } else if (!compare_sids(&ppace[i]->sid, &sid_everyone)) {
+                       acl_mode = access_flags_to_mode(fattr,
+                                                       ppace[i]->access_req,
+                                                       ppace[i]->type);
+                       acl_mode &= 0007;
+                       if (!others_found) {
+                               mode &= ~(0007);
+                               mode |= acl_mode;
+                       }
+                       others_found = true;
+               } else if (!compare_sids(&ppace[i]->sid, &creator_owner)) {
+                       continue;
+               } else if (!compare_sids(&ppace[i]->sid, &creator_group)) {
+                       continue;
+               } else if (!compare_sids(&ppace[i]->sid, &sid_authusers)) {
+                       continue;
+               } else {
+                       struct smb_fattr temp_fattr;
+
+                       acl_mode = access_flags_to_mode(fattr, ppace[i]->access_req,
+                                                       ppace[i]->type);
+                       temp_fattr.cf_uid = INVALID_UID;
+                       ret = sid_to_id(user_ns, &ppace[i]->sid, SIDOWNER, &temp_fattr);
+                       if (ret || uid_eq(temp_fattr.cf_uid, INVALID_UID)) {
+                               pr_err("%s: Error %d mapping Owner SID to uid\n",
+                                      __func__, ret);
+                               continue;
+                       }
+
+                       acl_state.owner.allow = ((acl_mode & 0700) >> 6) | 0004;
+                       acl_state.users->aces[acl_state.users->n].uid =
+                               temp_fattr.cf_uid;
+                       acl_state.users->aces[acl_state.users->n++].perms.allow =
+                               ((acl_mode & 0700) >> 6) | 0004;
+                       default_acl_state.owner.allow = ((acl_mode & 0700) >> 6) | 0004;
+                       default_acl_state.users->aces[default_acl_state.users->n].uid =
+                               temp_fattr.cf_uid;
+                       default_acl_state.users->aces[default_acl_state.users->n++].perms.allow =
+                               ((acl_mode & 0700) >> 6) | 0004;
+               }
+       }
+       kfree(ppace);
+
+       if (owner_found) {
+               /* The owner must be set to at least read-only. */
+               acl_state.owner.allow = ((mode & 0700) >> 6) | 0004;
+               acl_state.users->aces[acl_state.users->n].uid = fattr->cf_uid;
+               acl_state.users->aces[acl_state.users->n++].perms.allow =
+                       ((mode & 0700) >> 6) | 0004;
+               default_acl_state.owner.allow = ((mode & 0700) >> 6) | 0004;
+               default_acl_state.users->aces[default_acl_state.users->n].uid =
+                       fattr->cf_uid;
+               default_acl_state.users->aces[default_acl_state.users->n++].perms.allow =
+                       ((mode & 0700) >> 6) | 0004;
+       }
+
+       if (group_found) {
+               acl_state.group.allow = (mode & 0070) >> 3;
+               acl_state.groups->aces[acl_state.groups->n].gid =
+                       fattr->cf_gid;
+               acl_state.groups->aces[acl_state.groups->n++].perms.allow =
+                       (mode & 0070) >> 3;
+               default_acl_state.group.allow = (mode & 0070) >> 3;
+               default_acl_state.groups->aces[default_acl_state.groups->n].gid =
+                       fattr->cf_gid;
+               default_acl_state.groups->aces[default_acl_state.groups->n++].perms.allow =
+                       (mode & 0070) >> 3;
+       }
+
+       if (others_found) {
+               fattr->cf_mode &= ~(0007);
+               fattr->cf_mode |= mode & 0007;
+
+               acl_state.other.allow = mode & 0007;
+               default_acl_state.other.allow = mode & 0007;
+       }
+
+       if (acl_state.users->n || acl_state.groups->n) {
+               acl_state.mask.allow = 0x07;
+
+               if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
+                       fattr->cf_acls =
+                               posix_acl_alloc(acl_state.users->n +
+                                       acl_state.groups->n + 4, GFP_KERNEL);
+                       if (fattr->cf_acls) {
+                               cf_pace = fattr->cf_acls->a_entries;
+                               posix_state_to_acl(&acl_state, cf_pace);
+                       }
+               }
+       }
+
+       if (default_acl_state.users->n || default_acl_state.groups->n) {
+               default_acl_state.mask.allow = 0x07;
+
+               if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
+                       fattr->cf_dacls =
+                               posix_acl_alloc(default_acl_state.users->n +
+                               default_acl_state.groups->n + 4, GFP_KERNEL);
+                       if (fattr->cf_dacls) {
+                               cf_pdace = fattr->cf_dacls->a_entries;
+                               posix_state_to_acl(&default_acl_state, cf_pdace);
+                       }
+               }
+       }
+       free_acl_state(&acl_state);
+       free_acl_state(&default_acl_state);
+}
+
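+/*
+ * Append allowed ACEs for the POSIX access ACL entries in @fattr after
+ * @pndace, skipping entries whose SID already appears among the
+ * nt_aces_num existing ACEs; when no NT ACEs were supplied, the POSIX
+ * default ACL entries are emitted as inheritable ACEs as well.
+ */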
+static void set_posix_acl_entries_dacl(struct user_namespace *user_ns,
+                                      struct smb_ace *pndace,
+                                      struct smb_fattr *fattr, u32 *num_aces,
+                                      u16 *size, u32 nt_aces_num)
+{
+       struct posix_acl_entry *pace;
+       struct smb_sid *sid;
+       struct smb_ace *ntace;
+       int i, j;
+
+       if (!fattr->cf_acls)
+               goto posix_default_acl;
+
+       pace = fattr->cf_acls->a_entries;
+       for (i = 0; i < fattr->cf_acls->a_count; i++, pace++) {
+               int flags = 0;
+
+               sid = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
+               if (!sid)
+                       break;
+
+               if (pace->e_tag == ACL_USER) {
+                       uid_t uid;
+                       unsigned int sid_type = SIDOWNER;
+
+                       uid = from_kuid(user_ns, pace->e_uid);
+                       if (!uid)
+                               sid_type = SIDUNIX_USER;
+                       id_to_sid(uid, sid_type, sid);
+               } else if (pace->e_tag == ACL_GROUP) {
+                       gid_t gid;
+
+                       gid = from_kgid(user_ns, pace->e_gid);
+                       id_to_sid(gid, SIDUNIX_GROUP, sid);
+               } else if (pace->e_tag == ACL_OTHER && !nt_aces_num) {
+                       smb_copy_sid(sid, &sid_everyone);
+               } else {
+                       kfree(sid);
+                       continue;
+               }
+               ntace = pndace;
+               for (j = 0; j < nt_aces_num; j++) {
+                       if (ntace->sid.sub_auth[ntace->sid.num_subauth - 1] ==
+                                       sid->sub_auth[sid->num_subauth - 1])
+                               goto pass_same_sid;
+                       ntace = (struct smb_ace *)((char *)ntace +
+                                       le16_to_cpu(ntace->size));
+               }
+
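+               /*
+                * Flags 0x03 is OBJECT_INHERIT_ACE | CONTAINER_INHERIT_ACE:
+                * on directories the "other" entry (and, below, the user and
+                * group entries) is also emitted as an inheritable ACE so new
+                * children pick it up.
+                */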
+               if (S_ISDIR(fattr->cf_mode) && pace->e_tag == ACL_OTHER)
+                       flags = 0x03;
+
+               ntace = (struct smb_ace *)((char *)pndace + *size);
+               *size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED, flags,
+                               pace->e_perm, 0777);
+               (*num_aces)++;
+               if (pace->e_tag == ACL_USER)
+                       ntace->access_req |=
+                               FILE_DELETE_LE | FILE_DELETE_CHILD_LE;
+
+               if (S_ISDIR(fattr->cf_mode) &&
+                   (pace->e_tag == ACL_USER || pace->e_tag == ACL_GROUP)) {
+                       ntace = (struct smb_ace *)((char *)pndace + *size);
+                       *size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED,
+                                       0x03, pace->e_perm, 0777);
+                       (*num_aces)++;
+                       if (pace->e_tag == ACL_USER)
+                               ntace->access_req |=
+                                       FILE_DELETE_LE | FILE_DELETE_CHILD_LE;
+               }
+
+pass_same_sid:
+               kfree(sid);
+       }
+
+       if (nt_aces_num)
+               return;
+
+posix_default_acl:
+       if (!fattr->cf_dacls)
+               return;
+
+       pace = fattr->cf_dacls->a_entries;
+       for (i = 0; i < fattr->cf_dacls->a_count; i++, pace++) {
+               sid = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
+               if (!sid)
+                       break;
+
+               if (pace->e_tag == ACL_USER) {
+                       uid_t uid;
+
+                       uid = from_kuid(user_ns, pace->e_uid);
+                       id_to_sid(uid, SIDCREATOR_OWNER, sid);
+               } else if (pace->e_tag == ACL_GROUP) {
+                       gid_t gid;
+
+                       gid = from_kgid(user_ns, pace->e_gid);
+                       id_to_sid(gid, SIDCREATOR_GROUP, sid);
+               } else {
+                       kfree(sid);
+                       continue;
+               }
+
+               ntace = (struct smb_ace *)((char *)pndace + *size);
+               *size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED, 0x0b,
+                               pace->e_perm, 0777);
+               (*num_aces)++;
+               if (pace->e_tag == ACL_USER)
+                       ntace->access_req |=
+                               FILE_DELETE_LE | FILE_DELETE_CHILD_LE;
+               kfree(sid);
+       }
+}
+
+static void set_ntacl_dacl(struct user_namespace *user_ns,
+                          struct smb_acl *pndacl,
+                          struct smb_acl *nt_dacl,
+                          const struct smb_sid *pownersid,
+                          const struct smb_sid *pgrpsid,
+                          struct smb_fattr *fattr)
+{
+       struct smb_ace *ntace, *pndace;
+       int nt_num_aces = le32_to_cpu(nt_dacl->num_aces), num_aces = 0;
+       unsigned short size = 0;
+       int i;
+
+       pndace = (struct smb_ace *)((char *)pndacl + sizeof(struct smb_acl));
+       if (nt_num_aces) {
+               ntace = (struct smb_ace *)((char *)nt_dacl + sizeof(struct smb_acl));
+               for (i = 0; i < nt_num_aces; i++) {
+                       memcpy((char *)pndace + size, ntace, le16_to_cpu(ntace->size));
+                       size += le16_to_cpu(ntace->size);
+                       ntace = (struct smb_ace *)((char *)ntace + le16_to_cpu(ntace->size));
+                       num_aces++;
+               }
+       }
+
+       set_posix_acl_entries_dacl(user_ns, pndace, fattr,
+                                  &num_aces, &size, nt_num_aces);
+       pndacl->num_aces = cpu_to_le32(num_aces);
+       pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
+}
+
+static void set_mode_dacl(struct user_namespace *user_ns,
+                         struct smb_acl *pndacl, struct smb_fattr *fattr)
+{
+       struct smb_ace *pace, *pndace;
+       u32 num_aces = 0;
+       u16 size = 0, ace_size = 0;
+       uid_t uid;
+       const struct smb_sid *sid;
+
+       pace = pndace = (struct smb_ace *)((char *)pndacl + sizeof(struct smb_acl));
+
+       if (fattr->cf_acls) {
+               set_posix_acl_entries_dacl(user_ns, pndace, fattr,
+                                          &num_aces, &size, num_aces);
+               goto out;
+       }
+
+       /* owner RID */
+       uid = from_kuid(user_ns, fattr->cf_uid);
+       if (uid)
+               sid = &server_conf.domain_sid;
+       else
+               sid = &sid_unix_users;
+       ace_size = fill_ace_for_sid(pace, sid, ACCESS_ALLOWED, 0,
+                                   fattr->cf_mode, 0700);
+       pace->sid.sub_auth[pace->sid.num_subauth++] = cpu_to_le32(uid);
+       pace->size = cpu_to_le16(ace_size + 4);
+       size += le16_to_cpu(pace->size);
+       pace = (struct smb_ace *)((char *)pndace + size);
+
+       /* Group RID */
+       ace_size = fill_ace_for_sid(pace, &sid_unix_groups,
+                                   ACCESS_ALLOWED, 0, fattr->cf_mode, 0070);
+       pace->sid.sub_auth[pace->sid.num_subauth++] =
+               cpu_to_le32(from_kgid(user_ns, fattr->cf_gid));
+       pace->size = cpu_to_le16(ace_size + 4);
+       size += le16_to_cpu(pace->size);
+       pace = (struct smb_ace *)((char *)pndace + size);
+       num_aces = 3;
+
+       if (S_ISDIR(fattr->cf_mode)) {
+               pace = (struct smb_ace *)((char *)pndace + size);
+
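+               /*
+                * For directories, add inherit-only ACEs (flags 0x0b =
+                * OBJECT_INHERIT_ACE | CONTAINER_INHERIT_ACE |
+                * INHERIT_ONLY_ACE) for CREATOR OWNER and CREATOR GROUP so
+                * new children derive owner/group permissions from the mode.
+                */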
+               /* creator owner */
+               size += fill_ace_for_sid(pace, &creator_owner, ACCESS_ALLOWED,
+                                        0x0b, fattr->cf_mode, 0700);
+               pace = (struct smb_ace *)((char *)pndace + size);
+
+               /* creator group */
+               size += fill_ace_for_sid(pace, &creator_group, ACCESS_ALLOWED,
+                                        0x0b, fattr->cf_mode, 0070);
+               pace = (struct smb_ace *)((char *)pndace + size);
+               num_aces = 5;
+       }
+
+       /* other */
+       size += fill_ace_for_sid(pace, &sid_everyone, ACCESS_ALLOWED, 0,
+                                fattr->cf_mode, 0007);
+
+out:
+       pndacl->num_aces = cpu_to_le32(num_aces);
+       pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
+}
+
+static int parse_sid(struct smb_sid *psid, char *end_of_acl)
+{
+       /*
+        * Validate that we do not go past the end of the ACL - a SID must be
+        * at least 8 bytes long (revision + num_subauth + 6 authority bytes),
+        * which assumes no sub-authorities, e.g. the null SID.
+        */
+       if (end_of_acl < (char *)psid + 8) {
+               pr_err("ACL too small to parse SID %p\n", psid);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Convert CIFS ACL to POSIX form */
+int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
+                  int acl_len, struct smb_fattr *fattr)
+{
+       int rc = 0;
+       struct smb_sid *owner_sid_ptr, *group_sid_ptr;
+       struct smb_acl *dacl_ptr; /* no need for SACL ptr */
+       char *end_of_acl = ((char *)pntsd) + acl_len;
+       __u32 dacloffset;
+       int pntsd_type;
+
+       if (!pntsd)
+               return -EIO;
+
+       owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
+                       le32_to_cpu(pntsd->osidoffset));
+       group_sid_ptr = (struct smb_sid *)((char *)pntsd +
+                       le32_to_cpu(pntsd->gsidoffset));
+       dacloffset = le32_to_cpu(pntsd->dacloffset);
+       dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+       ksmbd_debug(SMB,
+                   "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n",
+                   pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
+                   le32_to_cpu(pntsd->gsidoffset),
+                   le32_to_cpu(pntsd->sacloffset), dacloffset);
+
+       pntsd_type = le16_to_cpu(pntsd->type);
+       if (!(pntsd_type & DACL_PRESENT)) {
+               ksmbd_debug(SMB, "DACL_PRESENT flag is not set in the descriptor type\n");
+               return rc;
+       }
+
+       pntsd->type = cpu_to_le16(DACL_PRESENT);
+
+       if (pntsd->osidoffset) {
+               rc = parse_sid(owner_sid_ptr, end_of_acl);
+               if (rc) {
+                       pr_err("%s: Error %d parsing Owner SID\n", __func__, rc);
+                       return rc;
+               }
+
+               rc = sid_to_id(user_ns, owner_sid_ptr, SIDOWNER, fattr);
+               if (rc) {
+                       pr_err("%s: Error %d mapping Owner SID to uid\n",
+                              __func__, rc);
+                       owner_sid_ptr = NULL;
+               }
+       }
+
+       if (pntsd->gsidoffset) {
+               rc = parse_sid(group_sid_ptr, end_of_acl);
+               if (rc) {
+                       pr_err("%s: Error %d parsing Group SID\n",
+                              __func__, rc);
+                       return rc;
+               }
+               rc = sid_to_id(user_ns, group_sid_ptr, SIDUNIX_GROUP, fattr);
+               if (rc) {
+                       pr_err("%s: Error %d mapping Group SID to gid\n",
+                              __func__, rc);
+                       group_sid_ptr = NULL;
+               }
+       }
+
+       if ((pntsd_type & (DACL_AUTO_INHERITED | DACL_AUTO_INHERIT_REQ)) ==
+           (DACL_AUTO_INHERITED | DACL_AUTO_INHERIT_REQ))
+               pntsd->type |= cpu_to_le16(DACL_AUTO_INHERITED);
+       if (pntsd_type & DACL_PROTECTED)
+               pntsd->type |= cpu_to_le16(DACL_PROTECTED);
+
+       if (dacloffset) {
+               parse_dacl(user_ns, dacl_ptr, end_of_acl,
+                          owner_sid_ptr, group_sid_ptr, fattr);
+       }
+
+       return 0;
+}
+
+/* Convert permission bits from mode to equivalent CIFS ACL */
+int build_sec_desc(struct user_namespace *user_ns,
+                  struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd,
+                  int addition_info, __u32 *secdesclen,
+                  struct smb_fattr *fattr)
+{
+       int rc = 0;
+       __u32 offset;
+       struct smb_sid *owner_sid_ptr, *group_sid_ptr;
+       struct smb_sid *nowner_sid_ptr, *ngroup_sid_ptr;
+       struct smb_acl *dacl_ptr = NULL; /* no need for SACL ptr */
+       uid_t uid;
+       gid_t gid;
+       unsigned int sid_type = SIDOWNER;
+
+       nowner_sid_ptr = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
+       if (!nowner_sid_ptr)
+               return -ENOMEM;
+
+       uid = from_kuid(user_ns, fattr->cf_uid);
+       if (!uid)
+               sid_type = SIDUNIX_USER;
+       id_to_sid(uid, sid_type, nowner_sid_ptr);
+
+       ngroup_sid_ptr = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
+       if (!ngroup_sid_ptr) {
+               kfree(nowner_sid_ptr);
+               return -ENOMEM;
+       }
+
+       gid = from_kgid(user_ns, fattr->cf_gid);
+       id_to_sid(gid, SIDUNIX_GROUP, ngroup_sid_ptr);
+
+       offset = sizeof(struct smb_ntsd);
+       pntsd->sacloffset = 0;
+       pntsd->revision = cpu_to_le16(1);
+       pntsd->type = cpu_to_le16(SELF_RELATIVE);
+       if (ppntsd)
+               pntsd->type |= ppntsd->type;
+
+       if (addition_info & OWNER_SECINFO) {
+               pntsd->osidoffset = cpu_to_le32(offset);
+               owner_sid_ptr = (struct smb_sid *)((char *)pntsd + offset);
+               smb_copy_sid(owner_sid_ptr, nowner_sid_ptr);
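+               /*
+                * Advance past the copied SID: 1 byte revision + 1 byte
+                * num_subauth + 6 authority bytes + 4 bytes per sub-authority.
+                */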
+               offset += 1 + 1 + 6 + (nowner_sid_ptr->num_subauth * 4);
+       }
+
+       if (addition_info & GROUP_SECINFO) {
+               pntsd->gsidoffset = cpu_to_le32(offset);
+               group_sid_ptr = (struct smb_sid *)((char *)pntsd + offset);
+               smb_copy_sid(group_sid_ptr, ngroup_sid_ptr);
+               offset += 1 + 1 + 6 + (ngroup_sid_ptr->num_subauth * 4);
+       }
+
+       if (addition_info & DACL_SECINFO) {
+               pntsd->type |= cpu_to_le16(DACL_PRESENT);
+               dacl_ptr = (struct smb_acl *)((char *)pntsd + offset);
+               dacl_ptr->revision = cpu_to_le16(2);
+               dacl_ptr->size = cpu_to_le16(sizeof(struct smb_acl));
+               dacl_ptr->num_aces = 0;
+
+               if (!ppntsd) {
+                       set_mode_dacl(user_ns, dacl_ptr, fattr);
+               } else if (!ppntsd->dacloffset) {
+                       goto out;
+               } else {
+                       struct smb_acl *ppdacl_ptr;
+
+                       ppdacl_ptr = (struct smb_acl *)((char *)ppntsd +
+                                               le32_to_cpu(ppntsd->dacloffset));
+                       set_ntacl_dacl(user_ns, dacl_ptr, ppdacl_ptr,
+                                      nowner_sid_ptr, ngroup_sid_ptr, fattr);
+               }
+               pntsd->dacloffset = cpu_to_le32(offset);
+               offset += le16_to_cpu(dacl_ptr->size);
+       }
+
+out:
+       kfree(nowner_sid_ptr);
+       kfree(ngroup_sid_ptr);
+       *secdesclen = offset;
+       return rc;
+}
+
+static void smb_set_ace(struct smb_ace *ace, const struct smb_sid *sid, u8 type,
+                       u8 flags, __le32 access_req)
+{
+       ace->type = type;
+       ace->flags = flags;
+       ace->access_req = access_req;
+       smb_copy_sid(&ace->sid, sid);
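+       /*
+        * ACE size: type(1) + flags(1) + size(2) + access_req(4) + SID header
+        * (revision 1 + num_subauth 1 + authority 6) + 4 bytes per sub-auth.
+        */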
+       ace->size = cpu_to_le16(1 + 1 + 2 + 4 + 1 + 1 + 6 + (sid->num_subauth * 4));
+}
+
+int smb_inherit_dacl(struct ksmbd_conn *conn,
+                    struct path *path,
+                    unsigned int uid, unsigned int gid)
+{
+       const struct smb_sid *psid, *creator = NULL;
+       struct smb_ace *parent_aces, *aces;
+       struct smb_acl *parent_pdacl;
+       struct smb_ntsd *parent_pntsd = NULL;
+       struct smb_sid owner_sid, group_sid;
+       struct dentry *parent = path->dentry->d_parent;
+       struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+       int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0;
+       int rc = 0, num_aces, dacloffset, pntsd_type, acl_len;
+       char *aces_base;
+       bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
+
+       acl_len = ksmbd_vfs_get_sd_xattr(conn, user_ns,
+                                        parent, &parent_pntsd);
+       if (acl_len <= 0)
+               return -ENOENT;
+       dacloffset = le32_to_cpu(parent_pntsd->dacloffset);
+       if (!dacloffset) {
+               rc = -EINVAL;
+               goto free_parent_pntsd;
+       }
+
+       parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset);
+       num_aces = le32_to_cpu(parent_pdacl->num_aces);
+       pntsd_type = le16_to_cpu(parent_pntsd->type);
+
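+       /*
+        * Worst case each parent ACE expands into two ACEs in the child (an
+        * effective ACE plus an inherit-only CREATOR OWNER/GROUP ACE), so
+        * reserve room for two full-size ACEs per parent ACE.
+        */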
+       aces_base = kmalloc(sizeof(struct smb_ace) * num_aces * 2, GFP_KERNEL);
+       if (!aces_base) {
+               rc = -ENOMEM;
+               goto free_parent_pntsd;
+       }
+
+       aces = (struct smb_ace *)aces_base;
+       parent_aces = (struct smb_ace *)((char *)parent_pdacl +
+                       sizeof(struct smb_acl));
+
+       if (pntsd_type & DACL_AUTO_INHERITED)
+               inherited_flags = INHERITED_ACE;
+
+       for (i = 0; i < num_aces; i++) {
+               flags = parent_aces->flags;
+               if (!smb_inherit_flags(flags, is_dir))
+                       goto pass;
+               if (is_dir) {
+                       flags &= ~(INHERIT_ONLY_ACE | INHERITED_ACE);
+                       if (!(flags & CONTAINER_INHERIT_ACE))
+                               flags |= INHERIT_ONLY_ACE;
+                       if (flags & NO_PROPAGATE_INHERIT_ACE)
+                               flags = 0;
+               } else {
+                       flags = 0;
+               }
+
+               if (!compare_sids(&creator_owner, &parent_aces->sid)) {
+                       creator = &creator_owner;
+                       id_to_sid(uid, SIDOWNER, &owner_sid);
+                       psid = &owner_sid;
+               } else if (!compare_sids(&creator_group, &parent_aces->sid)) {
+                       creator = &creator_group;
+                       id_to_sid(gid, SIDUNIX_GROUP, &group_sid);
+                       psid = &group_sid;
+               } else {
+                       creator = NULL;
+                       psid = &parent_aces->sid;
+               }
+
+               if (is_dir && creator && flags & CONTAINER_INHERIT_ACE) {
+                       smb_set_ace(aces, psid, parent_aces->type, inherited_flags,
+                                   parent_aces->access_req);
+                       nt_size += le16_to_cpu(aces->size);
+                       ace_cnt++;
+                       aces = (struct smb_ace *)((char *)aces + le16_to_cpu(aces->size));
+                       flags |= INHERIT_ONLY_ACE;
+                       psid = creator;
+               } else if (is_dir && !(parent_aces->flags & NO_PROPAGATE_INHERIT_ACE)) {
+                       psid = &parent_aces->sid;
+               }
+
+               smb_set_ace(aces, psid, parent_aces->type, flags | inherited_flags,
+                           parent_aces->access_req);
+               nt_size += le16_to_cpu(aces->size);
+               aces = (struct smb_ace *)((char *)aces + le16_to_cpu(aces->size));
+               ace_cnt++;
+pass:
+               parent_aces =
+                       (struct smb_ace *)((char *)parent_aces + le16_to_cpu(parent_aces->size));
+       }
+
+       if (nt_size > 0) {
+               struct smb_ntsd *pntsd;
+               struct smb_acl *pdacl;
+               struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
+               int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
+
+               if (parent_pntsd->osidoffset) {
+                       powner_sid = (struct smb_sid *)((char *)parent_pntsd +
+                                       le32_to_cpu(parent_pntsd->osidoffset));
+                       powner_sid_size = 1 + 1 + 6 + (powner_sid->num_subauth * 4);
+               }
+               if (parent_pntsd->gsidoffset) {
+                       pgroup_sid = (struct smb_sid *)((char *)parent_pntsd +
+                                       le32_to_cpu(parent_pntsd->gsidoffset));
+                       pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
+               }
+
+               pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
+                               pgroup_sid_size + sizeof(struct smb_acl) +
+                               nt_size, GFP_KERNEL);
+               if (!pntsd) {
+                       rc = -ENOMEM;
+                       goto free_aces_base;
+               }
+
+               pntsd->revision = cpu_to_le16(1);
+               pntsd->type = cpu_to_le16(SELF_RELATIVE | DACL_PRESENT);
+               if (le16_to_cpu(parent_pntsd->type) & DACL_AUTO_INHERITED)
+                       pntsd->type |= cpu_to_le16(DACL_AUTO_INHERITED);
+               pntsd_size = sizeof(struct smb_ntsd);
+               pntsd->osidoffset = parent_pntsd->osidoffset;
+               pntsd->gsidoffset = parent_pntsd->gsidoffset;
+               pntsd->dacloffset = parent_pntsd->dacloffset;
+
+               if (pntsd->osidoffset) {
+                       struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
+                                       le32_to_cpu(pntsd->osidoffset));
+                       memcpy(owner_sid, powner_sid, powner_sid_size);
+                       pntsd_size += powner_sid_size;
+               }
+
+               if (pntsd->gsidoffset) {
+                       struct smb_sid *group_sid = (struct smb_sid *)((char *)pntsd +
+                                       le32_to_cpu(pntsd->gsidoffset));
+                       memcpy(group_sid, pgroup_sid, pgroup_sid_size);
+                       pntsd_size += pgroup_sid_size;
+               }
+
+               if (pntsd->dacloffset) {
+                       struct smb_ace *pace;
+
+                       pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
+                       pdacl->revision = cpu_to_le16(2);
+                       pdacl->size = cpu_to_le16(sizeof(struct smb_acl) + nt_size);
+                       pdacl->num_aces = cpu_to_le32(ace_cnt);
+                       pace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+                       memcpy(pace, aces_base, nt_size);
+                       pntsd_size += sizeof(struct smb_acl) + nt_size;
+               }
+
+               ksmbd_vfs_set_sd_xattr(conn, user_ns,
+                                      path->dentry, pntsd, pntsd_size);
+               kfree(pntsd);
+       }
+
+free_aces_base:
+       kfree(aces_base);
+free_parent_pntsd:
+       kfree(parent_pntsd);
+       return rc;
+}
+
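+/*
+ * Decide whether a parent ACE is inherited by a child: files inherit ACEs
+ * marked OBJECT_INHERIT_ACE, directories inherit ACEs marked either
+ * OBJECT_INHERIT_ACE (unless NO_PROPAGATE_INHERIT_ACE is set) or
+ * CONTAINER_INHERIT_ACE.
+ */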
+bool smb_inherit_flags(int flags, bool is_dir)
+{
+       if (!is_dir)
+               return (flags & OBJECT_INHERIT_ACE) != 0;
+
+       if (flags & OBJECT_INHERIT_ACE && !(flags & NO_PROPAGATE_INHERIT_ACE))
+               return true;
+
+       if (flags & CONTAINER_INHERIT_ACE)
+               return true;
+       return false;
+}
+
+int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
+                       __le32 *pdaccess, int uid)
+{
+       struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+       struct smb_ntsd *pntsd = NULL;
+       struct smb_acl *pdacl;
+       struct posix_acl *posix_acls;
+       int rc = 0, acl_size;
+       struct smb_sid sid;
+       int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE);
+       struct smb_ace *ace;
+       int i, found = 0;
+       unsigned int access_bits = 0;
+       struct smb_ace *others_ace = NULL;
+       struct posix_acl_entry *pa_entry;
+       unsigned int sid_type = SIDOWNER;
+       char *end_of_acl;
+
+       ksmbd_debug(SMB, "check permission using windows acl\n");
+       acl_size = ksmbd_vfs_get_sd_xattr(conn, user_ns,
+                                         path->dentry, &pntsd);
+       if (acl_size <= 0 || !pntsd || !pntsd->dacloffset) {
+               kfree(pntsd);
+               return 0;
+       }
+
+       pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
+       end_of_acl = ((char *)pntsd) + acl_size;
+       if (end_of_acl <= (char *)pdacl) {
+               kfree(pntsd);
+               return 0;
+       }
+
+       if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size) ||
+           le16_to_cpu(pdacl->size) < sizeof(struct smb_acl)) {
+               kfree(pntsd);
+               return 0;
+       }
+
+       if (!pdacl->num_aces) {
+               if (!(le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) &&
+                   *pdaccess & ~(FILE_READ_CONTROL_LE | FILE_WRITE_DAC_LE)) {
+                       rc = -EACCES;
+                       goto err_out;
+               }
+               kfree(pntsd);
+               return 0;
+       }
+
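+       /*
+        * When FILE_MAXIMAL_ACCESS is requested, start granted from
+        * READ_CONTROL | WRITE_DAC | FILE_READ_ATTRIBUTES | DELETE and then
+        * OR in the access mask of every ACE in the DACL.
+        */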
+       if (*pdaccess & FILE_MAXIMAL_ACCESS_LE) {
+               granted = READ_CONTROL | WRITE_DAC | FILE_READ_ATTRIBUTES |
+                       DELETE;
+
+               ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+               for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
+                       granted |= le32_to_cpu(ace->access_req);
+                       ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
+                       if (end_of_acl < (char *)ace)
+                               goto err_out;
+               }
+
+               if (!pdacl->num_aces)
+                       granted = GENERIC_ALL_FLAGS;
+       }
+
+       if (!uid)
+               sid_type = SIDUNIX_USER;
+       id_to_sid(uid, sid_type, &sid);
+
+       ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+       for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
+               if (!compare_sids(&sid, &ace->sid) ||
+                   !compare_sids(&sid_unix_NFS_mode, &ace->sid)) {
+                       found = 1;
+                       break;
+               }
+               if (!compare_sids(&sid_everyone, &ace->sid))
+                       others_ace = ace;
+
+               ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
+               if (end_of_acl < (char *)ace)
+                       goto err_out;
+       }
+
+       if (*pdaccess & FILE_MAXIMAL_ACCESS_LE && found) {
+               granted = READ_CONTROL | WRITE_DAC | FILE_READ_ATTRIBUTES |
+                       DELETE;
+
+               granted |= le32_to_cpu(ace->access_req);
+
+               if (!pdacl->num_aces)
+                       granted = GENERIC_ALL_FLAGS;
+       }
+
+       if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
+               posix_acls = get_acl(d_inode(path->dentry), ACL_TYPE_ACCESS);
+               if (posix_acls && !found) {
+                       unsigned int id = -1;
+
+                       pa_entry = posix_acls->a_entries;
+                       for (i = 0; i < posix_acls->a_count; i++, pa_entry++) {
+                               if (pa_entry->e_tag == ACL_USER)
+                                       id = from_kuid(user_ns,
+                                                      pa_entry->e_uid);
+                               else if (pa_entry->e_tag == ACL_GROUP)
+                                       id = from_kgid(user_ns,
+                                                      pa_entry->e_gid);
+                               else
+                                       continue;
+
+                               if (id == uid) {
+                                       mode_to_access_flags(pa_entry->e_perm,
+                                                            0777,
+                                                            &access_bits);
+                                       if (!access_bits)
+                                               access_bits =
+                                                       SET_MINIMUM_RIGHTS;
+                                       goto check_access_bits;
+                               }
+                       }
+               }
+               if (posix_acls)
+                       posix_acl_release(posix_acls);
+       }
+
+       if (!found) {
+               if (others_ace) {
+                       ace = others_ace;
+               } else {
+                       ksmbd_debug(SMB, "Can't find corresponding sid\n");
+                       rc = -EACCES;
+                       goto err_out;
+               }
+       }
+
+       switch (ace->type) {
+       case ACCESS_ALLOWED_ACE_TYPE:
+               access_bits = le32_to_cpu(ace->access_req);
+               break;
+       case ACCESS_DENIED_ACE_TYPE:
+       case ACCESS_DENIED_CALLBACK_ACE_TYPE:
+               access_bits = le32_to_cpu(~ace->access_req);
+               break;
+       }
+
+check_access_bits:
+       if (granted &
+           ~(access_bits | FILE_READ_ATTRIBUTES | READ_CONTROL | WRITE_DAC | DELETE)) {
+               ksmbd_debug(SMB, "Access denied with winACL, granted : %x, access_req : %x\n",
+                           granted, le32_to_cpu(ace->access_req));
+               rc = -EACCES;
+               goto err_out;
+       }
+
+       *pdaccess = cpu_to_le32(granted);
+err_out:
+       kfree(pntsd);
+       return rc;
+}
+
+int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+                struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
+                bool type_check)
+{
+       int rc;
+       struct smb_fattr fattr = {{0}};
+       struct inode *inode = d_inode(path->dentry);
+       struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+       struct iattr newattrs;
+
+       fattr.cf_uid = INVALID_UID;
+       fattr.cf_gid = INVALID_GID;
+       fattr.cf_mode = inode->i_mode;
+
+       rc = parse_sec_desc(user_ns, pntsd, ntsd_len, &fattr);
+       if (rc)
+               goto out;
+
+       newattrs.ia_valid = ATTR_CTIME;
+       if (!uid_eq(fattr.cf_uid, INVALID_UID)) {
+               newattrs.ia_valid |= ATTR_UID;
+               newattrs.ia_uid = fattr.cf_uid;
+       }
+       if (!gid_eq(fattr.cf_gid, INVALID_GID)) {
+               newattrs.ia_valid |= ATTR_GID;
+               newattrs.ia_gid = fattr.cf_gid;
+       }
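+       /* Keep file type and special bits; take the permission bits from the SD */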
+       newattrs.ia_valid |= ATTR_MODE;
+       newattrs.ia_mode = (inode->i_mode & ~0777) | (fattr.cf_mode & 0777);
+
+       inode_lock(inode);
+       rc = notify_change(user_ns, path->dentry, &newattrs, NULL);
+       inode_unlock(inode);
+       if (rc)
+               goto out;
+
+       ksmbd_vfs_remove_acl_xattrs(user_ns, path->dentry);
+       /* Update posix acls */
+       if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && fattr.cf_dacls) {
+               rc = set_posix_acl(user_ns, inode,
+                                  ACL_TYPE_ACCESS, fattr.cf_acls);
+               if (S_ISDIR(inode->i_mode) && fattr.cf_dacls)
+                       rc = set_posix_acl(user_ns, inode,
+                                          ACL_TYPE_DEFAULT, fattr.cf_dacls);
+       }
+
+       /* Only check this when called from an SD BUFFER context */
+       if (type_check && !(le16_to_cpu(pntsd->type) & DACL_PRESENT))
+               goto out;
+
+       if (test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) {
+               /* Update WinACL in xattr */
+               ksmbd_vfs_remove_sd_xattrs(user_ns, path->dentry);
+               ksmbd_vfs_set_sd_xattr(conn, user_ns,
+                                      path->dentry, pntsd, ntsd_len);
+       }
+
+out:
+       posix_acl_release(fattr.cf_acls);
+       posix_acl_release(fattr.cf_dacls);
+       mark_inode_dirty(inode);
+       return rc;
+}
+
+void ksmbd_init_domain(u32 *sub_auth)
+{
+       int i;
+
+       memcpy(&server_conf.domain_sid, &domain, sizeof(struct smb_sid));
+       for (i = 0; i < 3; ++i)
+               server_conf.domain_sid.sub_auth[i + 1] = cpu_to_le32(sub_auth[i]);
+}
diff --git a/fs/ksmbd/smbacl.h b/fs/ksmbd/smbacl.h
new file mode 100644 (file)
index 0000000..940f686
--- /dev/null
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/*
+ *   Copyright (c) International Business Machines  Corp., 2007
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *   Modified by Namjae Jeon (linkinjeon@kernel.org)
+ */
+
+#ifndef _SMBACL_H
+#define _SMBACL_H
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/posix_acl.h>
+
+#include "mgmt/tree_connect.h"
+
+#define NUM_AUTHS (6)  /* number of authority fields */
+#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
+
+/*
+ * ACE types - see MS-DTYP 2.4.4.1
+ */
+enum {
+       ACCESS_ALLOWED,
+       ACCESS_DENIED,
+};
+
+/*
+ * Security ID types
+ */
+enum {
+       SIDOWNER = 1,
+       SIDGROUP,
+       SIDCREATOR_OWNER,
+       SIDCREATOR_GROUP,
+       SIDUNIX_USER,
+       SIDUNIX_GROUP,
+       SIDNFS_USER,
+       SIDNFS_GROUP,
+       SIDNFS_MODE,
+};
+
+/* Revision for ACLs */
+#define SD_REVISION    1
+
+/* Control flags for Security Descriptor */
+#define OWNER_DEFAULTED                0x0001
+#define GROUP_DEFAULTED                0x0002
+#define DACL_PRESENT           0x0004
+#define DACL_DEFAULTED         0x0008
+#define SACL_PRESENT           0x0010
+#define SACL_DEFAULTED         0x0020
+#define DACL_TRUSTED           0x0040
+#define SERVER_SECURITY                0x0080
+#define DACL_AUTO_INHERIT_REQ  0x0100
+#define SACL_AUTO_INHERIT_REQ  0x0200
+#define DACL_AUTO_INHERITED    0x0400
+#define SACL_AUTO_INHERITED    0x0800
+#define DACL_PROTECTED         0x1000
+#define SACL_PROTECTED         0x2000
+#define RM_CONTROL_VALID       0x4000
+#define SELF_RELATIVE          0x8000
+
+/* ACE types - see MS-DTYP 2.4.4.1 */
+#define ACCESS_ALLOWED_ACE_TYPE 0x00
+#define ACCESS_DENIED_ACE_TYPE  0x01
+#define SYSTEM_AUDIT_ACE_TYPE   0x02
+#define SYSTEM_ALARM_ACE_TYPE   0x03
+#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
+#define ACCESS_ALLOWED_OBJECT_ACE_TYPE  0x05
+#define ACCESS_DENIED_OBJECT_ACE_TYPE   0x06
+#define SYSTEM_AUDIT_OBJECT_ACE_TYPE    0x07
+#define SYSTEM_ALARM_OBJECT_ACE_TYPE    0x08
+#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
+#define ACCESS_DENIED_CALLBACK_ACE_TYPE 0x0A
+#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
+#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE  0x0C
+#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE  0x0D
+#define SYSTEM_ALARM_CALLBACK_ACE_TYPE  0x0E /* Reserved */
+#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
+#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
+#define SYSTEM_MANDATORY_LABEL_ACE_TYPE 0x11
+#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
+#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
+
+/* ACE flags */
+#define OBJECT_INHERIT_ACE             0x01
+#define CONTAINER_INHERIT_ACE          0x02
+#define NO_PROPAGATE_INHERIT_ACE       0x04
+#define INHERIT_ONLY_ACE               0x08
+#define INHERITED_ACE                  0x10
+#define SUCCESSFUL_ACCESS_ACE_FLAG     0x40
+#define FAILED_ACCESS_ACE_FLAG         0x80
+
+/*
+ * Maximum size of a string representation of a SID:
+ *
+ * The fields are unsigned values in decimal. So:
+ *
+ * u8:  max 3 bytes in decimal
+ * u32: max 10 bytes in decimal
+ *
+ * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
+ *
+ * For authority field, max is when all 6 values are non-zero and it must be
+ * represented in hex. So "-0x" + 12 hex digits.
+ *
+ * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
+ */
+#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
+#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
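+/*
+ * Worked example: a SID with all SID_MAX_SUB_AUTHORITIES (15) sub-authorities
+ * needs at most SID_STRING_BASE_SIZE + 15 * SID_STRING_SUBAUTH_SIZE =
+ * 21 + 165 = 186 bytes when printed, including the NULL terminator.
+ */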
+
+#define DOMAIN_USER_RID_LE     cpu_to_le32(513)
+
+struct ksmbd_conn;
+
+struct smb_ntsd {
+       __le16 revision; /* revision level */
+       __le16 type;
+       __le32 osidoffset;
+       __le32 gsidoffset;
+       __le32 sacloffset;
+       __le32 dacloffset;
+} __packed;
+
+struct smb_sid {
+       __u8 revision; /* revision level */
+       __u8 num_subauth;
+       __u8 authority[NUM_AUTHS];
+       __le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
+} __packed;
+
+/* size of a struct cifs_sid, sans sub_auth array */
+#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
+
+struct smb_acl {
+       __le16 revision; /* revision level */
+       __le16 size;
+       __le32 num_aces;
+} __packed;
+
+struct smb_ace {
+       __u8 type;
+       __u8 flags;
+       __le16 size;
+       __le32 access_req;
+       struct smb_sid sid; /* i.e. the SID of the user or group these perms apply to */
+} __packed;
+
+struct smb_fattr {
+       kuid_t  cf_uid;
+       kgid_t  cf_gid;
+       umode_t cf_mode;
+       __le32 daccess;
+       struct posix_acl *cf_acls;
+       struct posix_acl *cf_dacls;
+};
+
+struct posix_ace_state {
+       u32 allow;
+       u32 deny;
+};
+
+struct posix_user_ace_state {
+       union {
+               kuid_t uid;
+               kgid_t gid;
+       };
+       struct posix_ace_state perms;
+};
+
+struct posix_ace_state_array {
+       int n;
+       struct posix_user_ace_state aces[];
+};
+
+/*
+ * While processing the ACEs, this maintains the partial permissions
+ * calculated so far:
+ */
+
+struct posix_acl_state {
+       struct posix_ace_state owner;
+       struct posix_ace_state group;
+       struct posix_ace_state other;
+       struct posix_ace_state everyone;
+       struct posix_ace_state mask; /* deny unused in this case */
+       struct posix_ace_state_array *users;
+       struct posix_ace_state_array *groups;
+};
+
+int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
+                  int acl_len, struct smb_fattr *fattr);
+int build_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
+                  struct smb_ntsd *ppntsd, int addition_info,
+                  __u32 *secdesclen, struct smb_fattr *fattr);
+int init_acl_state(struct posix_acl_state *state, int cnt);
+void free_acl_state(struct posix_acl_state *state);
+void posix_state_to_acl(struct posix_acl_state *state,
+                       struct posix_acl_entry *pace);
+int compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid);
+bool smb_inherit_flags(int flags, bool is_dir);
+int smb_inherit_dacl(struct ksmbd_conn *conn, struct path *path,
+                    unsigned int uid, unsigned int gid);
+int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
+                       __le32 *pdaccess, int uid);
+int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+                struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
+                bool type_check);
+void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid);
+void ksmbd_init_domain(u32 *sub_auth);
+#endif /* _SMBACL_H */
diff --git a/fs/ksmbd/smbfsctl.h b/fs/ksmbd/smbfsctl.h
new file mode 100644 (file)
index 0000000..b98418a
--- /dev/null
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/*
+ *   fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
+ *
+ *   Copyright (c) International Business Machines  Corp., 2002,2009
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ */
+
+/* IOCTL information */
+/*
+ * List of ioctl/fsctl function codes that are or could be useful in the
+ * future to remote clients like cifs or SMB2 client.  There is probably
+ * a slightly larger set of fsctls that NTFS local filesystem could handle,
+ * including the seven below that we do not have struct definitions for.
+ * Even with protocol definitions for most of these now available, we still
+ * need to do some experimentation to identify which are practical to do
+ * remotely.  Some of the following, such as the encryption/compression ones,
+ * could be invoked from tools via a specialized hook into the VFS rather
+ * than via the standard VFS entry points.
+ */
+
+#ifndef __KSMBD_SMBFSCTL_H
+#define __KSMBD_SMBFSCTL_H
+
+#define FSCTL_DFS_GET_REFERRALS      0x00060194
+#define FSCTL_DFS_GET_REFERRALS_EX   0x000601B0
+#define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000
+#define FSCTL_REQUEST_OPLOCK_LEVEL_2 0x00090004
+#define FSCTL_REQUEST_BATCH_OPLOCK   0x00090008
+#define FSCTL_LOCK_VOLUME            0x00090018
+#define FSCTL_UNLOCK_VOLUME          0x0009001C
+#define FSCTL_IS_PATHNAME_VALID      0x0009002C /* BB add struct */
+#define FSCTL_GET_COMPRESSION        0x0009003C /* BB add struct */
+#define FSCTL_SET_COMPRESSION        0x0009C040 /* BB add struct */
+#define FSCTL_QUERY_FAT_BPB          0x00090058 /* BB add struct */
+/* Verify the next FSCTL number; we had it as 0x00090090 before */
+#define FSCTL_FILESYSTEM_GET_STATS   0x00090060 /* BB add struct */
+#define FSCTL_GET_NTFS_VOLUME_DATA   0x00090064 /* BB add struct */
+#define FSCTL_GET_RETRIEVAL_POINTERS 0x00090073 /* BB add struct */
+#define FSCTL_IS_VOLUME_DIRTY        0x00090078 /* BB add struct */
+#define FSCTL_ALLOW_EXTENDED_DASD_IO 0x00090083 /* BB add struct */
+#define FSCTL_REQUEST_FILTER_OPLOCK  0x0009008C
+#define FSCTL_FIND_FILES_BY_SID      0x0009008F /* BB add struct */
+#define FSCTL_SET_OBJECT_ID          0x00090098 /* BB add struct */
+#define FSCTL_GET_OBJECT_ID          0x0009009C /* BB add struct */
+#define FSCTL_DELETE_OBJECT_ID       0x000900A0 /* BB add struct */
+#define FSCTL_SET_REPARSE_POINT      0x000900A4 /* BB add struct */
+#define FSCTL_GET_REPARSE_POINT      0x000900A8 /* BB add struct */
+#define FSCTL_DELETE_REPARSE_POINT   0x000900AC /* BB add struct */
+#define FSCTL_SET_OBJECT_ID_EXTENDED 0x000900BC /* BB add struct */
+#define FSCTL_CREATE_OR_GET_OBJECT_ID 0x000900C0 /* BB add struct */
+#define FSCTL_SET_SPARSE             0x000900C4 /* BB add struct */
+#define FSCTL_SET_ZERO_DATA          0x000980C8 /* BB add struct */
+#define FSCTL_SET_ENCRYPTION         0x000900D7 /* BB add struct */
+#define FSCTL_ENCRYPTION_FSCTL_IO    0x000900DB /* BB add struct */
+#define FSCTL_WRITE_RAW_ENCRYPTED    0x000900DF /* BB add struct */
+#define FSCTL_READ_RAW_ENCRYPTED     0x000900E3 /* BB add struct */
+#define FSCTL_READ_FILE_USN_DATA     0x000900EB /* BB add struct */
+#define FSCTL_WRITE_USN_CLOSE_RECORD 0x000900EF /* BB add struct */
+#define FSCTL_SIS_COPYFILE           0x00090100 /* BB add struct */
+#define FSCTL_RECALL_FILE            0x00090117 /* BB add struct */
+#define FSCTL_QUERY_SPARING_INFO     0x00090138 /* BB add struct */
+#define FSCTL_SET_ZERO_ON_DEALLOC    0x00090194 /* BB add struct */
+#define FSCTL_SET_SHORT_NAME_BEHAVIOR 0x000901B4 /* BB add struct */
+#define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF /* BB add struct */
+#define FSCTL_SET_DEFECT_MANAGEMENT  0x00098134 /* BB add struct */
+#define FSCTL_DUPLICATE_EXTENTS_TO_FILE 0x00098344
+#define FSCTL_SIS_LINK_FILES         0x0009C104
+#define FSCTL_PIPE_PEEK              0x0011400C /* BB add struct */
+#define FSCTL_PIPE_TRANSCEIVE        0x0011C017 /* BB add struct */
+/* strange that the number for this op is not sequential with previous op */
+#define FSCTL_PIPE_WAIT              0x00110018 /* BB add struct */
+#define FSCTL_REQUEST_RESUME_KEY     0x00140078
+#define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */
+#define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */
+#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204
+#define FSCTL_QUERY_NETWORK_INTERFACE_INFO 0x001401FC
+#define FSCTL_COPYCHUNK              0x001440F2
+#define FSCTL_COPYCHUNK_WRITE        0x001480F2
+
+#define IO_REPARSE_TAG_MOUNT_POINT   0xA0000003
+#define IO_REPARSE_TAG_HSM           0xC0000004
+#define IO_REPARSE_TAG_SIS           0x80000007
+
+/* WSL reparse tags */
+#define IO_REPARSE_TAG_LX_SYMLINK_LE   cpu_to_le32(0xA000001D)
+#define IO_REPARSE_TAG_AF_UNIX_LE      cpu_to_le32(0x80000023)
+#define IO_REPARSE_TAG_LX_FIFO_LE      cpu_to_le32(0x80000024)
+#define IO_REPARSE_TAG_LX_CHR_LE       cpu_to_le32(0x80000025)
+#define IO_REPARSE_TAG_LX_BLK_LE       cpu_to_le32(0x80000026)
+#endif /* __KSMBD_SMBFSCTL_H */
diff --git a/fs/ksmbd/smbstatus.h b/fs/ksmbd/smbstatus.h
new file mode 100644 (file)
index 0000000..108a8b6
--- /dev/null
@@ -0,0 +1,1822 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/*
+ *   fs/cifs/smb2status.h
+ *
+ *   SMB2 Status code (network error) definitions
+ *   Definitions are from MS-ERREF
+ *
+ *   Copyright (c) International Business Machines  Corp., 2009,2011
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ */
+
+/*
+ *  0 1 2 3 4 5 6 7 8 9 0 A B C D E F 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ *  SEV C N <-------Facility--------> <------Error Status Code------>
+ *
+ *  C is set for a "customer defined" error; the N bit is reserved and MBZ
+ */
+
+#define STATUS_SEVERITY_SUCCESS cpu_to_le32(0x0000)
+#define STATUS_SEVERITY_INFORMATIONAL cpu_to_le32(0x0001)
+#define STATUS_SEVERITY_WARNING cpu_to_le32(0x0002)
+#define STATUS_SEVERITY_ERROR cpu_to_le32(0x0003)
+
+struct ntstatus {
+       /* Facility is the high 12 bits of the following field */
+       __le32 Facility; /* low 2 bits Severity, next is Customer, then rsrvd */
+       __le32 Code;
+};
+
+#define STATUS_SUCCESS 0x00000000
+#define STATUS_WAIT_0 cpu_to_le32(0x00000000)
+#define STATUS_WAIT_1 cpu_to_le32(0x00000001)
+#define STATUS_WAIT_2 cpu_to_le32(0x00000002)
+#define STATUS_WAIT_3 cpu_to_le32(0x00000003)
+#define STATUS_WAIT_63 cpu_to_le32(0x0000003F)
+#define STATUS_ABANDONED cpu_to_le32(0x00000080)
+#define STATUS_ABANDONED_WAIT_0 cpu_to_le32(0x00000080)
+#define STATUS_ABANDONED_WAIT_63 cpu_to_le32(0x000000BF)
+#define STATUS_USER_APC cpu_to_le32(0x000000C0)
+#define STATUS_KERNEL_APC cpu_to_le32(0x00000100)
+#define STATUS_ALERTED cpu_to_le32(0x00000101)
+#define STATUS_TIMEOUT cpu_to_le32(0x00000102)
+#define STATUS_PENDING cpu_to_le32(0x00000103)
+#define STATUS_REPARSE cpu_to_le32(0x00000104)
+#define STATUS_MORE_ENTRIES cpu_to_le32(0x00000105)
+#define STATUS_NOT_ALL_ASSIGNED cpu_to_le32(0x00000106)
+#define STATUS_SOME_NOT_MAPPED cpu_to_le32(0x00000107)
+#define STATUS_OPLOCK_BREAK_IN_PROGRESS cpu_to_le32(0x00000108)
+#define STATUS_VOLUME_MOUNTED cpu_to_le32(0x00000109)
+#define STATUS_RXACT_COMMITTED cpu_to_le32(0x0000010A)
+#define STATUS_NOTIFY_CLEANUP cpu_to_le32(0x0000010B)
+#define STATUS_NOTIFY_ENUM_DIR cpu_to_le32(0x0000010C)
+#define STATUS_NO_QUOTAS_FOR_ACCOUNT cpu_to_le32(0x0000010D)
+#define STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED cpu_to_le32(0x0000010E)
+#define STATUS_PAGE_FAULT_TRANSITION cpu_to_le32(0x00000110)
+#define STATUS_PAGE_FAULT_DEMAND_ZERO cpu_to_le32(0x00000111)
+#define STATUS_PAGE_FAULT_COPY_ON_WRITE cpu_to_le32(0x00000112)
+#define STATUS_PAGE_FAULT_GUARD_PAGE cpu_to_le32(0x00000113)
+#define STATUS_PAGE_FAULT_PAGING_FILE cpu_to_le32(0x00000114)
+#define STATUS_CACHE_PAGE_LOCKED cpu_to_le32(0x00000115)
+#define STATUS_CRASH_DUMP cpu_to_le32(0x00000116)
+#define STATUS_BUFFER_ALL_ZEROS cpu_to_le32(0x00000117)
+#define STATUS_REPARSE_OBJECT cpu_to_le32(0x00000118)
+#define STATUS_RESOURCE_REQUIREMENTS_CHANGED cpu_to_le32(0x00000119)
+#define STATUS_TRANSLATION_COMPLETE cpu_to_le32(0x00000120)
+#define STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY cpu_to_le32(0x00000121)
+#define STATUS_NOTHING_TO_TERMINATE cpu_to_le32(0x00000122)
+#define STATUS_PROCESS_NOT_IN_JOB cpu_to_le32(0x00000123)
+#define STATUS_PROCESS_IN_JOB cpu_to_le32(0x00000124)
+#define STATUS_VOLSNAP_HIBERNATE_READY cpu_to_le32(0x00000125)
+#define STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY cpu_to_le32(0x00000126)
+#define STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED cpu_to_le32(0x00000127)
+#define STATUS_INTERRUPT_STILL_CONNECTED cpu_to_le32(0x00000128)
+#define STATUS_PROCESS_CLONED cpu_to_le32(0x00000129)
+#define STATUS_FILE_LOCKED_WITH_ONLY_READERS cpu_to_le32(0x0000012A)
+#define STATUS_FILE_LOCKED_WITH_WRITERS cpu_to_le32(0x0000012B)
+#define STATUS_RESOURCEMANAGER_READ_ONLY cpu_to_le32(0x00000202)
+#define STATUS_WAIT_FOR_OPLOCK cpu_to_le32(0x00000367)
+#define DBG_EXCEPTION_HANDLED cpu_to_le32(0x00010001)
+#define DBG_CONTINUE cpu_to_le32(0x00010002)
+#define STATUS_FLT_IO_COMPLETE cpu_to_le32(0x001C0001)
+#define STATUS_OBJECT_NAME_EXISTS cpu_to_le32(0x40000000)
+#define STATUS_THREAD_WAS_SUSPENDED cpu_to_le32(0x40000001)
+#define STATUS_WORKING_SET_LIMIT_RANGE cpu_to_le32(0x40000002)
+#define STATUS_IMAGE_NOT_AT_BASE cpu_to_le32(0x40000003)
+#define STATUS_RXACT_STATE_CREATED cpu_to_le32(0x40000004)
+#define STATUS_SEGMENT_NOTIFICATION cpu_to_le32(0x40000005)
+#define STATUS_LOCAL_USER_SESSION_KEY cpu_to_le32(0x40000006)
+#define STATUS_BAD_CURRENT_DIRECTORY cpu_to_le32(0x40000007)
+#define STATUS_SERIAL_MORE_WRITES cpu_to_le32(0x40000008)
+#define STATUS_REGISTRY_RECOVERED cpu_to_le32(0x40000009)
+#define STATUS_FT_READ_RECOVERY_FROM_BACKUP cpu_to_le32(0x4000000A)
+#define STATUS_FT_WRITE_RECOVERY cpu_to_le32(0x4000000B)
+#define STATUS_SERIAL_COUNTER_TIMEOUT cpu_to_le32(0x4000000C)
+#define STATUS_NULL_LM_PASSWORD cpu_to_le32(0x4000000D)
+#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH cpu_to_le32(0x4000000E)
+#define STATUS_RECEIVE_PARTIAL cpu_to_le32(0x4000000F)
+#define STATUS_RECEIVE_EXPEDITED cpu_to_le32(0x40000010)
+#define STATUS_RECEIVE_PARTIAL_EXPEDITED cpu_to_le32(0x40000011)
+#define STATUS_EVENT_DONE cpu_to_le32(0x40000012)
+#define STATUS_EVENT_PENDING cpu_to_le32(0x40000013)
+#define STATUS_CHECKING_FILE_SYSTEM cpu_to_le32(0x40000014)
+#define STATUS_FATAL_APP_EXIT cpu_to_le32(0x40000015)
+#define STATUS_PREDEFINED_HANDLE cpu_to_le32(0x40000016)
+#define STATUS_WAS_UNLOCKED cpu_to_le32(0x40000017)
+#define STATUS_SERVICE_NOTIFICATION cpu_to_le32(0x40000018)
+#define STATUS_WAS_LOCKED cpu_to_le32(0x40000019)
+#define STATUS_LOG_HARD_ERROR cpu_to_le32(0x4000001A)
+#define STATUS_ALREADY_WIN32 cpu_to_le32(0x4000001B)
+#define STATUS_WX86_UNSIMULATE cpu_to_le32(0x4000001C)
+#define STATUS_WX86_CONTINUE cpu_to_le32(0x4000001D)
+#define STATUS_WX86_SINGLE_STEP cpu_to_le32(0x4000001E)
+#define STATUS_WX86_BREAKPOINT cpu_to_le32(0x4000001F)
+#define STATUS_WX86_EXCEPTION_CONTINUE cpu_to_le32(0x40000020)
+#define STATUS_WX86_EXCEPTION_LASTCHANCE cpu_to_le32(0x40000021)
+#define STATUS_WX86_EXCEPTION_CHAIN cpu_to_le32(0x40000022)
+#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE cpu_to_le32(0x40000023)
+#define STATUS_NO_YIELD_PERFORMED cpu_to_le32(0x40000024)
+#define STATUS_TIMER_RESUME_IGNORED cpu_to_le32(0x40000025)
+#define STATUS_ARBITRATION_UNHANDLED cpu_to_le32(0x40000026)
+#define STATUS_CARDBUS_NOT_SUPPORTED cpu_to_le32(0x40000027)
+#define STATUS_WX86_CREATEWX86TIB cpu_to_le32(0x40000028)
+#define STATUS_MP_PROCESSOR_MISMATCH cpu_to_le32(0x40000029)
+#define STATUS_HIBERNATED cpu_to_le32(0x4000002A)
+#define STATUS_RESUME_HIBERNATION cpu_to_le32(0x4000002B)
+#define STATUS_FIRMWARE_UPDATED cpu_to_le32(0x4000002C)
+#define STATUS_DRIVERS_LEAKING_LOCKED_PAGES cpu_to_le32(0x4000002D)
+#define STATUS_MESSAGE_RETRIEVED cpu_to_le32(0x4000002E)
+#define STATUS_SYSTEM_POWERSTATE_TRANSITION cpu_to_le32(0x4000002F)
+#define STATUS_ALPC_CHECK_COMPLETION_LIST cpu_to_le32(0x40000030)
+#define STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION cpu_to_le32(0x40000031)
+#define STATUS_ACCESS_AUDIT_BY_POLICY cpu_to_le32(0x40000032)
+#define STATUS_ABANDON_HIBERFILE cpu_to_le32(0x40000033)
+#define STATUS_BIZRULES_NOT_ENABLED cpu_to_le32(0x40000034)
+#define STATUS_WAKE_SYSTEM cpu_to_le32(0x40000294)
+#define STATUS_DS_SHUTTING_DOWN cpu_to_le32(0x40000370)
+#define DBG_REPLY_LATER cpu_to_le32(0x40010001)
+#define DBG_UNABLE_TO_PROVIDE_HANDLE cpu_to_le32(0x40010002)
+#define DBG_TERMINATE_THREAD cpu_to_le32(0x40010003)
+#define DBG_TERMINATE_PROCESS cpu_to_le32(0x40010004)
+#define DBG_CONTROL_C cpu_to_le32(0x40010005)
+#define DBG_PRINTEXCEPTION_C cpu_to_le32(0x40010006)
+#define DBG_RIPEXCEPTION cpu_to_le32(0x40010007)
+#define DBG_CONTROL_BREAK cpu_to_le32(0x40010008)
+#define DBG_COMMAND_EXCEPTION cpu_to_le32(0x40010009)
+#define RPC_NT_UUID_LOCAL_ONLY cpu_to_le32(0x40020056)
+#define RPC_NT_SEND_INCOMPLETE cpu_to_le32(0x400200AF)
+#define STATUS_CTX_CDM_CONNECT cpu_to_le32(0x400A0004)
+#define STATUS_CTX_CDM_DISCONNECT cpu_to_le32(0x400A0005)
+#define STATUS_SXS_RELEASE_ACTIVATION_CONTEXT cpu_to_le32(0x4015000D)
+#define STATUS_RECOVERY_NOT_NEEDED cpu_to_le32(0x40190034)
+#define STATUS_RM_ALREADY_STARTED cpu_to_le32(0x40190035)
+#define STATUS_LOG_NO_RESTART cpu_to_le32(0x401A000C)
+#define STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST cpu_to_le32(0x401B00EC)
+#define STATUS_GRAPHICS_PARTIAL_DATA_POPULATED cpu_to_le32(0x401E000A)
+#define STATUS_GRAPHICS_DRIVER_MISMATCH cpu_to_le32(0x401E0117)
+#define STATUS_GRAPHICS_MODE_NOT_PINNED cpu_to_le32(0x401E0307)
+#define STATUS_GRAPHICS_NO_PREFERRED_MODE cpu_to_le32(0x401E031E)
+#define STATUS_GRAPHICS_DATASET_IS_EMPTY cpu_to_le32(0x401E034B)
+#define STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET cpu_to_le32(0x401E034C)
+#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED        \
+       cpu_to_le32(0x401E0351)
+#define STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS cpu_to_le32(0x401E042F)
+#define STATUS_GRAPHICS_LEADLINK_START_DEFERRED cpu_to_le32(0x401E0437)
+#define STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY cpu_to_le32(0x401E0439)
+#define STATUS_GRAPHICS_START_DEFERRED cpu_to_le32(0x401E043A)
+#define STATUS_NDIS_INDICATION_REQUIRED cpu_to_le32(0x40230001)
+#define STATUS_GUARD_PAGE_VIOLATION cpu_to_le32(0x80000001)
+#define STATUS_DATATYPE_MISALIGNMENT cpu_to_le32(0x80000002)
+#define STATUS_BREAKPOINT cpu_to_le32(0x80000003)
+#define STATUS_SINGLE_STEP cpu_to_le32(0x80000004)
+#define STATUS_BUFFER_OVERFLOW cpu_to_le32(0x80000005)
+#define STATUS_NO_MORE_FILES cpu_to_le32(0x80000006)
+#define STATUS_WAKE_SYSTEM_DEBUGGER cpu_to_le32(0x80000007)
+#define STATUS_HANDLES_CLOSED cpu_to_le32(0x8000000A)
+#define STATUS_NO_INHERITANCE cpu_to_le32(0x8000000B)
+#define STATUS_GUID_SUBSTITUTION_MADE cpu_to_le32(0x8000000C)
+#define STATUS_PARTIAL_COPY cpu_to_le32(0x8000000D)
+#define STATUS_DEVICE_PAPER_EMPTY cpu_to_le32(0x8000000E)
+#define STATUS_DEVICE_POWERED_OFF cpu_to_le32(0x8000000F)
+#define STATUS_DEVICE_OFF_LINE cpu_to_le32(0x80000010)
+#define STATUS_DEVICE_BUSY cpu_to_le32(0x80000011)
+#define STATUS_NO_MORE_EAS cpu_to_le32(0x80000012)
+#define STATUS_INVALID_EA_NAME cpu_to_le32(0x80000013)
+#define STATUS_EA_LIST_INCONSISTENT cpu_to_le32(0x80000014)
+#define STATUS_INVALID_EA_FLAG cpu_to_le32(0x80000015)
+#define STATUS_VERIFY_REQUIRED cpu_to_le32(0x80000016)
+#define STATUS_EXTRANEOUS_INFORMATION cpu_to_le32(0x80000017)
+#define STATUS_RXACT_COMMIT_NECESSARY cpu_to_le32(0x80000018)
+#define STATUS_NO_MORE_ENTRIES cpu_to_le32(0x8000001A)
+#define STATUS_FILEMARK_DETECTED cpu_to_le32(0x8000001B)
+#define STATUS_MEDIA_CHANGED cpu_to_le32(0x8000001C)
+#define STATUS_BUS_RESET cpu_to_le32(0x8000001D)
+#define STATUS_END_OF_MEDIA cpu_to_le32(0x8000001E)
+#define STATUS_BEGINNING_OF_MEDIA cpu_to_le32(0x8000001F)
+#define STATUS_MEDIA_CHECK cpu_to_le32(0x80000020)
+#define STATUS_SETMARK_DETECTED cpu_to_le32(0x80000021)
+#define STATUS_NO_DATA_DETECTED cpu_to_le32(0x80000022)
+#define STATUS_REDIRECTOR_HAS_OPEN_HANDLES cpu_to_le32(0x80000023)
+#define STATUS_SERVER_HAS_OPEN_HANDLES cpu_to_le32(0x80000024)
+#define STATUS_ALREADY_DISCONNECTED cpu_to_le32(0x80000025)
+#define STATUS_LONGJUMP cpu_to_le32(0x80000026)
+#define STATUS_CLEANER_CARTRIDGE_INSTALLED cpu_to_le32(0x80000027)
+#define STATUS_PLUGPLAY_QUERY_VETOED cpu_to_le32(0x80000028)
+#define STATUS_UNWIND_CONSOLIDATE cpu_to_le32(0x80000029)
+#define STATUS_REGISTRY_HIVE_RECOVERED cpu_to_le32(0x8000002A)
+#define STATUS_DLL_MIGHT_BE_INSECURE cpu_to_le32(0x8000002B)
+#define STATUS_DLL_MIGHT_BE_INCOMPATIBLE cpu_to_le32(0x8000002C)
+#define STATUS_STOPPED_ON_SYMLINK cpu_to_le32(0x8000002D)
+#define STATUS_DEVICE_REQUIRES_CLEANING cpu_to_le32(0x80000288)
+#define STATUS_DEVICE_DOOR_OPEN cpu_to_le32(0x80000289)
+#define STATUS_DATA_LOST_REPAIR cpu_to_le32(0x80000803)
+#define DBG_EXCEPTION_NOT_HANDLED cpu_to_le32(0x80010001)
+#define STATUS_CLUSTER_NODE_ALREADY_UP cpu_to_le32(0x80130001)
+#define STATUS_CLUSTER_NODE_ALREADY_DOWN cpu_to_le32(0x80130002)
+#define STATUS_CLUSTER_NETWORK_ALREADY_ONLINE cpu_to_le32(0x80130003)
+#define STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE cpu_to_le32(0x80130004)
+#define STATUS_CLUSTER_NODE_ALREADY_MEMBER cpu_to_le32(0x80130005)
+#define STATUS_COULD_NOT_RESIZE_LOG cpu_to_le32(0x80190009)
+#define STATUS_NO_TXF_METADATA cpu_to_le32(0x80190029)
+#define STATUS_CANT_RECOVER_WITH_HANDLE_OPEN cpu_to_le32(0x80190031)
+#define STATUS_TXF_METADATA_ALREADY_PRESENT cpu_to_le32(0x80190041)
+#define STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET cpu_to_le32(0x80190042)
+#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED      \
+       cpu_to_le32(0x801B00EB)
+#define STATUS_FLT_BUFFER_TOO_SMALL cpu_to_le32(0x801C0001)
+#define STATUS_FVE_PARTIAL_METADATA cpu_to_le32(0x80210001)
+#define STATUS_UNSUCCESSFUL cpu_to_le32(0xC0000001)
+#define STATUS_NOT_IMPLEMENTED cpu_to_le32(0xC0000002)
+#define STATUS_INVALID_INFO_CLASS cpu_to_le32(0xC0000003)
+#define STATUS_INFO_LENGTH_MISMATCH cpu_to_le32(0xC0000004)
+#define STATUS_ACCESS_VIOLATION cpu_to_le32(0xC0000005)
+#define STATUS_IN_PAGE_ERROR cpu_to_le32(0xC0000006)
+#define STATUS_PAGEFILE_QUOTA cpu_to_le32(0xC0000007)
+#define STATUS_INVALID_HANDLE cpu_to_le32(0xC0000008)
+#define STATUS_BAD_INITIAL_STACK cpu_to_le32(0xC0000009)
+#define STATUS_BAD_INITIAL_PC cpu_to_le32(0xC000000A)
+#define STATUS_INVALID_CID cpu_to_le32(0xC000000B)
+#define STATUS_TIMER_NOT_CANCELED cpu_to_le32(0xC000000C)
+#define STATUS_INVALID_PARAMETER cpu_to_le32(0xC000000D)
+#define STATUS_NO_SUCH_DEVICE cpu_to_le32(0xC000000E)
+#define STATUS_NO_SUCH_FILE cpu_to_le32(0xC000000F)
+#define STATUS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0000010)
+#define STATUS_END_OF_FILE cpu_to_le32(0xC0000011)
+#define STATUS_WRONG_VOLUME cpu_to_le32(0xC0000012)
+#define STATUS_NO_MEDIA_IN_DEVICE cpu_to_le32(0xC0000013)
+#define STATUS_UNRECOGNIZED_MEDIA cpu_to_le32(0xC0000014)
+#define STATUS_NONEXISTENT_SECTOR cpu_to_le32(0xC0000015)
+#define STATUS_MORE_PROCESSING_REQUIRED cpu_to_le32(0xC0000016)
+#define STATUS_NO_MEMORY cpu_to_le32(0xC0000017)
+#define STATUS_CONFLICTING_ADDRESSES cpu_to_le32(0xC0000018)
+#define STATUS_NOT_MAPPED_VIEW cpu_to_le32(0xC0000019)
+#define STATUS_UNABLE_TO_FREE_VM cpu_to_le32(0xC000001A)
+#define STATUS_UNABLE_TO_DELETE_SECTION cpu_to_le32(0xC000001B)
+#define STATUS_INVALID_SYSTEM_SERVICE cpu_to_le32(0xC000001C)
+#define STATUS_ILLEGAL_INSTRUCTION cpu_to_le32(0xC000001D)
+#define STATUS_INVALID_LOCK_SEQUENCE cpu_to_le32(0xC000001E)
+#define STATUS_INVALID_VIEW_SIZE cpu_to_le32(0xC000001F)
+#define STATUS_INVALID_FILE_FOR_SECTION cpu_to_le32(0xC0000020)
+#define STATUS_ALREADY_COMMITTED cpu_to_le32(0xC0000021)
+#define STATUS_ACCESS_DENIED cpu_to_le32(0xC0000022)
+#define STATUS_BUFFER_TOO_SMALL cpu_to_le32(0xC0000023)
+#define STATUS_OBJECT_TYPE_MISMATCH cpu_to_le32(0xC0000024)
+#define STATUS_NONCONTINUABLE_EXCEPTION cpu_to_le32(0xC0000025)
+#define STATUS_INVALID_DISPOSITION cpu_to_le32(0xC0000026)
+#define STATUS_UNWIND cpu_to_le32(0xC0000027)
+#define STATUS_BAD_STACK cpu_to_le32(0xC0000028)
+#define STATUS_INVALID_UNWIND_TARGET cpu_to_le32(0xC0000029)
+#define STATUS_NOT_LOCKED cpu_to_le32(0xC000002A)
+#define STATUS_PARITY_ERROR cpu_to_le32(0xC000002B)
+#define STATUS_UNABLE_TO_DECOMMIT_VM cpu_to_le32(0xC000002C)
+#define STATUS_NOT_COMMITTED cpu_to_le32(0xC000002D)
+#define STATUS_INVALID_PORT_ATTRIBUTES cpu_to_le32(0xC000002E)
+#define STATUS_PORT_MESSAGE_TOO_LONG cpu_to_le32(0xC000002F)
+#define STATUS_INVALID_PARAMETER_MIX cpu_to_le32(0xC0000030)
+#define STATUS_INVALID_QUOTA_LOWER cpu_to_le32(0xC0000031)
+#define STATUS_DISK_CORRUPT_ERROR cpu_to_le32(0xC0000032)
+#define STATUS_OBJECT_NAME_INVALID cpu_to_le32(0xC0000033)
+#define STATUS_OBJECT_NAME_NOT_FOUND cpu_to_le32(0xC0000034)
+#define STATUS_OBJECT_NAME_COLLISION cpu_to_le32(0xC0000035)
+#define STATUS_PORT_DISCONNECTED cpu_to_le32(0xC0000037)
+#define STATUS_DEVICE_ALREADY_ATTACHED cpu_to_le32(0xC0000038)
+#define STATUS_OBJECT_PATH_INVALID cpu_to_le32(0xC0000039)
+#define STATUS_OBJECT_PATH_NOT_FOUND cpu_to_le32(0xC000003A)
+#define STATUS_OBJECT_PATH_SYNTAX_BAD cpu_to_le32(0xC000003B)
+#define STATUS_DATA_OVERRUN cpu_to_le32(0xC000003C)
+#define STATUS_DATA_LATE_ERROR cpu_to_le32(0xC000003D)
+#define STATUS_DATA_ERROR cpu_to_le32(0xC000003E)
+#define STATUS_CRC_ERROR cpu_to_le32(0xC000003F)
+#define STATUS_SECTION_TOO_BIG cpu_to_le32(0xC0000040)
+#define STATUS_PORT_CONNECTION_REFUSED cpu_to_le32(0xC0000041)
+#define STATUS_INVALID_PORT_HANDLE cpu_to_le32(0xC0000042)
+#define STATUS_SHARING_VIOLATION cpu_to_le32(0xC0000043)
+#define STATUS_QUOTA_EXCEEDED cpu_to_le32(0xC0000044)
+#define STATUS_INVALID_PAGE_PROTECTION cpu_to_le32(0xC0000045)
+#define STATUS_MUTANT_NOT_OWNED cpu_to_le32(0xC0000046)
+#define STATUS_SEMAPHORE_LIMIT_EXCEEDED cpu_to_le32(0xC0000047)
+#define STATUS_PORT_ALREADY_SET cpu_to_le32(0xC0000048)
+#define STATUS_SECTION_NOT_IMAGE cpu_to_le32(0xC0000049)
+#define STATUS_SUSPEND_COUNT_EXCEEDED cpu_to_le32(0xC000004A)
+#define STATUS_THREAD_IS_TERMINATING cpu_to_le32(0xC000004B)
+#define STATUS_BAD_WORKING_SET_LIMIT cpu_to_le32(0xC000004C)
+#define STATUS_INCOMPATIBLE_FILE_MAP cpu_to_le32(0xC000004D)
+#define STATUS_SECTION_PROTECTION cpu_to_le32(0xC000004E)
+#define STATUS_EAS_NOT_SUPPORTED cpu_to_le32(0xC000004F)
+#define STATUS_EA_TOO_LARGE cpu_to_le32(0xC0000050)
+#define STATUS_NONEXISTENT_EA_ENTRY cpu_to_le32(0xC0000051)
+#define STATUS_NO_EAS_ON_FILE cpu_to_le32(0xC0000052)
+#define STATUS_EA_CORRUPT_ERROR cpu_to_le32(0xC0000053)
+#define STATUS_FILE_LOCK_CONFLICT cpu_to_le32(0xC0000054)
+#define STATUS_LOCK_NOT_GRANTED cpu_to_le32(0xC0000055)
+#define STATUS_DELETE_PENDING cpu_to_le32(0xC0000056)
+#define STATUS_CTL_FILE_NOT_SUPPORTED cpu_to_le32(0xC0000057)
+#define STATUS_UNKNOWN_REVISION cpu_to_le32(0xC0000058)
+#define STATUS_REVISION_MISMATCH cpu_to_le32(0xC0000059)
+#define STATUS_INVALID_OWNER cpu_to_le32(0xC000005A)
+#define STATUS_INVALID_PRIMARY_GROUP cpu_to_le32(0xC000005B)
+#define STATUS_NO_IMPERSONATION_TOKEN cpu_to_le32(0xC000005C)
+#define STATUS_CANT_DISABLE_MANDATORY cpu_to_le32(0xC000005D)
+#define STATUS_NO_LOGON_SERVERS cpu_to_le32(0xC000005E)
+#define STATUS_NO_SUCH_LOGON_SESSION cpu_to_le32(0xC000005F)
+#define STATUS_NO_SUCH_PRIVILEGE cpu_to_le32(0xC0000060)
+#define STATUS_PRIVILEGE_NOT_HELD cpu_to_le32(0xC0000061)
+#define STATUS_INVALID_ACCOUNT_NAME cpu_to_le32(0xC0000062)
+#define STATUS_USER_EXISTS cpu_to_le32(0xC0000063)
+#define STATUS_NO_SUCH_USER cpu_to_le32(0xC0000064)
+#define STATUS_GROUP_EXISTS cpu_to_le32(0xC0000065)
+#define STATUS_NO_SUCH_GROUP cpu_to_le32(0xC0000066)
+#define STATUS_MEMBER_IN_GROUP cpu_to_le32(0xC0000067)
+#define STATUS_MEMBER_NOT_IN_GROUP cpu_to_le32(0xC0000068)
+#define STATUS_LAST_ADMIN cpu_to_le32(0xC0000069)
+#define STATUS_WRONG_PASSWORD cpu_to_le32(0xC000006A)
+#define STATUS_ILL_FORMED_PASSWORD cpu_to_le32(0xC000006B)
+#define STATUS_PASSWORD_RESTRICTION cpu_to_le32(0xC000006C)
+#define STATUS_LOGON_FAILURE cpu_to_le32(0xC000006D)
+#define STATUS_ACCOUNT_RESTRICTION cpu_to_le32(0xC000006E)
+#define STATUS_INVALID_LOGON_HOURS cpu_to_le32(0xC000006F)
+#define STATUS_INVALID_WORKSTATION cpu_to_le32(0xC0000070)
+#define STATUS_PASSWORD_EXPIRED cpu_to_le32(0xC0000071)
+#define STATUS_ACCOUNT_DISABLED cpu_to_le32(0xC0000072)
+#define STATUS_NONE_MAPPED cpu_to_le32(0xC0000073)
+#define STATUS_TOO_MANY_LUIDS_REQUESTED cpu_to_le32(0xC0000074)
+#define STATUS_LUIDS_EXHAUSTED cpu_to_le32(0xC0000075)
+#define STATUS_INVALID_SUB_AUTHORITY cpu_to_le32(0xC0000076)
+#define STATUS_INVALID_ACL cpu_to_le32(0xC0000077)
+#define STATUS_INVALID_SID cpu_to_le32(0xC0000078)
+#define STATUS_INVALID_SECURITY_DESCR cpu_to_le32(0xC0000079)
+#define STATUS_PROCEDURE_NOT_FOUND cpu_to_le32(0xC000007A)
+#define STATUS_INVALID_IMAGE_FORMAT cpu_to_le32(0xC000007B)
+#define STATUS_NO_TOKEN cpu_to_le32(0xC000007C)
+#define STATUS_BAD_INHERITANCE_ACL cpu_to_le32(0xC000007D)
+#define STATUS_RANGE_NOT_LOCKED cpu_to_le32(0xC000007E)
+#define STATUS_DISK_FULL cpu_to_le32(0xC000007F)
+#define STATUS_SERVER_DISABLED cpu_to_le32(0xC0000080)
+#define STATUS_SERVER_NOT_DISABLED cpu_to_le32(0xC0000081)
+#define STATUS_TOO_MANY_GUIDS_REQUESTED cpu_to_le32(0xC0000082)
+#define STATUS_GUIDS_EXHAUSTED cpu_to_le32(0xC0000083)
+#define STATUS_INVALID_ID_AUTHORITY cpu_to_le32(0xC0000084)
+#define STATUS_AGENTS_EXHAUSTED cpu_to_le32(0xC0000085)
+#define STATUS_INVALID_VOLUME_LABEL cpu_to_le32(0xC0000086)
+#define STATUS_SECTION_NOT_EXTENDED cpu_to_le32(0xC0000087)
+#define STATUS_NOT_MAPPED_DATA cpu_to_le32(0xC0000088)
+#define STATUS_RESOURCE_DATA_NOT_FOUND cpu_to_le32(0xC0000089)
+#define STATUS_RESOURCE_TYPE_NOT_FOUND cpu_to_le32(0xC000008A)
+#define STATUS_RESOURCE_NAME_NOT_FOUND cpu_to_le32(0xC000008B)
+#define STATUS_ARRAY_BOUNDS_EXCEEDED cpu_to_le32(0xC000008C)
+#define STATUS_FLOAT_DENORMAL_OPERAND cpu_to_le32(0xC000008D)
+#define STATUS_FLOAT_DIVIDE_BY_ZERO cpu_to_le32(0xC000008E)
+#define STATUS_FLOAT_INEXACT_RESULT cpu_to_le32(0xC000008F)
+#define STATUS_FLOAT_INVALID_OPERATION cpu_to_le32(0xC0000090)
+#define STATUS_FLOAT_OVERFLOW cpu_to_le32(0xC0000091)
+#define STATUS_FLOAT_STACK_CHECK cpu_to_le32(0xC0000092)
+#define STATUS_FLOAT_UNDERFLOW cpu_to_le32(0xC0000093)
+#define STATUS_INTEGER_DIVIDE_BY_ZERO cpu_to_le32(0xC0000094)
+#define STATUS_INTEGER_OVERFLOW cpu_to_le32(0xC0000095)
+#define STATUS_PRIVILEGED_INSTRUCTION cpu_to_le32(0xC0000096)
+#define STATUS_TOO_MANY_PAGING_FILES cpu_to_le32(0xC0000097)
+#define STATUS_FILE_INVALID cpu_to_le32(0xC0000098)
+#define STATUS_ALLOTTED_SPACE_EXCEEDED cpu_to_le32(0xC0000099)
+#define STATUS_INSUFFICIENT_RESOURCES cpu_to_le32(0xC000009A)
+#define STATUS_DFS_EXIT_PATH_FOUND cpu_to_le32(0xC000009B)
+#define STATUS_DEVICE_DATA_ERROR cpu_to_le32(0xC000009C)
+#define STATUS_DEVICE_NOT_CONNECTED cpu_to_le32(0xC000009D)
+#define STATUS_DEVICE_POWER_FAILURE cpu_to_le32(0xC000009E)
+#define STATUS_FREE_VM_NOT_AT_BASE cpu_to_le32(0xC000009F)
+#define STATUS_MEMORY_NOT_ALLOCATED cpu_to_le32(0xC00000A0)
+#define STATUS_WORKING_SET_QUOTA cpu_to_le32(0xC00000A1)
+#define STATUS_MEDIA_WRITE_PROTECTED cpu_to_le32(0xC00000A2)
+#define STATUS_DEVICE_NOT_READY cpu_to_le32(0xC00000A3)
+#define STATUS_INVALID_GROUP_ATTRIBUTES cpu_to_le32(0xC00000A4)
+#define STATUS_BAD_IMPERSONATION_LEVEL cpu_to_le32(0xC00000A5)
+#define STATUS_CANT_OPEN_ANONYMOUS cpu_to_le32(0xC00000A6)
+#define STATUS_BAD_VALIDATION_CLASS cpu_to_le32(0xC00000A7)
+#define STATUS_BAD_TOKEN_TYPE cpu_to_le32(0xC00000A8)
+#define STATUS_BAD_MASTER_BOOT_RECORD cpu_to_le32(0xC00000A9)
+#define STATUS_INSTRUCTION_MISALIGNMENT cpu_to_le32(0xC00000AA)
+#define STATUS_INSTANCE_NOT_AVAILABLE cpu_to_le32(0xC00000AB)
+#define STATUS_PIPE_NOT_AVAILABLE cpu_to_le32(0xC00000AC)
+#define STATUS_INVALID_PIPE_STATE cpu_to_le32(0xC00000AD)
+#define STATUS_PIPE_BUSY cpu_to_le32(0xC00000AE)
+#define STATUS_ILLEGAL_FUNCTION cpu_to_le32(0xC00000AF)
+#define STATUS_PIPE_DISCONNECTED cpu_to_le32(0xC00000B0)
+#define STATUS_PIPE_CLOSING cpu_to_le32(0xC00000B1)
+#define STATUS_PIPE_CONNECTED cpu_to_le32(0xC00000B2)
+#define STATUS_PIPE_LISTENING cpu_to_le32(0xC00000B3)
+#define STATUS_INVALID_READ_MODE cpu_to_le32(0xC00000B4)
+#define STATUS_IO_TIMEOUT cpu_to_le32(0xC00000B5)
+#define STATUS_FILE_FORCED_CLOSED cpu_to_le32(0xC00000B6)
+#define STATUS_PROFILING_NOT_STARTED cpu_to_le32(0xC00000B7)
+#define STATUS_PROFILING_NOT_STOPPED cpu_to_le32(0xC00000B8)
+#define STATUS_COULD_NOT_INTERPRET cpu_to_le32(0xC00000B9)
+#define STATUS_FILE_IS_A_DIRECTORY cpu_to_le32(0xC00000BA)
+#define STATUS_NOT_SUPPORTED cpu_to_le32(0xC00000BB)
+#define STATUS_REMOTE_NOT_LISTENING cpu_to_le32(0xC00000BC)
+#define STATUS_DUPLICATE_NAME cpu_to_le32(0xC00000BD)
+#define STATUS_BAD_NETWORK_PATH cpu_to_le32(0xC00000BE)
+#define STATUS_NETWORK_BUSY cpu_to_le32(0xC00000BF)
+#define STATUS_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC00000C0)
+#define STATUS_TOO_MANY_COMMANDS cpu_to_le32(0xC00000C1)
+#define STATUS_ADAPTER_HARDWARE_ERROR cpu_to_le32(0xC00000C2)
+#define STATUS_INVALID_NETWORK_RESPONSE cpu_to_le32(0xC00000C3)
+#define STATUS_UNEXPECTED_NETWORK_ERROR cpu_to_le32(0xC00000C4)
+#define STATUS_BAD_REMOTE_ADAPTER cpu_to_le32(0xC00000C5)
+#define STATUS_PRINT_QUEUE_FULL cpu_to_le32(0xC00000C6)
+#define STATUS_NO_SPOOL_SPACE cpu_to_le32(0xC00000C7)
+#define STATUS_PRINT_CANCELLED cpu_to_le32(0xC00000C8)
+#define STATUS_NETWORK_NAME_DELETED cpu_to_le32(0xC00000C9)
+#define STATUS_NETWORK_ACCESS_DENIED cpu_to_le32(0xC00000CA)
+#define STATUS_BAD_DEVICE_TYPE cpu_to_le32(0xC00000CB)
+#define STATUS_BAD_NETWORK_NAME cpu_to_le32(0xC00000CC)
+#define STATUS_TOO_MANY_NAMES cpu_to_le32(0xC00000CD)
+#define STATUS_TOO_MANY_SESSIONS cpu_to_le32(0xC00000CE)
+#define STATUS_SHARING_PAUSED cpu_to_le32(0xC00000CF)
+#define STATUS_REQUEST_NOT_ACCEPTED cpu_to_le32(0xC00000D0)
+#define STATUS_REDIRECTOR_PAUSED cpu_to_le32(0xC00000D1)
+#define STATUS_NET_WRITE_FAULT cpu_to_le32(0xC00000D2)
+#define STATUS_PROFILING_AT_LIMIT cpu_to_le32(0xC00000D3)
+#define STATUS_NOT_SAME_DEVICE cpu_to_le32(0xC00000D4)
+#define STATUS_FILE_RENAMED cpu_to_le32(0xC00000D5)
+#define STATUS_VIRTUAL_CIRCUIT_CLOSED cpu_to_le32(0xC00000D6)
+#define STATUS_NO_SECURITY_ON_OBJECT cpu_to_le32(0xC00000D7)
+#define STATUS_CANT_WAIT cpu_to_le32(0xC00000D8)
+#define STATUS_PIPE_EMPTY cpu_to_le32(0xC00000D9)
+#define STATUS_CANT_ACCESS_DOMAIN_INFO cpu_to_le32(0xC00000DA)
+#define STATUS_CANT_TERMINATE_SELF cpu_to_le32(0xC00000DB)
+#define STATUS_INVALID_SERVER_STATE cpu_to_le32(0xC00000DC)
+#define STATUS_INVALID_DOMAIN_STATE cpu_to_le32(0xC00000DD)
+#define STATUS_INVALID_DOMAIN_ROLE cpu_to_le32(0xC00000DE)
+#define STATUS_NO_SUCH_DOMAIN cpu_to_le32(0xC00000DF)
+#define STATUS_DOMAIN_EXISTS cpu_to_le32(0xC00000E0)
+#define STATUS_DOMAIN_LIMIT_EXCEEDED cpu_to_le32(0xC00000E1)
+#define STATUS_OPLOCK_NOT_GRANTED cpu_to_le32(0xC00000E2)
+#define STATUS_INVALID_OPLOCK_PROTOCOL cpu_to_le32(0xC00000E3)
+#define STATUS_INTERNAL_DB_CORRUPTION cpu_to_le32(0xC00000E4)
+#define STATUS_INTERNAL_ERROR cpu_to_le32(0xC00000E5)
+#define STATUS_GENERIC_NOT_MAPPED cpu_to_le32(0xC00000E6)
+#define STATUS_BAD_DESCRIPTOR_FORMAT cpu_to_le32(0xC00000E7)
+#define STATUS_INVALID_USER_BUFFER cpu_to_le32(0xC00000E8)
+#define STATUS_UNEXPECTED_IO_ERROR cpu_to_le32(0xC00000E9)
+#define STATUS_UNEXPECTED_MM_CREATE_ERR cpu_to_le32(0xC00000EA)
+#define STATUS_UNEXPECTED_MM_MAP_ERROR cpu_to_le32(0xC00000EB)
+#define STATUS_UNEXPECTED_MM_EXTEND_ERR cpu_to_le32(0xC00000EC)
+#define STATUS_NOT_LOGON_PROCESS cpu_to_le32(0xC00000ED)
+#define STATUS_LOGON_SESSION_EXISTS cpu_to_le32(0xC00000EE)
+#define STATUS_INVALID_PARAMETER_1 cpu_to_le32(0xC00000EF)
+#define STATUS_INVALID_PARAMETER_2 cpu_to_le32(0xC00000F0)
+#define STATUS_INVALID_PARAMETER_3 cpu_to_le32(0xC00000F1)
+#define STATUS_INVALID_PARAMETER_4 cpu_to_le32(0xC00000F2)
+#define STATUS_INVALID_PARAMETER_5 cpu_to_le32(0xC00000F3)
+#define STATUS_INVALID_PARAMETER_6 cpu_to_le32(0xC00000F4)
+#define STATUS_INVALID_PARAMETER_7 cpu_to_le32(0xC00000F5)
+#define STATUS_INVALID_PARAMETER_8 cpu_to_le32(0xC00000F6)
+#define STATUS_INVALID_PARAMETER_9 cpu_to_le32(0xC00000F7)
+#define STATUS_INVALID_PARAMETER_10 cpu_to_le32(0xC00000F8)
+#define STATUS_INVALID_PARAMETER_11 cpu_to_le32(0xC00000F9)
+#define STATUS_INVALID_PARAMETER_12 cpu_to_le32(0xC00000FA)
+#define STATUS_REDIRECTOR_NOT_STARTED cpu_to_le32(0xC00000FB)
+#define STATUS_REDIRECTOR_STARTED cpu_to_le32(0xC00000FC)
+#define STATUS_STACK_OVERFLOW cpu_to_le32(0xC00000FD)
+#define STATUS_NO_SUCH_PACKAGE cpu_to_le32(0xC00000FE)
+#define STATUS_BAD_FUNCTION_TABLE cpu_to_le32(0xC00000FF)
+#define STATUS_VARIABLE_NOT_FOUND cpu_to_le32(0xC0000100)
+#define STATUS_DIRECTORY_NOT_EMPTY cpu_to_le32(0xC0000101)
+#define STATUS_FILE_CORRUPT_ERROR cpu_to_le32(0xC0000102)
+#define STATUS_NOT_A_DIRECTORY cpu_to_le32(0xC0000103)
+#define STATUS_BAD_LOGON_SESSION_STATE cpu_to_le32(0xC0000104)
+#define STATUS_LOGON_SESSION_COLLISION cpu_to_le32(0xC0000105)
+#define STATUS_NAME_TOO_LONG cpu_to_le32(0xC0000106)
+#define STATUS_FILES_OPEN cpu_to_le32(0xC0000107)
+#define STATUS_CONNECTION_IN_USE cpu_to_le32(0xC0000108)
+#define STATUS_MESSAGE_NOT_FOUND cpu_to_le32(0xC0000109)
+#define STATUS_PROCESS_IS_TERMINATING cpu_to_le32(0xC000010A)
+#define STATUS_INVALID_LOGON_TYPE cpu_to_le32(0xC000010B)
+#define STATUS_NO_GUID_TRANSLATION cpu_to_le32(0xC000010C)
+#define STATUS_CANNOT_IMPERSONATE cpu_to_le32(0xC000010D)
+#define STATUS_IMAGE_ALREADY_LOADED cpu_to_le32(0xC000010E)
+#define STATUS_ABIOS_NOT_PRESENT cpu_to_le32(0xC000010F)
+#define STATUS_ABIOS_LID_NOT_EXIST cpu_to_le32(0xC0000110)
+#define STATUS_ABIOS_LID_ALREADY_OWNED cpu_to_le32(0xC0000111)
+#define STATUS_ABIOS_NOT_LID_OWNER cpu_to_le32(0xC0000112)
+#define STATUS_ABIOS_INVALID_COMMAND cpu_to_le32(0xC0000113)
+#define STATUS_ABIOS_INVALID_LID cpu_to_le32(0xC0000114)
+#define STATUS_ABIOS_SELECTOR_NOT_AVAILABLE cpu_to_le32(0xC0000115)
+#define STATUS_ABIOS_INVALID_SELECTOR cpu_to_le32(0xC0000116)
+#define STATUS_NO_LDT cpu_to_le32(0xC0000117)
+#define STATUS_INVALID_LDT_SIZE cpu_to_le32(0xC0000118)
+#define STATUS_INVALID_LDT_OFFSET cpu_to_le32(0xC0000119)
+#define STATUS_INVALID_LDT_DESCRIPTOR cpu_to_le32(0xC000011A)
+#define STATUS_INVALID_IMAGE_NE_FORMAT cpu_to_le32(0xC000011B)
+#define STATUS_RXACT_INVALID_STATE cpu_to_le32(0xC000011C)
+#define STATUS_RXACT_COMMIT_FAILURE cpu_to_le32(0xC000011D)
+#define STATUS_MAPPED_FILE_SIZE_ZERO cpu_to_le32(0xC000011E)
+#define STATUS_TOO_MANY_OPENED_FILES cpu_to_le32(0xC000011F)
+#define STATUS_CANCELLED cpu_to_le32(0xC0000120)
+#define STATUS_CANNOT_DELETE cpu_to_le32(0xC0000121)
+#define STATUS_INVALID_COMPUTER_NAME cpu_to_le32(0xC0000122)
+#define STATUS_FILE_DELETED cpu_to_le32(0xC0000123)
+#define STATUS_SPECIAL_ACCOUNT cpu_to_le32(0xC0000124)
+#define STATUS_SPECIAL_GROUP cpu_to_le32(0xC0000125)
+#define STATUS_SPECIAL_USER cpu_to_le32(0xC0000126)
+#define STATUS_MEMBERS_PRIMARY_GROUP cpu_to_le32(0xC0000127)
+#define STATUS_FILE_CLOSED cpu_to_le32(0xC0000128)
+#define STATUS_TOO_MANY_THREADS cpu_to_le32(0xC0000129)
+#define STATUS_THREAD_NOT_IN_PROCESS cpu_to_le32(0xC000012A)
+#define STATUS_TOKEN_ALREADY_IN_USE cpu_to_le32(0xC000012B)
+#define STATUS_PAGEFILE_QUOTA_EXCEEDED cpu_to_le32(0xC000012C)
+#define STATUS_COMMITMENT_LIMIT cpu_to_le32(0xC000012D)
+#define STATUS_INVALID_IMAGE_LE_FORMAT cpu_to_le32(0xC000012E)
+#define STATUS_INVALID_IMAGE_NOT_MZ cpu_to_le32(0xC000012F)
+#define STATUS_INVALID_IMAGE_PROTECT cpu_to_le32(0xC0000130)
+#define STATUS_INVALID_IMAGE_WIN_16 cpu_to_le32(0xC0000131)
+#define STATUS_LOGON_SERVER_CONFLICT cpu_to_le32(0xC0000132)
+#define STATUS_TIME_DIFFERENCE_AT_DC cpu_to_le32(0xC0000133)
+#define STATUS_SYNCHRONIZATION_REQUIRED cpu_to_le32(0xC0000134)
+#define STATUS_DLL_NOT_FOUND cpu_to_le32(0xC0000135)
+#define STATUS_OPEN_FAILED cpu_to_le32(0xC0000136)
+#define STATUS_IO_PRIVILEGE_FAILED cpu_to_le32(0xC0000137)
+#define STATUS_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000138)
+#define STATUS_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000139)
+#define STATUS_CONTROL_C_EXIT cpu_to_le32(0xC000013A)
+#define STATUS_LOCAL_DISCONNECT cpu_to_le32(0xC000013B)
+#define STATUS_REMOTE_DISCONNECT cpu_to_le32(0xC000013C)
+#define STATUS_REMOTE_RESOURCES cpu_to_le32(0xC000013D)
+#define STATUS_LINK_FAILED cpu_to_le32(0xC000013E)
+#define STATUS_LINK_TIMEOUT cpu_to_le32(0xC000013F)
+#define STATUS_INVALID_CONNECTION cpu_to_le32(0xC0000140)
+#define STATUS_INVALID_ADDRESS cpu_to_le32(0xC0000141)
+#define STATUS_DLL_INIT_FAILED cpu_to_le32(0xC0000142)
+#define STATUS_MISSING_SYSTEMFILE cpu_to_le32(0xC0000143)
+#define STATUS_UNHANDLED_EXCEPTION cpu_to_le32(0xC0000144)
+#define STATUS_APP_INIT_FAILURE cpu_to_le32(0xC0000145)
+#define STATUS_PAGEFILE_CREATE_FAILED cpu_to_le32(0xC0000146)
+#define STATUS_NO_PAGEFILE cpu_to_le32(0xC0000147)
+#define STATUS_INVALID_LEVEL cpu_to_le32(0xC0000148)
+#define STATUS_WRONG_PASSWORD_CORE cpu_to_le32(0xC0000149)
+#define STATUS_ILLEGAL_FLOAT_CONTEXT cpu_to_le32(0xC000014A)
+#define STATUS_PIPE_BROKEN cpu_to_le32(0xC000014B)
+#define STATUS_REGISTRY_CORRUPT cpu_to_le32(0xC000014C)
+#define STATUS_REGISTRY_IO_FAILED cpu_to_le32(0xC000014D)
+#define STATUS_NO_EVENT_PAIR cpu_to_le32(0xC000014E)
+#define STATUS_UNRECOGNIZED_VOLUME cpu_to_le32(0xC000014F)
+#define STATUS_SERIAL_NO_DEVICE_INITED cpu_to_le32(0xC0000150)
+#define STATUS_NO_SUCH_ALIAS cpu_to_le32(0xC0000151)
+#define STATUS_MEMBER_NOT_IN_ALIAS cpu_to_le32(0xC0000152)
+#define STATUS_MEMBER_IN_ALIAS cpu_to_le32(0xC0000153)
+#define STATUS_ALIAS_EXISTS cpu_to_le32(0xC0000154)
+#define STATUS_LOGON_NOT_GRANTED cpu_to_le32(0xC0000155)
+#define STATUS_TOO_MANY_SECRETS cpu_to_le32(0xC0000156)
+#define STATUS_SECRET_TOO_LONG cpu_to_le32(0xC0000157)
+#define STATUS_INTERNAL_DB_ERROR cpu_to_le32(0xC0000158)
+#define STATUS_FULLSCREEN_MODE cpu_to_le32(0xC0000159)
+#define STATUS_TOO_MANY_CONTEXT_IDS cpu_to_le32(0xC000015A)
+#define STATUS_LOGON_TYPE_NOT_GRANTED cpu_to_le32(0xC000015B)
+#define STATUS_NOT_REGISTRY_FILE cpu_to_le32(0xC000015C)
+#define STATUS_NT_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000015D)
+#define STATUS_DOMAIN_CTRLR_CONFIG_ERROR cpu_to_le32(0xC000015E)
+#define STATUS_FT_MISSING_MEMBER cpu_to_le32(0xC000015F)
+#define STATUS_ILL_FORMED_SERVICE_ENTRY cpu_to_le32(0xC0000160)
+#define STATUS_ILLEGAL_CHARACTER cpu_to_le32(0xC0000161)
+#define STATUS_UNMAPPABLE_CHARACTER cpu_to_le32(0xC0000162)
+#define STATUS_UNDEFINED_CHARACTER cpu_to_le32(0xC0000163)
+#define STATUS_FLOPPY_VOLUME cpu_to_le32(0xC0000164)
+#define STATUS_FLOPPY_ID_MARK_NOT_FOUND cpu_to_le32(0xC0000165)
+#define STATUS_FLOPPY_WRONG_CYLINDER cpu_to_le32(0xC0000166)
+#define STATUS_FLOPPY_UNKNOWN_ERROR cpu_to_le32(0xC0000167)
+#define STATUS_FLOPPY_BAD_REGISTERS cpu_to_le32(0xC0000168)
+#define STATUS_DISK_RECALIBRATE_FAILED cpu_to_le32(0xC0000169)
+#define STATUS_DISK_OPERATION_FAILED cpu_to_le32(0xC000016A)
+#define STATUS_DISK_RESET_FAILED cpu_to_le32(0xC000016B)
+#define STATUS_SHARED_IRQ_BUSY cpu_to_le32(0xC000016C)
+#define STATUS_FT_ORPHANING cpu_to_le32(0xC000016D)
+#define STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT cpu_to_le32(0xC000016E)
+#define STATUS_PARTITION_FAILURE cpu_to_le32(0xC0000172)
+#define STATUS_INVALID_BLOCK_LENGTH cpu_to_le32(0xC0000173)
+#define STATUS_DEVICE_NOT_PARTITIONED cpu_to_le32(0xC0000174)
+#define STATUS_UNABLE_TO_LOCK_MEDIA cpu_to_le32(0xC0000175)
+#define STATUS_UNABLE_TO_UNLOAD_MEDIA cpu_to_le32(0xC0000176)
+#define STATUS_EOM_OVERFLOW cpu_to_le32(0xC0000177)
+#define STATUS_NO_MEDIA cpu_to_le32(0xC0000178)
+#define STATUS_NO_SUCH_MEMBER cpu_to_le32(0xC000017A)
+#define STATUS_INVALID_MEMBER cpu_to_le32(0xC000017B)
+#define STATUS_KEY_DELETED cpu_to_le32(0xC000017C)
+#define STATUS_NO_LOG_SPACE cpu_to_le32(0xC000017D)
+#define STATUS_TOO_MANY_SIDS cpu_to_le32(0xC000017E)
+#define STATUS_LM_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000017F)
+#define STATUS_KEY_HAS_CHILDREN cpu_to_le32(0xC0000180)
+#define STATUS_CHILD_MUST_BE_VOLATILE cpu_to_le32(0xC0000181)
+#define STATUS_DEVICE_CONFIGURATION_ERROR cpu_to_le32(0xC0000182)
+#define STATUS_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC0000183)
+#define STATUS_INVALID_DEVICE_STATE cpu_to_le32(0xC0000184)
+#define STATUS_IO_DEVICE_ERROR cpu_to_le32(0xC0000185)
+#define STATUS_DEVICE_PROTOCOL_ERROR cpu_to_le32(0xC0000186)
+#define STATUS_BACKUP_CONTROLLER cpu_to_le32(0xC0000187)
+#define STATUS_LOG_FILE_FULL cpu_to_le32(0xC0000188)
+#define STATUS_TOO_LATE cpu_to_le32(0xC0000189)
+#define STATUS_NO_TRUST_LSA_SECRET cpu_to_le32(0xC000018A)
+#define STATUS_NO_TRUST_SAM_ACCOUNT cpu_to_le32(0xC000018B)
+#define STATUS_TRUSTED_DOMAIN_FAILURE cpu_to_le32(0xC000018C)
+#define STATUS_TRUSTED_RELATIONSHIP_FAILURE cpu_to_le32(0xC000018D)
+#define STATUS_EVENTLOG_FILE_CORRUPT cpu_to_le32(0xC000018E)
+#define STATUS_EVENTLOG_CANT_START cpu_to_le32(0xC000018F)
+#define STATUS_TRUST_FAILURE cpu_to_le32(0xC0000190)
+#define STATUS_MUTANT_LIMIT_EXCEEDED cpu_to_le32(0xC0000191)
+#define STATUS_NETLOGON_NOT_STARTED cpu_to_le32(0xC0000192)
+#define STATUS_ACCOUNT_EXPIRED cpu_to_le32(0xC0000193)
+#define STATUS_POSSIBLE_DEADLOCK cpu_to_le32(0xC0000194)
+#define STATUS_NETWORK_CREDENTIAL_CONFLICT cpu_to_le32(0xC0000195)
+#define STATUS_REMOTE_SESSION_LIMIT cpu_to_le32(0xC0000196)
+#define STATUS_EVENTLOG_FILE_CHANGED cpu_to_le32(0xC0000197)
+#define STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT cpu_to_le32(0xC0000198)
+#define STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT cpu_to_le32(0xC0000199)
+#define STATUS_NOLOGON_SERVER_TRUST_ACCOUNT cpu_to_le32(0xC000019A)
+#define STATUS_DOMAIN_TRUST_INCONSISTENT cpu_to_le32(0xC000019B)
+#define STATUS_FS_DRIVER_REQUIRED cpu_to_le32(0xC000019C)
+#define STATUS_IMAGE_ALREADY_LOADED_AS_DLL cpu_to_le32(0xC000019D)
+#define STATUS_NETWORK_OPEN_RESTRICTION cpu_to_le32(0xC0000201)
+#define STATUS_NO_USER_SESSION_KEY cpu_to_le32(0xC0000202)
+#define STATUS_USER_SESSION_DELETED cpu_to_le32(0xC0000203)
+#define STATUS_RESOURCE_LANG_NOT_FOUND cpu_to_le32(0xC0000204)
+#define STATUS_INSUFF_SERVER_RESOURCES cpu_to_le32(0xC0000205)
+#define STATUS_INVALID_BUFFER_SIZE cpu_to_le32(0xC0000206)
+#define STATUS_INVALID_ADDRESS_COMPONENT cpu_to_le32(0xC0000207)
+#define STATUS_INVALID_ADDRESS_WILDCARD cpu_to_le32(0xC0000208)
+#define STATUS_TOO_MANY_ADDRESSES cpu_to_le32(0xC0000209)
+#define STATUS_ADDRESS_ALREADY_EXISTS cpu_to_le32(0xC000020A)
+#define STATUS_ADDRESS_CLOSED cpu_to_le32(0xC000020B)
+#define STATUS_CONNECTION_DISCONNECTED cpu_to_le32(0xC000020C)
+#define STATUS_CONNECTION_RESET cpu_to_le32(0xC000020D)
+#define STATUS_TOO_MANY_NODES cpu_to_le32(0xC000020E)
+#define STATUS_TRANSACTION_ABORTED cpu_to_le32(0xC000020F)
+#define STATUS_TRANSACTION_TIMED_OUT cpu_to_le32(0xC0000210)
+#define STATUS_TRANSACTION_NO_RELEASE cpu_to_le32(0xC0000211)
+#define STATUS_TRANSACTION_NO_MATCH cpu_to_le32(0xC0000212)
+#define STATUS_TRANSACTION_RESPONDED cpu_to_le32(0xC0000213)
+#define STATUS_TRANSACTION_INVALID_ID cpu_to_le32(0xC0000214)
+#define STATUS_TRANSACTION_INVALID_TYPE cpu_to_le32(0xC0000215)
+#define STATUS_NOT_SERVER_SESSION cpu_to_le32(0xC0000216)
+#define STATUS_NOT_CLIENT_SESSION cpu_to_le32(0xC0000217)
+#define STATUS_CANNOT_LOAD_REGISTRY_FILE cpu_to_le32(0xC0000218)
+#define STATUS_DEBUG_ATTACH_FAILED cpu_to_le32(0xC0000219)
+#define STATUS_SYSTEM_PROCESS_TERMINATED cpu_to_le32(0xC000021A)
+#define STATUS_DATA_NOT_ACCEPTED cpu_to_le32(0xC000021B)
+#define STATUS_NO_BROWSER_SERVERS_FOUND cpu_to_le32(0xC000021C)
+#define STATUS_VDM_HARD_ERROR cpu_to_le32(0xC000021D)
+#define STATUS_DRIVER_CANCEL_TIMEOUT cpu_to_le32(0xC000021E)
+#define STATUS_REPLY_MESSAGE_MISMATCH cpu_to_le32(0xC000021F)
+#define STATUS_MAPPED_ALIGNMENT cpu_to_le32(0xC0000220)
+#define STATUS_IMAGE_CHECKSUM_MISMATCH cpu_to_le32(0xC0000221)
+#define STATUS_LOST_WRITEBEHIND_DATA cpu_to_le32(0xC0000222)
+#define STATUS_CLIENT_SERVER_PARAMETERS_INVALID cpu_to_le32(0xC0000223)
+#define STATUS_PASSWORD_MUST_CHANGE cpu_to_le32(0xC0000224)
+#define STATUS_NOT_FOUND cpu_to_le32(0xC0000225)
+#define STATUS_NOT_TINY_STREAM cpu_to_le32(0xC0000226)
+#define STATUS_RECOVERY_FAILURE cpu_to_le32(0xC0000227)
+#define STATUS_STACK_OVERFLOW_READ cpu_to_le32(0xC0000228)
+#define STATUS_FAIL_CHECK cpu_to_le32(0xC0000229)
+#define STATUS_DUPLICATE_OBJECTID cpu_to_le32(0xC000022A)
+#define STATUS_OBJECTID_EXISTS cpu_to_le32(0xC000022B)
+#define STATUS_CONVERT_TO_LARGE cpu_to_le32(0xC000022C)
+#define STATUS_RETRY cpu_to_le32(0xC000022D)
+#define STATUS_FOUND_OUT_OF_SCOPE cpu_to_le32(0xC000022E)
+#define STATUS_ALLOCATE_BUCKET cpu_to_le32(0xC000022F)
+#define STATUS_PROPSET_NOT_FOUND cpu_to_le32(0xC0000230)
+#define STATUS_MARSHALL_OVERFLOW cpu_to_le32(0xC0000231)
+#define STATUS_INVALID_VARIANT cpu_to_le32(0xC0000232)
+#define STATUS_DOMAIN_CONTROLLER_NOT_FOUND cpu_to_le32(0xC0000233)
+#define STATUS_ACCOUNT_LOCKED_OUT cpu_to_le32(0xC0000234)
+#define STATUS_HANDLE_NOT_CLOSABLE cpu_to_le32(0xC0000235)
+#define STATUS_CONNECTION_REFUSED cpu_to_le32(0xC0000236)
+#define STATUS_GRACEFUL_DISCONNECT cpu_to_le32(0xC0000237)
+#define STATUS_ADDRESS_ALREADY_ASSOCIATED cpu_to_le32(0xC0000238)
+#define STATUS_ADDRESS_NOT_ASSOCIATED cpu_to_le32(0xC0000239)
+#define STATUS_CONNECTION_INVALID cpu_to_le32(0xC000023A)
+#define STATUS_CONNECTION_ACTIVE cpu_to_le32(0xC000023B)
+#define STATUS_NETWORK_UNREACHABLE cpu_to_le32(0xC000023C)
+#define STATUS_HOST_UNREACHABLE cpu_to_le32(0xC000023D)
+#define STATUS_PROTOCOL_UNREACHABLE cpu_to_le32(0xC000023E)
+#define STATUS_PORT_UNREACHABLE cpu_to_le32(0xC000023F)
+#define STATUS_REQUEST_ABORTED cpu_to_le32(0xC0000240)
+#define STATUS_CONNECTION_ABORTED cpu_to_le32(0xC0000241)
+#define STATUS_BAD_COMPRESSION_BUFFER cpu_to_le32(0xC0000242)
+#define STATUS_USER_MAPPED_FILE cpu_to_le32(0xC0000243)
+#define STATUS_AUDIT_FAILED cpu_to_le32(0xC0000244)
+#define STATUS_TIMER_RESOLUTION_NOT_SET cpu_to_le32(0xC0000245)
+#define STATUS_CONNECTION_COUNT_LIMIT cpu_to_le32(0xC0000246)
+#define STATUS_LOGIN_TIME_RESTRICTION cpu_to_le32(0xC0000247)
+#define STATUS_LOGIN_WKSTA_RESTRICTION cpu_to_le32(0xC0000248)
+#define STATUS_IMAGE_MP_UP_MISMATCH cpu_to_le32(0xC0000249)
+#define STATUS_INSUFFICIENT_LOGON_INFO cpu_to_le32(0xC0000250)
+#define STATUS_BAD_DLL_ENTRYPOINT cpu_to_le32(0xC0000251)
+#define STATUS_BAD_SERVICE_ENTRYPOINT cpu_to_le32(0xC0000252)
+#define STATUS_LPC_REPLY_LOST cpu_to_le32(0xC0000253)
+#define STATUS_IP_ADDRESS_CONFLICT1 cpu_to_le32(0xC0000254)
+#define STATUS_IP_ADDRESS_CONFLICT2 cpu_to_le32(0xC0000255)
+#define STATUS_REGISTRY_QUOTA_LIMIT cpu_to_le32(0xC0000256)
+#define STATUS_PATH_NOT_COVERED cpu_to_le32(0xC0000257)
+#define STATUS_NO_CALLBACK_ACTIVE cpu_to_le32(0xC0000258)
+#define STATUS_LICENSE_QUOTA_EXCEEDED cpu_to_le32(0xC0000259)
+#define STATUS_PWD_TOO_SHORT cpu_to_le32(0xC000025A)
+#define STATUS_PWD_TOO_RECENT cpu_to_le32(0xC000025B)
+#define STATUS_PWD_HISTORY_CONFLICT cpu_to_le32(0xC000025C)
+#define STATUS_PLUGPLAY_NO_DEVICE cpu_to_le32(0xC000025E)
+#define STATUS_UNSUPPORTED_COMPRESSION cpu_to_le32(0xC000025F)
+#define STATUS_INVALID_HW_PROFILE cpu_to_le32(0xC0000260)
+#define STATUS_INVALID_PLUGPLAY_DEVICE_PATH cpu_to_le32(0xC0000261)
+#define STATUS_DRIVER_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000262)
+#define STATUS_DRIVER_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000263)
+#define STATUS_RESOURCE_NOT_OWNED cpu_to_le32(0xC0000264)
+#define STATUS_TOO_MANY_LINKS cpu_to_le32(0xC0000265)
+#define STATUS_QUOTA_LIST_INCONSISTENT cpu_to_le32(0xC0000266)
+#define STATUS_FILE_IS_OFFLINE cpu_to_le32(0xC0000267)
+#define STATUS_EVALUATION_EXPIRATION cpu_to_le32(0xC0000268)
+#define STATUS_ILLEGAL_DLL_RELOCATION cpu_to_le32(0xC0000269)
+#define STATUS_LICENSE_VIOLATION cpu_to_le32(0xC000026A)
+#define STATUS_DLL_INIT_FAILED_LOGOFF cpu_to_le32(0xC000026B)
+#define STATUS_DRIVER_UNABLE_TO_LOAD cpu_to_le32(0xC000026C)
+#define STATUS_DFS_UNAVAILABLE cpu_to_le32(0xC000026D)
+#define STATUS_VOLUME_DISMOUNTED cpu_to_le32(0xC000026E)
+#define STATUS_WX86_INTERNAL_ERROR cpu_to_le32(0xC000026F)
+#define STATUS_WX86_FLOAT_STACK_CHECK cpu_to_le32(0xC0000270)
+#define STATUS_VALIDATE_CONTINUE cpu_to_le32(0xC0000271)
+#define STATUS_NO_MATCH cpu_to_le32(0xC0000272)
+#define STATUS_NO_MORE_MATCHES cpu_to_le32(0xC0000273)
+#define STATUS_NOT_A_REPARSE_POINT cpu_to_le32(0xC0000275)
+#define STATUS_IO_REPARSE_TAG_INVALID cpu_to_le32(0xC0000276)
+#define STATUS_IO_REPARSE_TAG_MISMATCH cpu_to_le32(0xC0000277)
+#define STATUS_IO_REPARSE_DATA_INVALID cpu_to_le32(0xC0000278)
+#define STATUS_IO_REPARSE_TAG_NOT_HANDLED cpu_to_le32(0xC0000279)
+#define STATUS_REPARSE_POINT_NOT_RESOLVED cpu_to_le32(0xC0000280)
+#define STATUS_DIRECTORY_IS_A_REPARSE_POINT cpu_to_le32(0xC0000281)
+#define STATUS_RANGE_LIST_CONFLICT cpu_to_le32(0xC0000282)
+#define STATUS_SOURCE_ELEMENT_EMPTY cpu_to_le32(0xC0000283)
+#define STATUS_DESTINATION_ELEMENT_FULL cpu_to_le32(0xC0000284)
+#define STATUS_ILLEGAL_ELEMENT_ADDRESS cpu_to_le32(0xC0000285)
+#define STATUS_MAGAZINE_NOT_PRESENT cpu_to_le32(0xC0000286)
+#define STATUS_REINITIALIZATION_NEEDED cpu_to_le32(0xC0000287)
+#define STATUS_ENCRYPTION_FAILED cpu_to_le32(0xC000028A)
+#define STATUS_DECRYPTION_FAILED cpu_to_le32(0xC000028B)
+#define STATUS_RANGE_NOT_FOUND cpu_to_le32(0xC000028C)
+#define STATUS_NO_RECOVERY_POLICY cpu_to_le32(0xC000028D)
+#define STATUS_NO_EFS cpu_to_le32(0xC000028E)
+#define STATUS_WRONG_EFS cpu_to_le32(0xC000028F)
+#define STATUS_NO_USER_KEYS cpu_to_le32(0xC0000290)
+#define STATUS_FILE_NOT_ENCRYPTED cpu_to_le32(0xC0000291)
+#define STATUS_NOT_EXPORT_FORMAT cpu_to_le32(0xC0000292)
+#define STATUS_FILE_ENCRYPTED cpu_to_le32(0xC0000293)
+#define STATUS_WMI_GUID_NOT_FOUND cpu_to_le32(0xC0000295)
+#define STATUS_WMI_INSTANCE_NOT_FOUND cpu_to_le32(0xC0000296)
+#define STATUS_WMI_ITEMID_NOT_FOUND cpu_to_le32(0xC0000297)
+#define STATUS_WMI_TRY_AGAIN cpu_to_le32(0xC0000298)
+#define STATUS_SHARED_POLICY cpu_to_le32(0xC0000299)
+#define STATUS_POLICY_OBJECT_NOT_FOUND cpu_to_le32(0xC000029A)
+#define STATUS_POLICY_ONLY_IN_DS cpu_to_le32(0xC000029B)
+#define STATUS_VOLUME_NOT_UPGRADED cpu_to_le32(0xC000029C)
+#define STATUS_REMOTE_STORAGE_NOT_ACTIVE cpu_to_le32(0xC000029D)
+#define STATUS_REMOTE_STORAGE_MEDIA_ERROR cpu_to_le32(0xC000029E)
+#define STATUS_NO_TRACKING_SERVICE cpu_to_le32(0xC000029F)
+#define STATUS_SERVER_SID_MISMATCH cpu_to_le32(0xC00002A0)
+#define STATUS_DS_NO_ATTRIBUTE_OR_VALUE cpu_to_le32(0xC00002A1)
+#define STATUS_DS_INVALID_ATTRIBUTE_SYNTAX cpu_to_le32(0xC00002A2)
+#define STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED cpu_to_le32(0xC00002A3)
+#define STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS cpu_to_le32(0xC00002A4)
+#define STATUS_DS_BUSY cpu_to_le32(0xC00002A5)
+#define STATUS_DS_UNAVAILABLE cpu_to_le32(0xC00002A6)
+#define STATUS_DS_NO_RIDS_ALLOCATED cpu_to_le32(0xC00002A7)
+#define STATUS_DS_NO_MORE_RIDS cpu_to_le32(0xC00002A8)
+#define STATUS_DS_INCORRECT_ROLE_OWNER cpu_to_le32(0xC00002A9)
+#define STATUS_DS_RIDMGR_INIT_ERROR cpu_to_le32(0xC00002AA)
+#define STATUS_DS_OBJ_CLASS_VIOLATION cpu_to_le32(0xC00002AB)
+#define STATUS_DS_CANT_ON_NON_LEAF cpu_to_le32(0xC00002AC)
+#define STATUS_DS_CANT_ON_RDN cpu_to_le32(0xC00002AD)
+#define STATUS_DS_CANT_MOD_OBJ_CLASS cpu_to_le32(0xC00002AE)
+#define STATUS_DS_CROSS_DOM_MOVE_FAILED cpu_to_le32(0xC00002AF)
+#define STATUS_DS_GC_NOT_AVAILABLE cpu_to_le32(0xC00002B0)
+#define STATUS_DIRECTORY_SERVICE_REQUIRED cpu_to_le32(0xC00002B1)
+#define STATUS_REPARSE_ATTRIBUTE_CONFLICT cpu_to_le32(0xC00002B2)
+#define STATUS_CANT_ENABLE_DENY_ONLY cpu_to_le32(0xC00002B3)
+#define STATUS_FLOAT_MULTIPLE_FAULTS cpu_to_le32(0xC00002B4)
+#define STATUS_FLOAT_MULTIPLE_TRAPS cpu_to_le32(0xC00002B5)
+#define STATUS_DEVICE_REMOVED cpu_to_le32(0xC00002B6)
+#define STATUS_JOURNAL_DELETE_IN_PROGRESS cpu_to_le32(0xC00002B7)
+#define STATUS_JOURNAL_NOT_ACTIVE cpu_to_le32(0xC00002B8)
+#define STATUS_NOINTERFACE cpu_to_le32(0xC00002B9)
+#define STATUS_DS_ADMIN_LIMIT_EXCEEDED cpu_to_le32(0xC00002C1)
+#define STATUS_DRIVER_FAILED_SLEEP cpu_to_le32(0xC00002C2)
+#define STATUS_MUTUAL_AUTHENTICATION_FAILED cpu_to_le32(0xC00002C3)
+#define STATUS_CORRUPT_SYSTEM_FILE cpu_to_le32(0xC00002C4)
+#define STATUS_DATATYPE_MISALIGNMENT_ERROR cpu_to_le32(0xC00002C5)
+#define STATUS_WMI_READ_ONLY cpu_to_le32(0xC00002C6)
+#define STATUS_WMI_SET_FAILURE cpu_to_le32(0xC00002C7)
+#define STATUS_COMMITMENT_MINIMUM cpu_to_le32(0xC00002C8)
+#define STATUS_REG_NAT_CONSUMPTION cpu_to_le32(0xC00002C9)
+#define STATUS_TRANSPORT_FULL cpu_to_le32(0xC00002CA)
+#define STATUS_DS_SAM_INIT_FAILURE cpu_to_le32(0xC00002CB)
+#define STATUS_ONLY_IF_CONNECTED cpu_to_le32(0xC00002CC)
+#define STATUS_DS_SENSITIVE_GROUP_VIOLATION cpu_to_le32(0xC00002CD)
+#define STATUS_PNP_RESTART_ENUMERATION cpu_to_le32(0xC00002CE)
+#define STATUS_JOURNAL_ENTRY_DELETED cpu_to_le32(0xC00002CF)
+#define STATUS_DS_CANT_MOD_PRIMARYGROUPID cpu_to_le32(0xC00002D0)
+#define STATUS_SYSTEM_IMAGE_BAD_SIGNATURE cpu_to_le32(0xC00002D1)
+#define STATUS_PNP_REBOOT_REQUIRED cpu_to_le32(0xC00002D2)
+#define STATUS_POWER_STATE_INVALID cpu_to_le32(0xC00002D3)
+#define STATUS_DS_INVALID_GROUP_TYPE cpu_to_le32(0xC00002D4)
+#define STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D5)
+#define STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D6)
+#define STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D7)
+#define STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC00002D8)
+#define STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D9)
+#define STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER cpu_to_le32(0xC00002DA)
+#define STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER     \
+       cpu_to_le32(0xC00002DB)
+#define STATUS_DS_HAVE_PRIMARY_MEMBERS cpu_to_le32(0xC00002DC)
+#define STATUS_WMI_NOT_SUPPORTED cpu_to_le32(0xC00002DD)
+#define STATUS_INSUFFICIENT_POWER cpu_to_le32(0xC00002DE)
+#define STATUS_SAM_NEED_BOOTKEY_PASSWORD cpu_to_le32(0xC00002DF)
+#define STATUS_SAM_NEED_BOOTKEY_FLOPPY cpu_to_le32(0xC00002E0)
+#define STATUS_DS_CANT_START cpu_to_le32(0xC00002E1)
+#define STATUS_DS_INIT_FAILURE cpu_to_le32(0xC00002E2)
+#define STATUS_SAM_INIT_FAILURE cpu_to_le32(0xC00002E3)
+#define STATUS_DS_GC_REQUIRED cpu_to_le32(0xC00002E4)
+#define STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY cpu_to_le32(0xC00002E5)
+#define STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS cpu_to_le32(0xC00002E6)
+#define STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED cpu_to_le32(0xC00002E7)
+#define STATUS_MULTIPLE_FAULT_VIOLATION cpu_to_le32(0xC00002E8)
+#define STATUS_CURRENT_DOMAIN_NOT_ALLOWED cpu_to_le32(0xC00002E9)
+#define STATUS_CANNOT_MAKE cpu_to_le32(0xC00002EA)
+#define STATUS_SYSTEM_SHUTDOWN cpu_to_le32(0xC00002EB)
+#define STATUS_DS_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002EC)
+#define STATUS_DS_SAM_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002ED)
+#define STATUS_UNFINISHED_CONTEXT_DELETED cpu_to_le32(0xC00002EE)
+#define STATUS_NO_TGT_REPLY cpu_to_le32(0xC00002EF)
+#define STATUS_OBJECTID_NOT_FOUND cpu_to_le32(0xC00002F0)
+#define STATUS_NO_IP_ADDRESSES cpu_to_le32(0xC00002F1)
+#define STATUS_WRONG_CREDENTIAL_HANDLE cpu_to_le32(0xC00002F2)
+#define STATUS_CRYPTO_SYSTEM_INVALID cpu_to_le32(0xC00002F3)
+#define STATUS_MAX_REFERRALS_EXCEEDED cpu_to_le32(0xC00002F4)
+#define STATUS_MUST_BE_KDC cpu_to_le32(0xC00002F5)
+#define STATUS_STRONG_CRYPTO_NOT_SUPPORTED cpu_to_le32(0xC00002F6)
+#define STATUS_TOO_MANY_PRINCIPALS cpu_to_le32(0xC00002F7)
+#define STATUS_NO_PA_DATA cpu_to_le32(0xC00002F8)
+#define STATUS_PKINIT_NAME_MISMATCH cpu_to_le32(0xC00002F9)
+#define STATUS_SMARTCARD_LOGON_REQUIRED cpu_to_le32(0xC00002FA)
+#define STATUS_KDC_INVALID_REQUEST cpu_to_le32(0xC00002FB)
+#define STATUS_KDC_UNABLE_TO_REFER cpu_to_le32(0xC00002FC)
+#define STATUS_KDC_UNKNOWN_ETYPE cpu_to_le32(0xC00002FD)
+#define STATUS_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FE)
+#define STATUS_SERVER_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FF)
+#define STATUS_NOT_SUPPORTED_ON_SBS cpu_to_le32(0xC0000300)
+#define STATUS_WMI_GUID_DISCONNECTED cpu_to_le32(0xC0000301)
+#define STATUS_WMI_ALREADY_DISABLED cpu_to_le32(0xC0000302)
+#define STATUS_WMI_ALREADY_ENABLED cpu_to_le32(0xC0000303)
+#define STATUS_MFT_TOO_FRAGMENTED cpu_to_le32(0xC0000304)
+#define STATUS_COPY_PROTECTION_FAILURE cpu_to_le32(0xC0000305)
+#define STATUS_CSS_AUTHENTICATION_FAILURE cpu_to_le32(0xC0000306)
+#define STATUS_CSS_KEY_NOT_PRESENT cpu_to_le32(0xC0000307)
+#define STATUS_CSS_KEY_NOT_ESTABLISHED cpu_to_le32(0xC0000308)
+#define STATUS_CSS_SCRAMBLED_SECTOR cpu_to_le32(0xC0000309)
+#define STATUS_CSS_REGION_MISMATCH cpu_to_le32(0xC000030A)
+#define STATUS_CSS_RESETS_EXHAUSTED cpu_to_le32(0xC000030B)
+#define STATUS_PKINIT_FAILURE cpu_to_le32(0xC0000320)
+#define STATUS_SMARTCARD_SUBSYSTEM_FAILURE cpu_to_le32(0xC0000321)
+#define STATUS_NO_KERB_KEY cpu_to_le32(0xC0000322)
+#define STATUS_HOST_DOWN cpu_to_le32(0xC0000350)
+#define STATUS_UNSUPPORTED_PREAUTH cpu_to_le32(0xC0000351)
+#define STATUS_EFS_ALG_BLOB_TOO_BIG cpu_to_le32(0xC0000352)
+#define STATUS_PORT_NOT_SET cpu_to_le32(0xC0000353)
+#define STATUS_DEBUGGER_INACTIVE cpu_to_le32(0xC0000354)
+#define STATUS_DS_VERSION_CHECK_FAILURE cpu_to_le32(0xC0000355)
+#define STATUS_AUDITING_DISABLED cpu_to_le32(0xC0000356)
+#define STATUS_PRENT4_MACHINE_ACCOUNT cpu_to_le32(0xC0000357)
+#define STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC0000358)
+#define STATUS_INVALID_IMAGE_WIN_32 cpu_to_le32(0xC0000359)
+#define STATUS_INVALID_IMAGE_WIN_64 cpu_to_le32(0xC000035A)
+#define STATUS_BAD_BINDINGS cpu_to_le32(0xC000035B)
+#define STATUS_NETWORK_SESSION_EXPIRED cpu_to_le32(0xC000035C)
+#define STATUS_APPHELP_BLOCK cpu_to_le32(0xC000035D)
+#define STATUS_ALL_SIDS_FILTERED cpu_to_le32(0xC000035E)
+#define STATUS_NOT_SAFE_MODE_DRIVER cpu_to_le32(0xC000035F)
+#define STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT cpu_to_le32(0xC0000361)
+#define STATUS_ACCESS_DISABLED_BY_POLICY_PATH cpu_to_le32(0xC0000362)
+#define STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER cpu_to_le32(0xC0000363)
+#define STATUS_ACCESS_DISABLED_BY_POLICY_OTHER cpu_to_le32(0xC0000364)
+#define STATUS_FAILED_DRIVER_ENTRY cpu_to_le32(0xC0000365)
+#define STATUS_DEVICE_ENUMERATION_ERROR cpu_to_le32(0xC0000366)
+#define STATUS_MOUNT_POINT_NOT_RESOLVED cpu_to_le32(0xC0000368)
+#define STATUS_INVALID_DEVICE_OBJECT_PARAMETER cpu_to_le32(0xC0000369)
+#define STATUS_MCA_OCCURRED cpu_to_le32(0xC000036A)
+#define STATUS_DRIVER_BLOCKED_CRITICAL cpu_to_le32(0xC000036B)
+#define STATUS_DRIVER_BLOCKED cpu_to_le32(0xC000036C)
+#define STATUS_DRIVER_DATABASE_ERROR cpu_to_le32(0xC000036D)
+#define STATUS_SYSTEM_HIVE_TOO_LARGE cpu_to_le32(0xC000036E)
+#define STATUS_INVALID_IMPORT_OF_NON_DLL cpu_to_le32(0xC000036F)
+#define STATUS_NO_SECRETS cpu_to_le32(0xC0000371)
+#define STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY cpu_to_le32(0xC0000372)
+#define STATUS_FAILED_STACK_SWITCH cpu_to_le32(0xC0000373)
+#define STATUS_HEAP_CORRUPTION cpu_to_le32(0xC0000374)
+#define STATUS_SMARTCARD_WRONG_PIN cpu_to_le32(0xC0000380)
+#define STATUS_SMARTCARD_CARD_BLOCKED cpu_to_le32(0xC0000381)
+#define STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED cpu_to_le32(0xC0000382)
+#define STATUS_SMARTCARD_NO_CARD cpu_to_le32(0xC0000383)
+#define STATUS_SMARTCARD_NO_KEY_CONTAINER cpu_to_le32(0xC0000384)
+#define STATUS_SMARTCARD_NO_CERTIFICATE cpu_to_le32(0xC0000385)
+#define STATUS_SMARTCARD_NO_KEYSET cpu_to_le32(0xC0000386)
+#define STATUS_SMARTCARD_IO_ERROR cpu_to_le32(0xC0000387)
+#define STATUS_DOWNGRADE_DETECTED cpu_to_le32(0xC0000388)
+#define STATUS_SMARTCARD_CERT_REVOKED cpu_to_le32(0xC0000389)
+#define STATUS_ISSUING_CA_UNTRUSTED cpu_to_le32(0xC000038A)
+#define STATUS_REVOCATION_OFFLINE_C cpu_to_le32(0xC000038B)
+#define STATUS_PKINIT_CLIENT_FAILURE cpu_to_le32(0xC000038C)
+#define STATUS_SMARTCARD_CERT_EXPIRED cpu_to_le32(0xC000038D)
+#define STATUS_DRIVER_FAILED_PRIOR_UNLOAD cpu_to_le32(0xC000038E)
+#define STATUS_SMARTCARD_SILENT_CONTEXT cpu_to_le32(0xC000038F)
+#define STATUS_PER_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000401)
+#define STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000402)
+#define STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000403)
+#define STATUS_DS_NAME_NOT_UNIQUE cpu_to_le32(0xC0000404)
+#define STATUS_DS_DUPLICATE_ID_FOUND cpu_to_le32(0xC0000405)
+#define STATUS_DS_GROUP_CONVERSION_ERROR cpu_to_le32(0xC0000406)
+#define STATUS_VOLSNAP_PREPARE_HIBERNATE cpu_to_le32(0xC0000407)
+#define STATUS_USER2USER_REQUIRED cpu_to_le32(0xC0000408)
+#define STATUS_STACK_BUFFER_OVERRUN cpu_to_le32(0xC0000409)
+#define STATUS_NO_S4U_PROT_SUPPORT cpu_to_le32(0xC000040A)
+#define STATUS_CROSSREALM_DELEGATION_FAILURE cpu_to_le32(0xC000040B)
+#define STATUS_REVOCATION_OFFLINE_KDC cpu_to_le32(0xC000040C)
+#define STATUS_ISSUING_CA_UNTRUSTED_KDC cpu_to_le32(0xC000040D)
+#define STATUS_KDC_CERT_EXPIRED cpu_to_le32(0xC000040E)
+#define STATUS_KDC_CERT_REVOKED cpu_to_le32(0xC000040F)
+#define STATUS_PARAMETER_QUOTA_EXCEEDED cpu_to_le32(0xC0000410)
+#define STATUS_HIBERNATION_FAILURE cpu_to_le32(0xC0000411)
+#define STATUS_DELAY_LOAD_FAILED cpu_to_le32(0xC0000412)
+#define STATUS_AUTHENTICATION_FIREWALL_FAILED cpu_to_le32(0xC0000413)
+#define STATUS_VDM_DISALLOWED cpu_to_le32(0xC0000414)
+#define STATUS_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC0000415)
+#define STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE \
+       cpu_to_le32(0xC0000416)
+#define STATUS_INVALID_CRUNTIME_PARAMETER cpu_to_le32(0xC0000417)
+#define STATUS_NTLM_BLOCKED cpu_to_le32(0xC0000418)
+#define STATUS_ASSERTION_FAILURE cpu_to_le32(0xC0000420)
+#define STATUS_VERIFIER_STOP cpu_to_le32(0xC0000421)
+#define STATUS_CALLBACK_POP_STACK cpu_to_le32(0xC0000423)
+#define STATUS_INCOMPATIBLE_DRIVER_BLOCKED cpu_to_le32(0xC0000424)
+#define STATUS_HIVE_UNLOADED cpu_to_le32(0xC0000425)
+#define STATUS_COMPRESSION_DISABLED cpu_to_le32(0xC0000426)
+#define STATUS_FILE_SYSTEM_LIMITATION cpu_to_le32(0xC0000427)
+#define STATUS_INVALID_IMAGE_HASH cpu_to_le32(0xC0000428)
+#define STATUS_NOT_CAPABLE cpu_to_le32(0xC0000429)
+#define STATUS_REQUEST_OUT_OF_SEQUENCE cpu_to_le32(0xC000042A)
+#define STATUS_IMPLEMENTATION_LIMIT cpu_to_le32(0xC000042B)
+#define STATUS_ELEVATION_REQUIRED cpu_to_le32(0xC000042C)
+#define STATUS_BEYOND_VDL cpu_to_le32(0xC0000432)
+#define STATUS_ENCOUNTERED_WRITE_IN_PROGRESS cpu_to_le32(0xC0000433)
+#define STATUS_PTE_CHANGED cpu_to_le32(0xC0000434)
+#define STATUS_PURGE_FAILED cpu_to_le32(0xC0000435)
+#define STATUS_CRED_REQUIRES_CONFIRMATION cpu_to_le32(0xC0000440)
+#define STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE cpu_to_le32(0xC0000441)
+#define STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER cpu_to_le32(0xC0000442)
+#define STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE cpu_to_le32(0xC0000443)
+#define STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE cpu_to_le32(0xC0000444)
+#define STATUS_CS_ENCRYPTION_FILE_NOT_CSE cpu_to_le32(0xC0000445)
+#define STATUS_INVALID_LABEL cpu_to_le32(0xC0000446)
+#define STATUS_DRIVER_PROCESS_TERMINATED cpu_to_le32(0xC0000450)
+#define STATUS_AMBIGUOUS_SYSTEM_DEVICE cpu_to_le32(0xC0000451)
+#define STATUS_SYSTEM_DEVICE_NOT_FOUND cpu_to_le32(0xC0000452)
+#define STATUS_RESTART_BOOT_APPLICATION cpu_to_le32(0xC0000453)
+#define STATUS_INVALID_TASK_NAME cpu_to_le32(0xC0000500)
+#define STATUS_INVALID_TASK_INDEX cpu_to_le32(0xC0000501)
+#define STATUS_THREAD_ALREADY_IN_TASK cpu_to_le32(0xC0000502)
+#define STATUS_CALLBACK_BYPASS cpu_to_le32(0xC0000503)
+#define STATUS_PORT_CLOSED cpu_to_le32(0xC0000700)
+#define STATUS_MESSAGE_LOST cpu_to_le32(0xC0000701)
+#define STATUS_INVALID_MESSAGE cpu_to_le32(0xC0000702)
+#define STATUS_REQUEST_CANCELED cpu_to_le32(0xC0000703)
+#define STATUS_RECURSIVE_DISPATCH cpu_to_le32(0xC0000704)
+#define STATUS_LPC_RECEIVE_BUFFER_EXPECTED cpu_to_le32(0xC0000705)
+#define STATUS_LPC_INVALID_CONNECTION_USAGE cpu_to_le32(0xC0000706)
+#define STATUS_LPC_REQUESTS_NOT_ALLOWED cpu_to_le32(0xC0000707)
+#define STATUS_RESOURCE_IN_USE cpu_to_le32(0xC0000708)
+#define STATUS_HARDWARE_MEMORY_ERROR cpu_to_le32(0xC0000709)
+#define STATUS_THREADPOOL_HANDLE_EXCEPTION cpu_to_le32(0xC000070A)
+#define STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED cpu_to_le32(0xC000070B)
+#define STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED       \
+       cpu_to_le32(0xC000070C)
+#define STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED   \
+       cpu_to_le32(0xC000070D)
+#define STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED    \
+       cpu_to_le32(0xC000070E)
+#define STATUS_THREADPOOL_RELEASED_DURING_OPERATION cpu_to_le32(0xC000070F)
+#define STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000710)
+#define STATUS_APC_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000711)
+#define STATUS_PROCESS_IS_PROTECTED cpu_to_le32(0xC0000712)
+#define STATUS_MCA_EXCEPTION cpu_to_le32(0xC0000713)
+#define STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE cpu_to_le32(0xC0000714)
+#define STATUS_SYMLINK_CLASS_DISABLED cpu_to_le32(0xC0000715)
+#define STATUS_INVALID_IDN_NORMALIZATION cpu_to_le32(0xC0000716)
+#define STATUS_NO_UNICODE_TRANSLATION cpu_to_le32(0xC0000717)
+#define STATUS_ALREADY_REGISTERED cpu_to_le32(0xC0000718)
+#define STATUS_CONTEXT_MISMATCH cpu_to_le32(0xC0000719)
+#define STATUS_PORT_ALREADY_HAS_COMPLETION_LIST cpu_to_le32(0xC000071A)
+#define STATUS_CALLBACK_RETURNED_THREAD_PRIORITY cpu_to_le32(0xC000071B)
+#define STATUS_INVALID_THREAD cpu_to_le32(0xC000071C)
+#define STATUS_CALLBACK_RETURNED_TRANSACTION cpu_to_le32(0xC000071D)
+#define STATUS_CALLBACK_RETURNED_LDR_LOCK cpu_to_le32(0xC000071E)
+#define STATUS_CALLBACK_RETURNED_LANG cpu_to_le32(0xC000071F)
+#define STATUS_CALLBACK_RETURNED_PRI_BACK cpu_to_le32(0xC0000720)
+#define STATUS_CALLBACK_RETURNED_THREAD_AFFINITY cpu_to_le32(0xC0000721)
+#define STATUS_DISK_REPAIR_DISABLED cpu_to_le32(0xC0000800)
+#define STATUS_DS_DOMAIN_RENAME_IN_PROGRESS cpu_to_le32(0xC0000801)
+#define STATUS_DISK_QUOTA_EXCEEDED cpu_to_le32(0xC0000802)
+#define STATUS_CONTENT_BLOCKED cpu_to_le32(0xC0000804)
+#define STATUS_BAD_CLUSTERS cpu_to_le32(0xC0000805)
+#define STATUS_VOLUME_DIRTY cpu_to_le32(0xC0000806)
+#define STATUS_FILE_CHECKED_OUT cpu_to_le32(0xC0000901)
+#define STATUS_CHECKOUT_REQUIRED cpu_to_le32(0xC0000902)
+#define STATUS_BAD_FILE_TYPE cpu_to_le32(0xC0000903)
+#define STATUS_FILE_TOO_LARGE cpu_to_le32(0xC0000904)
+#define STATUS_FORMS_AUTH_REQUIRED cpu_to_le32(0xC0000905)
+#define STATUS_VIRUS_INFECTED cpu_to_le32(0xC0000906)
+#define STATUS_VIRUS_DELETED cpu_to_le32(0xC0000907)
+#define STATUS_BAD_MCFG_TABLE cpu_to_le32(0xC0000908)
+#define STATUS_WOW_ASSERTION cpu_to_le32(0xC0009898)
+#define STATUS_INVALID_SIGNATURE cpu_to_le32(0xC000A000)
+#define STATUS_HMAC_NOT_SUPPORTED cpu_to_le32(0xC000A001)
+#define STATUS_IPSEC_QUEUE_OVERFLOW cpu_to_le32(0xC000A010)
+#define STATUS_ND_QUEUE_OVERFLOW cpu_to_le32(0xC000A011)
+#define STATUS_HOPLIMIT_EXCEEDED cpu_to_le32(0xC000A012)
+#define STATUS_PROTOCOL_NOT_SUPPORTED cpu_to_le32(0xC000A013)
+#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED      \
+       cpu_to_le32(0xC000A080)
+#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR      \
+       cpu_to_le32(0xC000A081)
+#define STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR cpu_to_le32(0xC000A082)
+#define STATUS_XML_PARSE_ERROR cpu_to_le32(0xC000A083)
+#define STATUS_XMLDSIG_ERROR cpu_to_le32(0xC000A084)
+#define STATUS_WRONG_COMPARTMENT cpu_to_le32(0xC000A085)
+#define STATUS_AUTHIP_FAILURE cpu_to_le32(0xC000A086)
+#define DBG_NO_STATE_CHANGE cpu_to_le32(0xC0010001)
+#define DBG_APP_NOT_IDLE cpu_to_le32(0xC0010002)
+#define RPC_NT_INVALID_STRING_BINDING cpu_to_le32(0xC0020001)
+#define RPC_NT_WRONG_KIND_OF_BINDING cpu_to_le32(0xC0020002)
+#define RPC_NT_INVALID_BINDING cpu_to_le32(0xC0020003)
+#define RPC_NT_PROTSEQ_NOT_SUPPORTED cpu_to_le32(0xC0020004)
+#define RPC_NT_INVALID_RPC_PROTSEQ cpu_to_le32(0xC0020005)
+#define RPC_NT_INVALID_STRING_UUID cpu_to_le32(0xC0020006)
+#define RPC_NT_INVALID_ENDPOINT_FORMAT cpu_to_le32(0xC0020007)
+#define RPC_NT_INVALID_NET_ADDR cpu_to_le32(0xC0020008)
+#define RPC_NT_NO_ENDPOINT_FOUND cpu_to_le32(0xC0020009)
+#define RPC_NT_INVALID_TIMEOUT cpu_to_le32(0xC002000A)
+#define RPC_NT_OBJECT_NOT_FOUND cpu_to_le32(0xC002000B)
+#define RPC_NT_ALREADY_REGISTERED cpu_to_le32(0xC002000C)
+#define RPC_NT_TYPE_ALREADY_REGISTERED cpu_to_le32(0xC002000D)
+#define RPC_NT_ALREADY_LISTENING cpu_to_le32(0xC002000E)
+#define RPC_NT_NO_PROTSEQS_REGISTERED cpu_to_le32(0xC002000F)
+#define RPC_NT_NOT_LISTENING cpu_to_le32(0xC0020010)
+#define RPC_NT_UNKNOWN_MGR_TYPE cpu_to_le32(0xC0020011)
+#define RPC_NT_UNKNOWN_IF cpu_to_le32(0xC0020012)
+#define RPC_NT_NO_BINDINGS cpu_to_le32(0xC0020013)
+#define RPC_NT_NO_PROTSEQS cpu_to_le32(0xC0020014)
+#define RPC_NT_CANT_CREATE_ENDPOINT cpu_to_le32(0xC0020015)
+#define RPC_NT_OUT_OF_RESOURCES cpu_to_le32(0xC0020016)
+#define RPC_NT_SERVER_UNAVAILABLE cpu_to_le32(0xC0020017)
+#define RPC_NT_SERVER_TOO_BUSY cpu_to_le32(0xC0020018)
+#define RPC_NT_INVALID_NETWORK_OPTIONS cpu_to_le32(0xC0020019)
+#define RPC_NT_NO_CALL_ACTIVE cpu_to_le32(0xC002001A)
+#define RPC_NT_CALL_FAILED cpu_to_le32(0xC002001B)
+#define RPC_NT_CALL_FAILED_DNE cpu_to_le32(0xC002001C)
+#define RPC_NT_PROTOCOL_ERROR cpu_to_le32(0xC002001D)
+#define RPC_NT_UNSUPPORTED_TRANS_SYN cpu_to_le32(0xC002001F)
+#define RPC_NT_UNSUPPORTED_TYPE cpu_to_le32(0xC0020021)
+#define RPC_NT_INVALID_TAG cpu_to_le32(0xC0020022)
+#define RPC_NT_INVALID_BOUND cpu_to_le32(0xC0020023)
+#define RPC_NT_NO_ENTRY_NAME cpu_to_le32(0xC0020024)
+#define RPC_NT_INVALID_NAME_SYNTAX cpu_to_le32(0xC0020025)
+#define RPC_NT_UNSUPPORTED_NAME_SYNTAX cpu_to_le32(0xC0020026)
+#define RPC_NT_UUID_NO_ADDRESS cpu_to_le32(0xC0020028)
+#define RPC_NT_DUPLICATE_ENDPOINT cpu_to_le32(0xC0020029)
+#define RPC_NT_UNKNOWN_AUTHN_TYPE cpu_to_le32(0xC002002A)
+#define RPC_NT_MAX_CALLS_TOO_SMALL cpu_to_le32(0xC002002B)
+#define RPC_NT_STRING_TOO_LONG cpu_to_le32(0xC002002C)
+#define RPC_NT_PROTSEQ_NOT_FOUND cpu_to_le32(0xC002002D)
+#define RPC_NT_PROCNUM_OUT_OF_RANGE cpu_to_le32(0xC002002E)
+#define RPC_NT_BINDING_HAS_NO_AUTH cpu_to_le32(0xC002002F)
+#define RPC_NT_UNKNOWN_AUTHN_SERVICE cpu_to_le32(0xC0020030)
+#define RPC_NT_UNKNOWN_AUTHN_LEVEL cpu_to_le32(0xC0020031)
+#define RPC_NT_INVALID_AUTH_IDENTITY cpu_to_le32(0xC0020032)
+#define RPC_NT_UNKNOWN_AUTHZ_SERVICE cpu_to_le32(0xC0020033)
+#define EPT_NT_INVALID_ENTRY cpu_to_le32(0xC0020034)
+#define EPT_NT_CANT_PERFORM_OP cpu_to_le32(0xC0020035)
+#define EPT_NT_NOT_REGISTERED cpu_to_le32(0xC0020036)
+#define RPC_NT_NOTHING_TO_EXPORT cpu_to_le32(0xC0020037)
+#define RPC_NT_INCOMPLETE_NAME cpu_to_le32(0xC0020038)
+#define RPC_NT_INVALID_VERS_OPTION cpu_to_le32(0xC0020039)
+#define RPC_NT_NO_MORE_MEMBERS cpu_to_le32(0xC002003A)
+#define RPC_NT_NOT_ALL_OBJS_UNEXPORTED cpu_to_le32(0xC002003B)
+#define RPC_NT_INTERFACE_NOT_FOUND cpu_to_le32(0xC002003C)
+#define RPC_NT_ENTRY_ALREADY_EXISTS cpu_to_le32(0xC002003D)
+#define RPC_NT_ENTRY_NOT_FOUND cpu_to_le32(0xC002003E)
+#define RPC_NT_NAME_SERVICE_UNAVAILABLE cpu_to_le32(0xC002003F)
+#define RPC_NT_INVALID_NAF_ID cpu_to_le32(0xC0020040)
+#define RPC_NT_CANNOT_SUPPORT cpu_to_le32(0xC0020041)
+#define RPC_NT_NO_CONTEXT_AVAILABLE cpu_to_le32(0xC0020042)
+#define RPC_NT_INTERNAL_ERROR cpu_to_le32(0xC0020043)
+#define RPC_NT_ZERO_DIVIDE cpu_to_le32(0xC0020044)
+#define RPC_NT_ADDRESS_ERROR cpu_to_le32(0xC0020045)
+#define RPC_NT_FP_DIV_ZERO cpu_to_le32(0xC0020046)
+#define RPC_NT_FP_UNDERFLOW cpu_to_le32(0xC0020047)
+#define RPC_NT_FP_OVERFLOW cpu_to_le32(0xC0020048)
+#define RPC_NT_CALL_IN_PROGRESS cpu_to_le32(0xC0020049)
+#define RPC_NT_NO_MORE_BINDINGS cpu_to_le32(0xC002004A)
+#define RPC_NT_GROUP_MEMBER_NOT_FOUND cpu_to_le32(0xC002004B)
+#define EPT_NT_CANT_CREATE cpu_to_le32(0xC002004C)
+#define RPC_NT_INVALID_OBJECT cpu_to_le32(0xC002004D)
+#define RPC_NT_NO_INTERFACES cpu_to_le32(0xC002004F)
+#define RPC_NT_CALL_CANCELLED cpu_to_le32(0xC0020050)
+#define RPC_NT_BINDING_INCOMPLETE cpu_to_le32(0xC0020051)
+#define RPC_NT_COMM_FAILURE cpu_to_le32(0xC0020052)
+#define RPC_NT_UNSUPPORTED_AUTHN_LEVEL cpu_to_le32(0xC0020053)
+#define RPC_NT_NO_PRINC_NAME cpu_to_le32(0xC0020054)
+#define RPC_NT_NOT_RPC_ERROR cpu_to_le32(0xC0020055)
+#define RPC_NT_SEC_PKG_ERROR cpu_to_le32(0xC0020057)
+#define RPC_NT_NOT_CANCELLED cpu_to_le32(0xC0020058)
+#define RPC_NT_INVALID_ASYNC_HANDLE cpu_to_le32(0xC0020062)
+#define RPC_NT_INVALID_ASYNC_CALL cpu_to_le32(0xC0020063)
+#define RPC_NT_PROXY_ACCESS_DENIED cpu_to_le32(0xC0020064)
+#define RPC_NT_NO_MORE_ENTRIES cpu_to_le32(0xC0030001)
+#define RPC_NT_SS_CHAR_TRANS_OPEN_FAIL cpu_to_le32(0xC0030002)
+#define RPC_NT_SS_CHAR_TRANS_SHORT_FILE cpu_to_le32(0xC0030003)
+#define RPC_NT_SS_IN_NULL_CONTEXT cpu_to_le32(0xC0030004)
+#define RPC_NT_SS_CONTEXT_MISMATCH cpu_to_le32(0xC0030005)
+#define RPC_NT_SS_CONTEXT_DAMAGED cpu_to_le32(0xC0030006)
+#define RPC_NT_SS_HANDLES_MISMATCH cpu_to_le32(0xC0030007)
+#define RPC_NT_SS_CANNOT_GET_CALL_HANDLE cpu_to_le32(0xC0030008)
+#define RPC_NT_NULL_REF_POINTER cpu_to_le32(0xC0030009)
+#define RPC_NT_ENUM_VALUE_OUT_OF_RANGE cpu_to_le32(0xC003000A)
+#define RPC_NT_BYTE_COUNT_TOO_SMALL cpu_to_le32(0xC003000B)
+#define RPC_NT_BAD_STUB_DATA cpu_to_le32(0xC003000C)
+#define RPC_NT_INVALID_ES_ACTION cpu_to_le32(0xC0030059)
+#define RPC_NT_WRONG_ES_VERSION cpu_to_le32(0xC003005A)
+#define RPC_NT_WRONG_STUB_VERSION cpu_to_le32(0xC003005B)
+#define RPC_NT_INVALID_PIPE_OBJECT cpu_to_le32(0xC003005C)
+#define RPC_NT_INVALID_PIPE_OPERATION cpu_to_le32(0xC003005D)
+#define RPC_NT_WRONG_PIPE_VERSION cpu_to_le32(0xC003005E)
+#define RPC_NT_PIPE_CLOSED cpu_to_le32(0xC003005F)
+#define RPC_NT_PIPE_DISCIPLINE_ERROR cpu_to_le32(0xC0030060)
+#define RPC_NT_PIPE_EMPTY cpu_to_le32(0xC0030061)
+#define STATUS_PNP_BAD_MPS_TABLE cpu_to_le32(0xC0040035)
+#define STATUS_PNP_TRANSLATION_FAILED cpu_to_le32(0xC0040036)
+#define STATUS_PNP_IRQ_TRANSLATION_FAILED cpu_to_le32(0xC0040037)
+#define STATUS_PNP_INVALID_ID cpu_to_le32(0xC0040038)
+#define STATUS_IO_REISSUE_AS_CACHED cpu_to_le32(0xC0040039)
+#define STATUS_CTX_WINSTATION_NAME_INVALID cpu_to_le32(0xC00A0001)
+#define STATUS_CTX_INVALID_PD cpu_to_le32(0xC00A0002)
+#define STATUS_CTX_PD_NOT_FOUND cpu_to_le32(0xC00A0003)
+#define STATUS_CTX_CLOSE_PENDING cpu_to_le32(0xC00A0006)
+#define STATUS_CTX_NO_OUTBUF cpu_to_le32(0xC00A0007)
+#define STATUS_CTX_MODEM_INF_NOT_FOUND cpu_to_le32(0xC00A0008)
+#define STATUS_CTX_INVALID_MODEMNAME cpu_to_le32(0xC00A0009)
+#define STATUS_CTX_RESPONSE_ERROR cpu_to_le32(0xC00A000A)
+#define STATUS_CTX_MODEM_RESPONSE_TIMEOUT cpu_to_le32(0xC00A000B)
+#define STATUS_CTX_MODEM_RESPONSE_NO_CARRIER cpu_to_le32(0xC00A000C)
+#define STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE cpu_to_le32(0xC00A000D)
+#define STATUS_CTX_MODEM_RESPONSE_BUSY cpu_to_le32(0xC00A000E)
+#define STATUS_CTX_MODEM_RESPONSE_VOICE cpu_to_le32(0xC00A000F)
+#define STATUS_CTX_TD_ERROR cpu_to_le32(0xC00A0010)
+#define STATUS_CTX_LICENSE_CLIENT_INVALID cpu_to_le32(0xC00A0012)
+#define STATUS_CTX_LICENSE_NOT_AVAILABLE cpu_to_le32(0xC00A0013)
+#define STATUS_CTX_LICENSE_EXPIRED cpu_to_le32(0xC00A0014)
+#define STATUS_CTX_WINSTATION_NOT_FOUND cpu_to_le32(0xC00A0015)
+#define STATUS_CTX_WINSTATION_NAME_COLLISION cpu_to_le32(0xC00A0016)
+#define STATUS_CTX_WINSTATION_BUSY cpu_to_le32(0xC00A0017)
+#define STATUS_CTX_BAD_VIDEO_MODE cpu_to_le32(0xC00A0018)
+#define STATUS_CTX_GRAPHICS_INVALID cpu_to_le32(0xC00A0022)
+#define STATUS_CTX_NOT_CONSOLE cpu_to_le32(0xC00A0024)
+#define STATUS_CTX_CLIENT_QUERY_TIMEOUT cpu_to_le32(0xC00A0026)
+#define STATUS_CTX_CONSOLE_DISCONNECT cpu_to_le32(0xC00A0027)
+#define STATUS_CTX_CONSOLE_CONNECT cpu_to_le32(0xC00A0028)
+#define STATUS_CTX_SHADOW_DENIED cpu_to_le32(0xC00A002A)
+#define STATUS_CTX_WINSTATION_ACCESS_DENIED cpu_to_le32(0xC00A002B)
+#define STATUS_CTX_INVALID_WD cpu_to_le32(0xC00A002E)
+#define STATUS_CTX_WD_NOT_FOUND cpu_to_le32(0xC00A002F)
+#define STATUS_CTX_SHADOW_INVALID cpu_to_le32(0xC00A0030)
+#define STATUS_CTX_SHADOW_DISABLED cpu_to_le32(0xC00A0031)
+#define STATUS_RDP_PROTOCOL_ERROR cpu_to_le32(0xC00A0032)
+#define STATUS_CTX_CLIENT_LICENSE_NOT_SET cpu_to_le32(0xC00A0033)
+#define STATUS_CTX_CLIENT_LICENSE_IN_USE cpu_to_le32(0xC00A0034)
+#define STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE cpu_to_le32(0xC00A0035)
+#define STATUS_CTX_SHADOW_NOT_RUNNING cpu_to_le32(0xC00A0036)
+#define STATUS_CTX_LOGON_DISABLED cpu_to_le32(0xC00A0037)
+#define STATUS_CTX_SECURITY_LAYER_ERROR cpu_to_le32(0xC00A0038)
+#define STATUS_TS_INCOMPATIBLE_SESSIONS cpu_to_le32(0xC00A0039)
+#define STATUS_MUI_FILE_NOT_FOUND cpu_to_le32(0xC00B0001)
+#define STATUS_MUI_INVALID_FILE cpu_to_le32(0xC00B0002)
+#define STATUS_MUI_INVALID_RC_CONFIG cpu_to_le32(0xC00B0003)
+#define STATUS_MUI_INVALID_LOCALE_NAME cpu_to_le32(0xC00B0004)
+#define STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME cpu_to_le32(0xC00B0005)
+#define STATUS_MUI_FILE_NOT_LOADED cpu_to_le32(0xC00B0006)
+#define STATUS_RESOURCE_ENUM_USER_STOP cpu_to_le32(0xC00B0007)
+#define STATUS_CLUSTER_INVALID_NODE cpu_to_le32(0xC0130001)
+#define STATUS_CLUSTER_NODE_EXISTS cpu_to_le32(0xC0130002)
+#define STATUS_CLUSTER_JOIN_IN_PROGRESS cpu_to_le32(0xC0130003)
+#define STATUS_CLUSTER_NODE_NOT_FOUND cpu_to_le32(0xC0130004)
+#define STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND cpu_to_le32(0xC0130005)
+#define STATUS_CLUSTER_NETWORK_EXISTS cpu_to_le32(0xC0130006)
+#define STATUS_CLUSTER_NETWORK_NOT_FOUND cpu_to_le32(0xC0130007)
+#define STATUS_CLUSTER_NETINTERFACE_EXISTS cpu_to_le32(0xC0130008)
+#define STATUS_CLUSTER_NETINTERFACE_NOT_FOUND cpu_to_le32(0xC0130009)
+#define STATUS_CLUSTER_INVALID_REQUEST cpu_to_le32(0xC013000A)
+#define STATUS_CLUSTER_INVALID_NETWORK_PROVIDER cpu_to_le32(0xC013000B)
+#define STATUS_CLUSTER_NODE_DOWN cpu_to_le32(0xC013000C)
+#define STATUS_CLUSTER_NODE_UNREACHABLE cpu_to_le32(0xC013000D)
+#define STATUS_CLUSTER_NODE_NOT_MEMBER cpu_to_le32(0xC013000E)
+#define STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS cpu_to_le32(0xC013000F)
+#define STATUS_CLUSTER_INVALID_NETWORK cpu_to_le32(0xC0130010)
+#define STATUS_CLUSTER_NO_NET_ADAPTERS cpu_to_le32(0xC0130011)
+#define STATUS_CLUSTER_NODE_UP cpu_to_le32(0xC0130012)
+#define STATUS_CLUSTER_NODE_PAUSED cpu_to_le32(0xC0130013)
+#define STATUS_CLUSTER_NODE_NOT_PAUSED cpu_to_le32(0xC0130014)
+#define STATUS_CLUSTER_NO_SECURITY_CONTEXT cpu_to_le32(0xC0130015)
+#define STATUS_CLUSTER_NETWORK_NOT_INTERNAL cpu_to_le32(0xC0130016)
+#define STATUS_CLUSTER_POISONED cpu_to_le32(0xC0130017)
+#define STATUS_ACPI_INVALID_OPCODE cpu_to_le32(0xC0140001)
+#define STATUS_ACPI_STACK_OVERFLOW cpu_to_le32(0xC0140002)
+#define STATUS_ACPI_ASSERT_FAILED cpu_to_le32(0xC0140003)
+#define STATUS_ACPI_INVALID_INDEX cpu_to_le32(0xC0140004)
+#define STATUS_ACPI_INVALID_ARGUMENT cpu_to_le32(0xC0140005)
+#define STATUS_ACPI_FATAL cpu_to_le32(0xC0140006)
+#define STATUS_ACPI_INVALID_SUPERNAME cpu_to_le32(0xC0140007)
+#define STATUS_ACPI_INVALID_ARGTYPE cpu_to_le32(0xC0140008)
+#define STATUS_ACPI_INVALID_OBJTYPE cpu_to_le32(0xC0140009)
+#define STATUS_ACPI_INVALID_TARGETTYPE cpu_to_le32(0xC014000A)
+#define STATUS_ACPI_INCORRECT_ARGUMENT_COUNT cpu_to_le32(0xC014000B)
+#define STATUS_ACPI_ADDRESS_NOT_MAPPED cpu_to_le32(0xC014000C)
+#define STATUS_ACPI_INVALID_EVENTTYPE cpu_to_le32(0xC014000D)
+#define STATUS_ACPI_HANDLER_COLLISION cpu_to_le32(0xC014000E)
+#define STATUS_ACPI_INVALID_DATA cpu_to_le32(0xC014000F)
+#define STATUS_ACPI_INVALID_REGION cpu_to_le32(0xC0140010)
+#define STATUS_ACPI_INVALID_ACCESS_SIZE cpu_to_le32(0xC0140011)
+#define STATUS_ACPI_ACQUIRE_GLOBAL_LOCK cpu_to_le32(0xC0140012)
+#define STATUS_ACPI_ALREADY_INITIALIZED cpu_to_le32(0xC0140013)
+#define STATUS_ACPI_NOT_INITIALIZED cpu_to_le32(0xC0140014)
+#define STATUS_ACPI_INVALID_MUTEX_LEVEL cpu_to_le32(0xC0140015)
+#define STATUS_ACPI_MUTEX_NOT_OWNED cpu_to_le32(0xC0140016)
+#define STATUS_ACPI_MUTEX_NOT_OWNER cpu_to_le32(0xC0140017)
+#define STATUS_ACPI_RS_ACCESS cpu_to_le32(0xC0140018)
+#define STATUS_ACPI_INVALID_TABLE cpu_to_le32(0xC0140019)
+#define STATUS_ACPI_REG_HANDLER_FAILED cpu_to_le32(0xC0140020)
+#define STATUS_ACPI_POWER_REQUEST_FAILED cpu_to_le32(0xC0140021)
+#define STATUS_SXS_SECTION_NOT_FOUND cpu_to_le32(0xC0150001)
+#define STATUS_SXS_CANT_GEN_ACTCTX cpu_to_le32(0xC0150002)
+#define STATUS_SXS_INVALID_ACTCTXDATA_FORMAT cpu_to_le32(0xC0150003)
+#define STATUS_SXS_ASSEMBLY_NOT_FOUND cpu_to_le32(0xC0150004)
+#define STATUS_SXS_MANIFEST_FORMAT_ERROR cpu_to_le32(0xC0150005)
+#define STATUS_SXS_MANIFEST_PARSE_ERROR cpu_to_le32(0xC0150006)
+#define STATUS_SXS_ACTIVATION_CONTEXT_DISABLED cpu_to_le32(0xC0150007)
+#define STATUS_SXS_KEY_NOT_FOUND cpu_to_le32(0xC0150008)
+#define STATUS_SXS_VERSION_CONFLICT cpu_to_le32(0xC0150009)
+#define STATUS_SXS_WRONG_SECTION_TYPE cpu_to_le32(0xC015000A)
+#define STATUS_SXS_THREAD_QUERIES_DISABLED cpu_to_le32(0xC015000B)
+#define STATUS_SXS_ASSEMBLY_MISSING cpu_to_le32(0xC015000C)
+#define STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET cpu_to_le32(0xC015000E)
+#define STATUS_SXS_EARLY_DEACTIVATION cpu_to_le32(0xC015000F)
+#define STATUS_SXS_INVALID_DEACTIVATION cpu_to_le32(0xC0150010)
+#define STATUS_SXS_MULTIPLE_DEACTIVATION cpu_to_le32(0xC0150011)
+#define STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY     \
+       cpu_to_le32(0xC0150012)
+#define STATUS_SXS_PROCESS_TERMINATION_REQUESTED cpu_to_le32(0xC0150013)
+#define STATUS_SXS_CORRUPT_ACTIVATION_STACK cpu_to_le32(0xC0150014)
+#define STATUS_SXS_CORRUPTION cpu_to_le32(0xC0150015)
+#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE cpu_to_le32(0xC0150016)
+#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME cpu_to_le32(0xC0150017)
+#define STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE cpu_to_le32(0xC0150018)
+#define STATUS_SXS_IDENTITY_PARSE_ERROR cpu_to_le32(0xC0150019)
+#define STATUS_SXS_COMPONENT_STORE_CORRUPT cpu_to_le32(0xC015001A)
+#define STATUS_SXS_FILE_HASH_MISMATCH cpu_to_le32(0xC015001B)
+#define STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT       \
+       cpu_to_le32(0xC015001C)
+#define STATUS_SXS_IDENTITIES_DIFFERENT cpu_to_le32(0xC015001D)
+#define STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT cpu_to_le32(0xC015001E)
+#define STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY cpu_to_le32(0xC015001F)
+#define STATUS_ADVANCED_INSTALLER_FAILED cpu_to_le32(0xC0150020)
+#define STATUS_XML_ENCODING_MISMATCH cpu_to_le32(0xC0150021)
+#define STATUS_SXS_MANIFEST_TOO_BIG cpu_to_le32(0xC0150022)
+#define STATUS_SXS_SETTING_NOT_REGISTERED cpu_to_le32(0xC0150023)
+#define STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE cpu_to_le32(0xC0150024)
+#define STATUS_SMI_PRIMITIVE_INSTALLER_FAILED cpu_to_le32(0xC0150025)
+#define STATUS_GENERIC_COMMAND_FAILED cpu_to_le32(0xC0150026)
+#define STATUS_SXS_FILE_HASH_MISSING cpu_to_le32(0xC0150027)
+#define STATUS_TRANSACTIONAL_CONFLICT cpu_to_le32(0xC0190001)
+#define STATUS_INVALID_TRANSACTION cpu_to_le32(0xC0190002)
+#define STATUS_TRANSACTION_NOT_ACTIVE cpu_to_le32(0xC0190003)
+#define STATUS_TM_INITIALIZATION_FAILED cpu_to_le32(0xC0190004)
+#define STATUS_RM_NOT_ACTIVE cpu_to_le32(0xC0190005)
+#define STATUS_RM_METADATA_CORRUPT cpu_to_le32(0xC0190006)
+#define STATUS_TRANSACTION_NOT_JOINED cpu_to_le32(0xC0190007)
+#define STATUS_DIRECTORY_NOT_RM cpu_to_le32(0xC0190008)
+#define STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE cpu_to_le32(0xC019000A)
+#define STATUS_LOG_RESIZE_INVALID_SIZE cpu_to_le32(0xC019000B)
+#define STATUS_REMOTE_FILE_VERSION_MISMATCH cpu_to_le32(0xC019000C)
+#define STATUS_CRM_PROTOCOL_ALREADY_EXISTS cpu_to_le32(0xC019000F)
+#define STATUS_TRANSACTION_PROPAGATION_FAILED cpu_to_le32(0xC0190010)
+#define STATUS_CRM_PROTOCOL_NOT_FOUND cpu_to_le32(0xC0190011)
+#define STATUS_TRANSACTION_SUPERIOR_EXISTS cpu_to_le32(0xC0190012)
+#define STATUS_TRANSACTION_REQUEST_NOT_VALID cpu_to_le32(0xC0190013)
+#define STATUS_TRANSACTION_NOT_REQUESTED cpu_to_le32(0xC0190014)
+#define STATUS_TRANSACTION_ALREADY_ABORTED cpu_to_le32(0xC0190015)
+#define STATUS_TRANSACTION_ALREADY_COMMITTED cpu_to_le32(0xC0190016)
+#define STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER cpu_to_le32(0xC0190017)
+#define STATUS_CURRENT_TRANSACTION_NOT_VALID cpu_to_le32(0xC0190018)
+#define STATUS_LOG_GROWTH_FAILED cpu_to_le32(0xC0190019)
+#define STATUS_OBJECT_NO_LONGER_EXISTS cpu_to_le32(0xC0190021)
+#define STATUS_STREAM_MINIVERSION_NOT_FOUND cpu_to_le32(0xC0190022)
+#define STATUS_STREAM_MINIVERSION_NOT_VALID cpu_to_le32(0xC0190023)
+#define STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION     \
+       cpu_to_le32(0xC0190024)
+#define STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT cpu_to_le32(0xC0190025)
+#define STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS cpu_to_le32(0xC0190026)
+#define STATUS_HANDLE_NO_LONGER_VALID cpu_to_le32(0xC0190028)
+#define STATUS_LOG_CORRUPTION_DETECTED cpu_to_le32(0xC0190030)
+#define STATUS_RM_DISCONNECTED cpu_to_le32(0xC0190032)
+#define STATUS_ENLISTMENT_NOT_SUPERIOR cpu_to_le32(0xC0190033)
+#define STATUS_FILE_IDENTITY_NOT_PERSISTENT cpu_to_le32(0xC0190036)
+#define STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY cpu_to_le32(0xC0190037)
+#define STATUS_CANT_CROSS_RM_BOUNDARY cpu_to_le32(0xC0190038)
+#define STATUS_TXF_DIR_NOT_EMPTY cpu_to_le32(0xC0190039)
+#define STATUS_INDOUBT_TRANSACTIONS_EXIST cpu_to_le32(0xC019003A)
+#define STATUS_TM_VOLATILE cpu_to_le32(0xC019003B)
+#define STATUS_ROLLBACK_TIMER_EXPIRED cpu_to_le32(0xC019003C)
+#define STATUS_TXF_ATTRIBUTE_CORRUPT cpu_to_le32(0xC019003D)
+#define STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC019003E)
+#define STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED cpu_to_le32(0xC019003F)
+#define STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE cpu_to_le32(0xC0190040)
+#define STATUS_TRANSACTION_REQUIRED_PROMOTION cpu_to_le32(0xC0190043)
+#define STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION cpu_to_le32(0xC0190044)
+#define STATUS_TRANSACTIONS_NOT_FROZEN cpu_to_le32(0xC0190045)
+#define STATUS_TRANSACTION_FREEZE_IN_PROGRESS cpu_to_le32(0xC0190046)
+#define STATUS_NOT_SNAPSHOT_VOLUME cpu_to_le32(0xC0190047)
+#define STATUS_NO_SAVEPOINT_WITH_OPEN_FILES cpu_to_le32(0xC0190048)
+#define STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190049)
+#define STATUS_TM_IDENTITY_MISMATCH cpu_to_le32(0xC019004A)
+#define STATUS_FLOATED_SECTION cpu_to_le32(0xC019004B)
+#define STATUS_CANNOT_ACCEPT_TRANSACTED_WORK cpu_to_le32(0xC019004C)
+#define STATUS_CANNOT_ABORT_TRANSACTIONS cpu_to_le32(0xC019004D)
+#define STATUS_TRANSACTION_NOT_FOUND cpu_to_le32(0xC019004E)
+#define STATUS_RESOURCEMANAGER_NOT_FOUND cpu_to_le32(0xC019004F)
+#define STATUS_ENLISTMENT_NOT_FOUND cpu_to_le32(0xC0190050)
+#define STATUS_TRANSACTIONMANAGER_NOT_FOUND cpu_to_le32(0xC0190051)
+#define STATUS_TRANSACTIONMANAGER_NOT_ONLINE cpu_to_le32(0xC0190052)
+#define STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION      \
+       cpu_to_le32(0xC0190053)
+#define STATUS_TRANSACTION_NOT_ROOT cpu_to_le32(0xC0190054)
+#define STATUS_TRANSACTION_OBJECT_EXPIRED cpu_to_le32(0xC0190055)
+#define STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190056)
+#define STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED cpu_to_le32(0xC0190057)
+#define STATUS_TRANSACTION_RECORD_TOO_LONG cpu_to_le32(0xC0190058)
+#define STATUS_NO_LINK_TRACKING_IN_TRANSACTION cpu_to_le32(0xC0190059)
+#define STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION cpu_to_le32(0xC019005A)
+#define STATUS_TRANSACTION_INTEGRITY_VIOLATED cpu_to_le32(0xC019005B)
+#define STATUS_LOG_SECTOR_INVALID cpu_to_le32(0xC01A0001)
+#define STATUS_LOG_SECTOR_PARITY_INVALID cpu_to_le32(0xC01A0002)
+#define STATUS_LOG_SECTOR_REMAPPED cpu_to_le32(0xC01A0003)
+#define STATUS_LOG_BLOCK_INCOMPLETE cpu_to_le32(0xC01A0004)
+#define STATUS_LOG_INVALID_RANGE cpu_to_le32(0xC01A0005)
+#define STATUS_LOG_BLOCKS_EXHAUSTED cpu_to_le32(0xC01A0006)
+#define STATUS_LOG_READ_CONTEXT_INVALID cpu_to_le32(0xC01A0007)
+#define STATUS_LOG_RESTART_INVALID cpu_to_le32(0xC01A0008)
+#define STATUS_LOG_BLOCK_VERSION cpu_to_le32(0xC01A0009)
+#define STATUS_LOG_BLOCK_INVALID cpu_to_le32(0xC01A000A)
+#define STATUS_LOG_READ_MODE_INVALID cpu_to_le32(0xC01A000B)
+#define STATUS_LOG_METADATA_CORRUPT cpu_to_le32(0xC01A000D)
+#define STATUS_LOG_METADATA_INVALID cpu_to_le32(0xC01A000E)
+#define STATUS_LOG_METADATA_INCONSISTENT cpu_to_le32(0xC01A000F)
+#define STATUS_LOG_RESERVATION_INVALID cpu_to_le32(0xC01A0010)
+#define STATUS_LOG_CANT_DELETE cpu_to_le32(0xC01A0011)
+#define STATUS_LOG_CONTAINER_LIMIT_EXCEEDED cpu_to_le32(0xC01A0012)
+#define STATUS_LOG_START_OF_LOG cpu_to_le32(0xC01A0013)
+#define STATUS_LOG_POLICY_ALREADY_INSTALLED cpu_to_le32(0xC01A0014)
+#define STATUS_LOG_POLICY_NOT_INSTALLED cpu_to_le32(0xC01A0015)
+#define STATUS_LOG_POLICY_INVALID cpu_to_le32(0xC01A0016)
+#define STATUS_LOG_POLICY_CONFLICT cpu_to_le32(0xC01A0017)
+#define STATUS_LOG_PINNED_ARCHIVE_TAIL cpu_to_le32(0xC01A0018)
+#define STATUS_LOG_RECORD_NONEXISTENT cpu_to_le32(0xC01A0019)
+#define STATUS_LOG_RECORDS_RESERVED_INVALID cpu_to_le32(0xC01A001A)
+#define STATUS_LOG_SPACE_RESERVED_INVALID cpu_to_le32(0xC01A001B)
+#define STATUS_LOG_TAIL_INVALID cpu_to_le32(0xC01A001C)
+#define STATUS_LOG_FULL cpu_to_le32(0xC01A001D)
+#define STATUS_LOG_MULTIPLEXED cpu_to_le32(0xC01A001E)
+#define STATUS_LOG_DEDICATED cpu_to_le32(0xC01A001F)
+#define STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS cpu_to_le32(0xC01A0020)
+#define STATUS_LOG_ARCHIVE_IN_PROGRESS cpu_to_le32(0xC01A0021)
+#define STATUS_LOG_EPHEMERAL cpu_to_le32(0xC01A0022)
+#define STATUS_LOG_NOT_ENOUGH_CONTAINERS cpu_to_le32(0xC01A0023)
+#define STATUS_LOG_CLIENT_ALREADY_REGISTERED cpu_to_le32(0xC01A0024)
+#define STATUS_LOG_CLIENT_NOT_REGISTERED cpu_to_le32(0xC01A0025)
+#define STATUS_LOG_FULL_HANDLER_IN_PROGRESS cpu_to_le32(0xC01A0026)
+#define STATUS_LOG_CONTAINER_READ_FAILED cpu_to_le32(0xC01A0027)
+#define STATUS_LOG_CONTAINER_WRITE_FAILED cpu_to_le32(0xC01A0028)
+#define STATUS_LOG_CONTAINER_OPEN_FAILED cpu_to_le32(0xC01A0029)
+#define STATUS_LOG_CONTAINER_STATE_INVALID cpu_to_le32(0xC01A002A)
+#define STATUS_LOG_STATE_INVALID cpu_to_le32(0xC01A002B)
+#define STATUS_LOG_PINNED cpu_to_le32(0xC01A002C)
+#define STATUS_LOG_METADATA_FLUSH_FAILED cpu_to_le32(0xC01A002D)
+#define STATUS_LOG_INCONSISTENT_SECURITY cpu_to_le32(0xC01A002E)
+#define STATUS_LOG_APPENDED_FLUSH_FAILED cpu_to_le32(0xC01A002F)
+#define STATUS_LOG_PINNED_RESERVATION cpu_to_le32(0xC01A0030)
+#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC01B00EA)
+#define STATUS_FLT_NO_HANDLER_DEFINED cpu_to_le32(0xC01C0001)
+#define STATUS_FLT_CONTEXT_ALREADY_DEFINED cpu_to_le32(0xC01C0002)
+#define STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST cpu_to_le32(0xC01C0003)
+#define STATUS_FLT_DISALLOW_FAST_IO cpu_to_le32(0xC01C0004)
+#define STATUS_FLT_INVALID_NAME_REQUEST cpu_to_le32(0xC01C0005)
+#define STATUS_FLT_NOT_SAFE_TO_POST_OPERATION cpu_to_le32(0xC01C0006)
+#define STATUS_FLT_NOT_INITIALIZED cpu_to_le32(0xC01C0007)
+#define STATUS_FLT_FILTER_NOT_READY cpu_to_le32(0xC01C0008)
+#define STATUS_FLT_POST_OPERATION_CLEANUP cpu_to_le32(0xC01C0009)
+#define STATUS_FLT_INTERNAL_ERROR cpu_to_le32(0xC01C000A)
+#define STATUS_FLT_DELETING_OBJECT cpu_to_le32(0xC01C000B)
+#define STATUS_FLT_MUST_BE_NONPAGED_POOL cpu_to_le32(0xC01C000C)
+#define STATUS_FLT_DUPLICATE_ENTRY cpu_to_le32(0xC01C000D)
+#define STATUS_FLT_CBDQ_DISABLED cpu_to_le32(0xC01C000E)
+#define STATUS_FLT_DO_NOT_ATTACH cpu_to_le32(0xC01C000F)
+#define STATUS_FLT_DO_NOT_DETACH cpu_to_le32(0xC01C0010)
+#define STATUS_FLT_INSTANCE_ALTITUDE_COLLISION cpu_to_le32(0xC01C0011)
+#define STATUS_FLT_INSTANCE_NAME_COLLISION cpu_to_le32(0xC01C0012)
+#define STATUS_FLT_FILTER_NOT_FOUND cpu_to_le32(0xC01C0013)
+#define STATUS_FLT_VOLUME_NOT_FOUND cpu_to_le32(0xC01C0014)
+#define STATUS_FLT_INSTANCE_NOT_FOUND cpu_to_le32(0xC01C0015)
+#define STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND cpu_to_le32(0xC01C0016)
+#define STATUS_FLT_INVALID_CONTEXT_REGISTRATION cpu_to_le32(0xC01C0017)
+#define STATUS_FLT_NAME_CACHE_MISS cpu_to_le32(0xC01C0018)
+#define STATUS_FLT_NO_DEVICE_OBJECT cpu_to_le32(0xC01C0019)
+#define STATUS_FLT_VOLUME_ALREADY_MOUNTED cpu_to_le32(0xC01C001A)
+#define STATUS_FLT_ALREADY_ENLISTED cpu_to_le32(0xC01C001B)
+#define STATUS_FLT_CONTEXT_ALREADY_LINKED cpu_to_le32(0xC01C001C)
+#define STATUS_FLT_NO_WAITER_FOR_REPLY cpu_to_le32(0xC01C0020)
+#define STATUS_MONITOR_NO_DESCRIPTOR cpu_to_le32(0xC01D0001)
+#define STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT cpu_to_le32(0xC01D0002)
+#define STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM cpu_to_le32(0xC01D0003)
+#define STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK cpu_to_le32(0xC01D0004)
+#define STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED cpu_to_le32(0xC01D0005)
+#define STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK      \
+       cpu_to_le32(0xC01D0006)
+#define STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK      \
+       cpu_to_le32(0xC01D0007)
+#define STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA cpu_to_le32(0xC01D0008)
+#define STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK cpu_to_le32(0xC01D0009)
+#define STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER cpu_to_le32(0xC01E0000)
+#define STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER cpu_to_le32(0xC01E0001)
+#define STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER cpu_to_le32(0xC01E0002)
+#define STATUS_GRAPHICS_ADAPTER_WAS_RESET cpu_to_le32(0xC01E0003)
+#define STATUS_GRAPHICS_INVALID_DRIVER_MODEL cpu_to_le32(0xC01E0004)
+#define STATUS_GRAPHICS_PRESENT_MODE_CHANGED cpu_to_le32(0xC01E0005)
+#define STATUS_GRAPHICS_PRESENT_OCCLUDED cpu_to_le32(0xC01E0006)
+#define STATUS_GRAPHICS_PRESENT_DENIED cpu_to_le32(0xC01E0007)
+#define STATUS_GRAPHICS_CANNOTCOLORCONVERT cpu_to_le32(0xC01E0008)
+#define STATUS_GRAPHICS_NO_VIDEO_MEMORY cpu_to_le32(0xC01E0100)
+#define STATUS_GRAPHICS_CANT_LOCK_MEMORY cpu_to_le32(0xC01E0101)
+#define STATUS_GRAPHICS_ALLOCATION_BUSY cpu_to_le32(0xC01E0102)
+#define STATUS_GRAPHICS_TOO_MANY_REFERENCES cpu_to_le32(0xC01E0103)
+#define STATUS_GRAPHICS_TRY_AGAIN_LATER cpu_to_le32(0xC01E0104)
+#define STATUS_GRAPHICS_TRY_AGAIN_NOW cpu_to_le32(0xC01E0105)
+#define STATUS_GRAPHICS_ALLOCATION_INVALID cpu_to_le32(0xC01E0106)
+#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE cpu_to_le32(0xC01E0107)
+#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED cpu_to_le32(0xC01E0108)
+#define STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION cpu_to_le32(0xC01E0109)
+#define STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE cpu_to_le32(0xC01E0110)
+#define STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION cpu_to_le32(0xC01E0111)
+#define STATUS_GRAPHICS_ALLOCATION_CLOSED cpu_to_le32(0xC01E0112)
+#define STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE cpu_to_le32(0xC01E0113)
+#define STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE cpu_to_le32(0xC01E0114)
+#define STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE cpu_to_le32(0xC01E0115)
+#define STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST cpu_to_le32(0xC01E0116)
+#define STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE cpu_to_le32(0xC01E0200)
+#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0300)
+#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED cpu_to_le32(0xC01E0301)
+#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED \
+       cpu_to_le32(0xC01E0302)
+#define STATUS_GRAPHICS_INVALID_VIDPN cpu_to_le32(0xC01E0303)
+#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE cpu_to_le32(0xC01E0304)
+#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET cpu_to_le32(0xC01E0305)
+#define STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED cpu_to_le32(0xC01E0306)
+#define STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET cpu_to_le32(0xC01E0308)
+#define STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET cpu_to_le32(0xC01E0309)
+#define STATUS_GRAPHICS_INVALID_FREQUENCY cpu_to_le32(0xC01E030A)
+#define STATUS_GRAPHICS_INVALID_ACTIVE_REGION cpu_to_le32(0xC01E030B)
+#define STATUS_GRAPHICS_INVALID_TOTAL_REGION cpu_to_le32(0xC01E030C)
+#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE      \
+       cpu_to_le32(0xC01E0310)
+#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE      \
+       cpu_to_le32(0xC01E0311)
+#define STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET cpu_to_le32(0xC01E0312)
+#define STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY cpu_to_le32(0xC01E0313)
+#define STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET cpu_to_le32(0xC01E0314)
+#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET cpu_to_le32(0xC01E0315)
+#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET cpu_to_le32(0xC01E0316)
+#define STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET cpu_to_le32(0xC01E0317)
+#define STATUS_GRAPHICS_TARGET_ALREADY_IN_SET cpu_to_le32(0xC01E0318)
+#define STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH cpu_to_le32(0xC01E0319)
+#define STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY cpu_to_le32(0xC01E031A)
+#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET      \
+       cpu_to_le32(0xC01E031B)
+#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE cpu_to_le32(0xC01E031C)
+#define STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET cpu_to_le32(0xC01E031D)
+#define STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET cpu_to_le32(0xC01E031F)
+#define STATUS_GRAPHICS_STALE_MODESET cpu_to_le32(0xC01E0320)
+#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET cpu_to_le32(0xC01E0321)
+#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE cpu_to_le32(0xC01E0322)
+#define STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN cpu_to_le32(0xC01E0323)
+#define STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0324)
+#define STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION        \
+       cpu_to_le32(0xC01E0325)
+#define STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES        \
+       cpu_to_le32(0xC01E0326)
+#define STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0327)
+#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE  \
+       cpu_to_le32(0xC01E0328)
+#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET  \
+       cpu_to_le32(0xC01E0329)
+#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET cpu_to_le32(0xC01E032A)
+#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR cpu_to_le32(0xC01E032B)
+#define STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET cpu_to_le32(0xC01E032C)
+#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET cpu_to_le32(0xC01E032D)
+#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE    \
+       cpu_to_le32(0xC01E032E)
+#define STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE cpu_to_le32(0xC01E032F)
+#define STATUS_GRAPHICS_RESOURCES_NOT_RELATED cpu_to_le32(0xC01E0330)
+#define STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0331)
+#define STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0332)
+#define STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET cpu_to_le32(0xC01E0333)
+#define STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER   \
+       cpu_to_le32(0xC01E0334)
+#define STATUS_GRAPHICS_NO_VIDPNMGR cpu_to_le32(0xC01E0335)
+#define STATUS_GRAPHICS_NO_ACTIVE_VIDPN cpu_to_le32(0xC01E0336)
+#define STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0337)
+#define STATUS_GRAPHICS_MONITOR_NOT_CONNECTED cpu_to_le32(0xC01E0338)
+#define STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0339)
+#define STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE cpu_to_le32(0xC01E033A)
+#define STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE cpu_to_le32(0xC01E033B)
+#define STATUS_GRAPHICS_INVALID_STRIDE cpu_to_le32(0xC01E033C)
+#define STATUS_GRAPHICS_INVALID_PIXELFORMAT cpu_to_le32(0xC01E033D)
+#define STATUS_GRAPHICS_INVALID_COLORBASIS cpu_to_le32(0xC01E033E)
+#define STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE cpu_to_le32(0xC01E033F)
+#define STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0340)
+#define STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT     \
+       cpu_to_le32(0xC01E0341)
+#define STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE cpu_to_le32(0xC01E0342)
+#define STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN cpu_to_le32(0xC01E0343)
+#define STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL cpu_to_le32(0xC01E0344)
+#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION   \
+       cpu_to_le32(0xC01E0345)
+#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED \
+       cpu_to_le32(0xC01E0346)
+#define STATUS_GRAPHICS_INVALID_GAMMA_RAMP cpu_to_le32(0xC01E0347)
+#define STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED cpu_to_le32(0xC01E0348)
+#define STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED cpu_to_le32(0xC01E0349)
+#define STATUS_GRAPHICS_MODE_NOT_IN_MODESET cpu_to_le32(0xC01E034A)
+#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON   \
+       cpu_to_le32(0xC01E034D)
+#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE cpu_to_le32(0xC01E034E)
+#define STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE cpu_to_le32(0xC01E034F)
+#define STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS      \
+       cpu_to_le32(0xC01E0350)
+#define STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING cpu_to_le32(0xC01E0352)
+#define STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED cpu_to_le32(0xC01E0353)
+#define STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS cpu_to_le32(0xC01E0354)
+#define STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT cpu_to_le32(0xC01E0355)
+#define STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM cpu_to_le32(0xC01E0356)
+#define STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN      \
+       cpu_to_le32(0xC01E0357)
+#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT      \
+       cpu_to_le32(0xC01E0358)
+#define STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED cpu_to_le32(0xC01E0359)
+#define STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION     \
+       cpu_to_le32(0xC01E035A)
+#define STATUS_GRAPHICS_INVALID_CLIENT_TYPE cpu_to_le32(0xC01E035B)
+#define STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET cpu_to_le32(0xC01E035C)
+#define STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED      \
+       cpu_to_le32(0xC01E0400)
+#define STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED cpu_to_le32(0xC01E0401)
+#define STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER cpu_to_le32(0xC01E0430)
+#define STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED cpu_to_le32(0xC01E0431)
+#define STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED cpu_to_le32(0xC01E0432)
+#define STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY cpu_to_le32(0xC01E0433)
+#define STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED cpu_to_le32(0xC01E0434)
+#define STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON cpu_to_le32(0xC01E0435)
+#define STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE cpu_to_le32(0xC01E0436)
+#define STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER cpu_to_le32(0xC01E0438)
+#define STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED cpu_to_le32(0xC01E043B)
+#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS \
+       cpu_to_le32(0xC01E051C)
+#define STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST cpu_to_le32(0xC01E051D)
+#define STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC01E051E)
+#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS \
+       cpu_to_le32(0xC01E051F)
+#define STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED cpu_to_le32(0xC01E0520)
+#define STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST      \
+       cpu_to_le32(0xC01E0521)
+#define STATUS_GRAPHICS_OPM_NOT_SUPPORTED cpu_to_le32(0xC01E0500)
+#define STATUS_GRAPHICS_COPP_NOT_SUPPORTED cpu_to_le32(0xC01E0501)
+#define STATUS_GRAPHICS_UAB_NOT_SUPPORTED cpu_to_le32(0xC01E0502)
+#define STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS cpu_to_le32(0xC01E0503)
+#define STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E0504)
+#define STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST cpu_to_le32(0xC01E0505)
+#define STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME      \
+       cpu_to_le32(0xC01E0506)
+#define STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP     \
+       cpu_to_le32(0xC01E0507)
+#define STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED    \
+       cpu_to_le32(0xC01E0508)
+#define STATUS_GRAPHICS_OPM_INVALID_POINTER cpu_to_le32(0xC01E050A)
+#define STATUS_GRAPHICS_OPM_INTERNAL_ERROR cpu_to_le32(0xC01E050B)
+#define STATUS_GRAPHICS_OPM_INVALID_HANDLE cpu_to_le32(0xC01E050C)
+#define STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE   \
+       cpu_to_le32(0xC01E050D)
+#define STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH cpu_to_le32(0xC01E050E)
+#define STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED cpu_to_le32(0xC01E050F)
+#define STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED cpu_to_le32(0xC01E0510)
+#define STATUS_GRAPHICS_PVP_HFS_FAILED cpu_to_le32(0xC01E0511)
+#define STATUS_GRAPHICS_OPM_INVALID_SRM cpu_to_le32(0xC01E0512)
+#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP cpu_to_le32(0xC01E0513)
+#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP cpu_to_le32(0xC01E0514)
+#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA      \
+       cpu_to_le32(0xC01E0515)
+#define STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET cpu_to_le32(0xC01E0516)
+#define STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH cpu_to_le32(0xC01E0517)
+#define STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE   \
+       cpu_to_le32(0xC01E0518)
+#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS  \
+       cpu_to_le32(0xC01E051A)
+#define STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS    \
+       cpu_to_le32(0xC01E051B)
+#define STATUS_GRAPHICS_I2C_NOT_SUPPORTED cpu_to_le32(0xC01E0580)
+#define STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC01E0581)
+#define STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA cpu_to_le32(0xC01E0582)
+#define STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA cpu_to_le32(0xC01E0583)
+#define STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED cpu_to_le32(0xC01E0584)
+#define STATUS_GRAPHICS_DDCCI_INVALID_DATA cpu_to_le32(0xC01E0585)
+#define STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE \
+       cpu_to_le32(0xC01E0586)
+#define STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING      \
+       cpu_to_le32(0xC01E0587)
+#define STATUS_GRAPHICS_MCA_INTERNAL_ERROR cpu_to_le32(0xC01E0588)
+#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND cpu_to_le32(0xC01E0589)
+#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH cpu_to_le32(0xC01E058A)
+#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM cpu_to_le32(0xC01E058B)
+#define STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE cpu_to_le32(0xC01E058C)
+#define STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS cpu_to_le32(0xC01E058D)
+#define STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED cpu_to_le32(0xC01E05E0)
+#define STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME  \
+       cpu_to_le32(0xC01E05E1)
+#define STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP \
+       cpu_to_le32(0xC01E05E2)
+#define STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED cpu_to_le32(0xC01E05E3)
+#define STATUS_GRAPHICS_INVALID_POINTER cpu_to_le32(0xC01E05E4)
+#define STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE       \
+       cpu_to_le32(0xC01E05E5)
+#define STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E05E6)
+#define STATUS_GRAPHICS_INTERNAL_ERROR cpu_to_le32(0xC01E05E7)
+#define STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS cpu_to_le32(0xC01E05E8)
+#define STATUS_FVE_LOCKED_VOLUME cpu_to_le32(0xC0210000)
+#define STATUS_FVE_NOT_ENCRYPTED cpu_to_le32(0xC0210001)
+#define STATUS_FVE_BAD_INFORMATION cpu_to_le32(0xC0210002)
+#define STATUS_FVE_TOO_SMALL cpu_to_le32(0xC0210003)
+#define STATUS_FVE_FAILED_WRONG_FS cpu_to_le32(0xC0210004)
+#define STATUS_FVE_FAILED_BAD_FS cpu_to_le32(0xC0210005)
+#define STATUS_FVE_FS_NOT_EXTENDED cpu_to_le32(0xC0210006)
+#define STATUS_FVE_FS_MOUNTED cpu_to_le32(0xC0210007)
+#define STATUS_FVE_NO_LICENSE cpu_to_le32(0xC0210008)
+#define STATUS_FVE_ACTION_NOT_ALLOWED cpu_to_le32(0xC0210009)
+#define STATUS_FVE_BAD_DATA cpu_to_le32(0xC021000A)
+#define STATUS_FVE_VOLUME_NOT_BOUND cpu_to_le32(0xC021000B)
+#define STATUS_FVE_NOT_DATA_VOLUME cpu_to_le32(0xC021000C)
+#define STATUS_FVE_CONV_READ_ERROR cpu_to_le32(0xC021000D)
+#define STATUS_FVE_CONV_WRITE_ERROR cpu_to_le32(0xC021000E)
+#define STATUS_FVE_OVERLAPPED_UPDATE cpu_to_le32(0xC021000F)
+#define STATUS_FVE_FAILED_SECTOR_SIZE cpu_to_le32(0xC0210010)
+#define STATUS_FVE_FAILED_AUTHENTICATION cpu_to_le32(0xC0210011)
+#define STATUS_FVE_NOT_OS_VOLUME cpu_to_le32(0xC0210012)
+#define STATUS_FVE_KEYFILE_NOT_FOUND cpu_to_le32(0xC0210013)
+#define STATUS_FVE_KEYFILE_INVALID cpu_to_le32(0xC0210014)
+#define STATUS_FVE_KEYFILE_NO_VMK cpu_to_le32(0xC0210015)
+#define STATUS_FVE_TPM_DISABLED cpu_to_le32(0xC0210016)
+#define STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO cpu_to_le32(0xC0210017)
+#define STATUS_FVE_TPM_INVALID_PCR cpu_to_le32(0xC0210018)
+#define STATUS_FVE_TPM_NO_VMK cpu_to_le32(0xC0210019)
+#define STATUS_FVE_PIN_INVALID cpu_to_le32(0xC021001A)
+#define STATUS_FVE_AUTH_INVALID_APPLICATION cpu_to_le32(0xC021001B)
+#define STATUS_FVE_AUTH_INVALID_CONFIG cpu_to_le32(0xC021001C)
+#define STATUS_FVE_DEBUGGER_ENABLED cpu_to_le32(0xC021001D)
+#define STATUS_FVE_DRY_RUN_FAILED cpu_to_le32(0xC021001E)
+#define STATUS_FVE_BAD_METADATA_POINTER cpu_to_le32(0xC021001F)
+#define STATUS_FVE_OLD_METADATA_COPY cpu_to_le32(0xC0210020)
+#define STATUS_FVE_REBOOT_REQUIRED cpu_to_le32(0xC0210021)
+#define STATUS_FVE_RAW_ACCESS cpu_to_le32(0xC0210022)
+#define STATUS_FVE_RAW_BLOCKED cpu_to_le32(0xC0210023)
+#define STATUS_FWP_CALLOUT_NOT_FOUND cpu_to_le32(0xC0220001)
+#define STATUS_FWP_CONDITION_NOT_FOUND cpu_to_le32(0xC0220002)
+#define STATUS_FWP_FILTER_NOT_FOUND cpu_to_le32(0xC0220003)
+#define STATUS_FWP_LAYER_NOT_FOUND cpu_to_le32(0xC0220004)
+#define STATUS_FWP_PROVIDER_NOT_FOUND cpu_to_le32(0xC0220005)
+#define STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND cpu_to_le32(0xC0220006)
+#define STATUS_FWP_SUBLAYER_NOT_FOUND cpu_to_le32(0xC0220007)
+#define STATUS_FWP_NOT_FOUND cpu_to_le32(0xC0220008)
+#define STATUS_FWP_ALREADY_EXISTS cpu_to_le32(0xC0220009)
+#define STATUS_FWP_IN_USE cpu_to_le32(0xC022000A)
+#define STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS cpu_to_le32(0xC022000B)
+#define STATUS_FWP_WRONG_SESSION cpu_to_le32(0xC022000C)
+#define STATUS_FWP_NO_TXN_IN_PROGRESS cpu_to_le32(0xC022000D)
+#define STATUS_FWP_TXN_IN_PROGRESS cpu_to_le32(0xC022000E)
+#define STATUS_FWP_TXN_ABORTED cpu_to_le32(0xC022000F)
+#define STATUS_FWP_SESSION_ABORTED cpu_to_le32(0xC0220010)
+#define STATUS_FWP_INCOMPATIBLE_TXN cpu_to_le32(0xC0220011)
+#define STATUS_FWP_TIMEOUT cpu_to_le32(0xC0220012)
+#define STATUS_FWP_NET_EVENTS_DISABLED cpu_to_le32(0xC0220013)
+#define STATUS_FWP_INCOMPATIBLE_LAYER cpu_to_le32(0xC0220014)
+#define STATUS_FWP_KM_CLIENTS_ONLY cpu_to_le32(0xC0220015)
+#define STATUS_FWP_LIFETIME_MISMATCH cpu_to_le32(0xC0220016)
+#define STATUS_FWP_BUILTIN_OBJECT cpu_to_le32(0xC0220017)
+#define STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS cpu_to_le32(0xC0220018)
+#define STATUS_FWP_TOO_MANY_CALLOUTS cpu_to_le32(0xC0220018)
+#define STATUS_FWP_NOTIFICATION_DROPPED cpu_to_le32(0xC0220019)
+#define STATUS_FWP_TRAFFIC_MISMATCH cpu_to_le32(0xC022001A)
+#define STATUS_FWP_INCOMPATIBLE_SA_STATE cpu_to_le32(0xC022001B)
+#define STATUS_FWP_NULL_POINTER cpu_to_le32(0xC022001C)
+#define STATUS_FWP_INVALID_ENUMERATOR cpu_to_le32(0xC022001D)
+#define STATUS_FWP_INVALID_FLAGS cpu_to_le32(0xC022001E)
+#define STATUS_FWP_INVALID_NET_MASK cpu_to_le32(0xC022001F)
+#define STATUS_FWP_INVALID_RANGE cpu_to_le32(0xC0220020)
+#define STATUS_FWP_INVALID_INTERVAL cpu_to_le32(0xC0220021)
+#define STATUS_FWP_ZERO_LENGTH_ARRAY cpu_to_le32(0xC0220022)
+#define STATUS_FWP_NULL_DISPLAY_NAME cpu_to_le32(0xC0220023)
+#define STATUS_FWP_INVALID_ACTION_TYPE cpu_to_le32(0xC0220024)
+#define STATUS_FWP_INVALID_WEIGHT cpu_to_le32(0xC0220025)
+#define STATUS_FWP_MATCH_TYPE_MISMATCH cpu_to_le32(0xC0220026)
+#define STATUS_FWP_TYPE_MISMATCH cpu_to_le32(0xC0220027)
+#define STATUS_FWP_OUT_OF_BOUNDS cpu_to_le32(0xC0220028)
+#define STATUS_FWP_RESERVED cpu_to_le32(0xC0220029)
+#define STATUS_FWP_DUPLICATE_CONDITION cpu_to_le32(0xC022002A)
+#define STATUS_FWP_DUPLICATE_KEYMOD cpu_to_le32(0xC022002B)
+#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002C)
+#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER cpu_to_le32(0xC022002D)
+#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002E)
+#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT cpu_to_le32(0xC022002F)
+#define STATUS_FWP_INCOMPATIBLE_AUTH_METHOD cpu_to_le32(0xC0220030)
+#define STATUS_FWP_INCOMPATIBLE_DH_GROUP cpu_to_le32(0xC0220031)
+#define STATUS_FWP_EM_NOT_SUPPORTED cpu_to_le32(0xC0220032)
+#define STATUS_FWP_NEVER_MATCH cpu_to_le32(0xC0220033)
+#define STATUS_FWP_PROVIDER_CONTEXT_MISMATCH cpu_to_le32(0xC0220034)
+#define STATUS_FWP_INVALID_PARAMETER cpu_to_le32(0xC0220035)
+#define STATUS_FWP_TOO_MANY_SUBLAYERS cpu_to_le32(0xC0220036)
+#define STATUS_FWP_CALLOUT_NOTIFICATION_FAILED cpu_to_le32(0xC0220037)
+#define STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG cpu_to_le32(0xC0220038)
+#define STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG cpu_to_le32(0xC0220039)
+#define STATUS_FWP_TCPIP_NOT_READY cpu_to_le32(0xC0220100)
+#define STATUS_FWP_INJECT_HANDLE_CLOSING cpu_to_le32(0xC0220101)
+#define STATUS_FWP_INJECT_HANDLE_STALE cpu_to_le32(0xC0220102)
+#define STATUS_FWP_CANNOT_PEND cpu_to_le32(0xC0220103)
+#define STATUS_NDIS_CLOSING cpu_to_le32(0xC0230002)
+#define STATUS_NDIS_BAD_VERSION cpu_to_le32(0xC0230004)
+#define STATUS_NDIS_BAD_CHARACTERISTICS cpu_to_le32(0xC0230005)
+#define STATUS_NDIS_ADAPTER_NOT_FOUND cpu_to_le32(0xC0230006)
+#define STATUS_NDIS_OPEN_FAILED cpu_to_le32(0xC0230007)
+#define STATUS_NDIS_DEVICE_FAILED cpu_to_le32(0xC0230008)
+#define STATUS_NDIS_MULTICAST_FULL cpu_to_le32(0xC0230009)
+#define STATUS_NDIS_MULTICAST_EXISTS cpu_to_le32(0xC023000A)
+#define STATUS_NDIS_MULTICAST_NOT_FOUND cpu_to_le32(0xC023000B)
+#define STATUS_NDIS_REQUEST_ABORTED cpu_to_le32(0xC023000C)
+#define STATUS_NDIS_RESET_IN_PROGRESS cpu_to_le32(0xC023000D)
+#define STATUS_NDIS_INVALID_PACKET cpu_to_le32(0xC023000F)
+#define STATUS_NDIS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0230010)
+#define STATUS_NDIS_ADAPTER_NOT_READY cpu_to_le32(0xC0230011)
+#define STATUS_NDIS_INVALID_LENGTH cpu_to_le32(0xC0230014)
+#define STATUS_NDIS_INVALID_DATA cpu_to_le32(0xC0230015)
+#define STATUS_NDIS_BUFFER_TOO_SHORT cpu_to_le32(0xC0230016)
+#define STATUS_NDIS_INVALID_OID cpu_to_le32(0xC0230017)
+#define STATUS_NDIS_ADAPTER_REMOVED cpu_to_le32(0xC0230018)
+#define STATUS_NDIS_UNSUPPORTED_MEDIA cpu_to_le32(0xC0230019)
+#define STATUS_NDIS_GROUP_ADDRESS_IN_USE cpu_to_le32(0xC023001A)
+#define STATUS_NDIS_FILE_NOT_FOUND cpu_to_le32(0xC023001B)
+#define STATUS_NDIS_ERROR_READING_FILE cpu_to_le32(0xC023001C)
+#define STATUS_NDIS_ALREADY_MAPPED cpu_to_le32(0xC023001D)
+#define STATUS_NDIS_RESOURCE_CONFLICT cpu_to_le32(0xC023001E)
+#define STATUS_NDIS_MEDIA_DISCONNECTED cpu_to_le32(0xC023001F)
+#define STATUS_NDIS_INVALID_ADDRESS cpu_to_le32(0xC0230022)
+#define STATUS_NDIS_PAUSED cpu_to_le32(0xC023002A)
+#define STATUS_NDIS_INTERFACE_NOT_FOUND cpu_to_le32(0xC023002B)
+#define STATUS_NDIS_UNSUPPORTED_REVISION cpu_to_le32(0xC023002C)
+#define STATUS_NDIS_INVALID_PORT cpu_to_le32(0xC023002D)
+#define STATUS_NDIS_INVALID_PORT_STATE cpu_to_le32(0xC023002E)
+#define STATUS_NDIS_LOW_POWER_STATE cpu_to_le32(0xC023002F)
+#define STATUS_NDIS_NOT_SUPPORTED cpu_to_le32(0xC02300BB)
+#define STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED cpu_to_le32(0xC0232000)
+#define STATUS_NDIS_DOT11_MEDIA_IN_USE cpu_to_le32(0xC0232001)
+#define STATUS_NDIS_DOT11_POWER_STATE_INVALID cpu_to_le32(0xC0232002)
+#define STATUS_IPSEC_BAD_SPI cpu_to_le32(0xC0360001)
+#define STATUS_IPSEC_SA_LIFETIME_EXPIRED cpu_to_le32(0xC0360002)
+#define STATUS_IPSEC_WRONG_SA cpu_to_le32(0xC0360003)
+#define STATUS_IPSEC_REPLAY_CHECK_FAILED cpu_to_le32(0xC0360004)
+#define STATUS_IPSEC_INVALID_PACKET cpu_to_le32(0xC0360005)
+#define STATUS_IPSEC_INTEGRITY_CHECK_FAILED cpu_to_le32(0xC0360006)
+#define STATUS_IPSEC_CLEAR_TEXT_DROP cpu_to_le32(0xC0360007)
+
+#define STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP cpu_to_le32(0xC05D0000)
+#define STATUS_INVALID_LOCK_RANGE cpu_to_le32(0xC00001A1)
diff --git a/fs/ksmbd/transport_ipc.c b/fs/ksmbd/transport_ipc.c
new file mode 100644 (file)
index 0000000..44aea33
--- /dev/null
@@ -0,0 +1,874 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/hashtable.h>
+#include <net/net_namespace.h>
+#include <net/genetlink.h>
+#include <linux/socket.h>
+#include <linux/workqueue.h>
+
+#include "vfs_cache.h"
+#include "transport_ipc.h"
+#include "server.h"
+#include "smb_common.h"
+
+#include "mgmt/user_config.h"
+#include "mgmt/share_config.h"
+#include "mgmt/user_session.h"
+#include "mgmt/tree_connect.h"
+#include "mgmt/ksmbd_ida.h"
+#include "connection.h"
+#include "transport_tcp.h"
+
+#define IPC_WAIT_TIMEOUT       (2 * HZ)
+
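+/*
+ * Pending IPC requests are parked in ipc_msg_table (keyed by handle) while
+ * the sender sleeps waiting for the matching response from ksmbd.tools.
+ */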
+#define IPC_MSG_HASH_BITS      3
+static DEFINE_HASHTABLE(ipc_msg_table, IPC_MSG_HASH_BITS);
+static DECLARE_RWSEM(ipc_msg_table_lock);
+static DEFINE_MUTEX(startup_lock);
+
+static DEFINE_IDA(ipc_ida);
+
+static unsigned int ksmbd_tools_pid;
+
+static bool ksmbd_ipc_validate_version(struct genl_info *m)
+{
+       if (m->genlhdr->version != KSMBD_GENL_VERSION) {
+               pr_err("%s. ksmbd: %d, kernel module: %d. %s.\n",
+                      "Daemon and kernel module version mismatch",
+                      m->genlhdr->version,
+                      KSMBD_GENL_VERSION,
+                      "User-space ksmbd should terminate");
+               return false;
+       }
+       return true;
+}
+
+struct ksmbd_ipc_msg {
+       unsigned int            type;
+       unsigned int            sz;
+       unsigned char           payload[];
+};
+
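+/*
+ * One entry per in-flight request: the sender sleeps on ->wait until
+ * handle_response() stores the reply payload in ->response.
+ */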
+struct ipc_msg_table_entry {
+       unsigned int            handle;
+       unsigned int            type;
+       wait_queue_head_t       wait;
+       struct hlist_node       ipc_table_hlist;
+
+       void                    *response;
+};
+
+static struct delayed_work ipc_timer_work;
+
+static int handle_startup_event(struct sk_buff *skb, struct genl_info *info);
+static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info);
+static int handle_generic_event(struct sk_buff *skb, struct genl_info *info);
+static int ksmbd_ipc_heartbeat_request(void);
+
+/* maxattr is KSMBD_EVENT_MAX, so the policy table needs KSMBD_EVENT_MAX + 1 slots */
+static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX + 1] = {
+       [KSMBD_EVENT_UNSPEC] = {
+               .len = 0,
+       },
+       [KSMBD_EVENT_HEARTBEAT_REQUEST] = {
+               .len = sizeof(struct ksmbd_heartbeat),
+       },
+       [KSMBD_EVENT_STARTING_UP] = {
+               .len = sizeof(struct ksmbd_startup_request),
+       },
+       [KSMBD_EVENT_SHUTTING_DOWN] = {
+               .len = sizeof(struct ksmbd_shutdown_request),
+       },
+       [KSMBD_EVENT_LOGIN_REQUEST] = {
+               .len = sizeof(struct ksmbd_login_request),
+       },
+       [KSMBD_EVENT_LOGIN_RESPONSE] = {
+               .len = sizeof(struct ksmbd_login_response),
+       },
+       [KSMBD_EVENT_SHARE_CONFIG_REQUEST] = {
+               .len = sizeof(struct ksmbd_share_config_request),
+       },
+       [KSMBD_EVENT_SHARE_CONFIG_RESPONSE] = {
+               .len = sizeof(struct ksmbd_share_config_response),
+       },
+       [KSMBD_EVENT_TREE_CONNECT_REQUEST] = {
+               .len = sizeof(struct ksmbd_tree_connect_request),
+       },
+       [KSMBD_EVENT_TREE_CONNECT_RESPONSE] = {
+               .len = sizeof(struct ksmbd_tree_connect_response),
+       },
+       [KSMBD_EVENT_TREE_DISCONNECT_REQUEST] = {
+               .len = sizeof(struct ksmbd_tree_disconnect_request),
+       },
+       [KSMBD_EVENT_LOGOUT_REQUEST] = {
+               .len = sizeof(struct ksmbd_logout_request),
+       },
+       [KSMBD_EVENT_RPC_REQUEST] = {
+       },
+       [KSMBD_EVENT_RPC_RESPONSE] = {
+       },
+       [KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST] = {
+       },
+       [KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE] = {
+       },
+};
+
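+/*
+ * Requests originate in the kernel, so request-type commands arriving from
+ * user space are rejected with handle_unsupported_event(); only responses
+ * (and the initial STARTING_UP message) are accepted.
+ */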
+static struct genl_ops ksmbd_genl_ops[] = {
+       {
+               .cmd    = KSMBD_EVENT_UNSPEC,
+               .doit   = handle_unsupported_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_HEARTBEAT_REQUEST,
+               .doit   = handle_unsupported_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_STARTING_UP,
+               .doit   = handle_startup_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_SHUTTING_DOWN,
+               .doit   = handle_unsupported_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_LOGIN_REQUEST,
+               .doit   = handle_unsupported_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_LOGIN_RESPONSE,
+               .doit   = handle_generic_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_SHARE_CONFIG_REQUEST,
+               .doit   = handle_unsupported_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_SHARE_CONFIG_RESPONSE,
+               .doit   = handle_generic_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_TREE_CONNECT_REQUEST,
+               .doit   = handle_unsupported_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_TREE_CONNECT_RESPONSE,
+               .doit   = handle_generic_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_TREE_DISCONNECT_REQUEST,
+               .doit   = handle_unsupported_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_LOGOUT_REQUEST,
+               .doit   = handle_unsupported_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_RPC_REQUEST,
+               .doit   = handle_unsupported_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_RPC_RESPONSE,
+               .doit   = handle_generic_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
+               .doit   = handle_unsupported_event,
+       },
+       {
+               .cmd    = KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE,
+               .doit   = handle_generic_event,
+       },
+};
+
+static struct genl_family ksmbd_genl_family = {
+       .name           = KSMBD_GENL_NAME,
+       .version        = KSMBD_GENL_VERSION,
+       .hdrsize        = 0,
+       .maxattr        = KSMBD_EVENT_MAX,
+       .netnsok        = true,
+       .module         = THIS_MODULE,
+       .ops            = ksmbd_genl_ops,
+       .n_ops          = ARRAY_SIZE(ksmbd_genl_ops),
+};
+
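+/*
+ * Relax per-op netlink validation and attach the attribute policy to the
+ * family at runtime.
+ */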
+static void ksmbd_nl_init_fixup(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ksmbd_genl_ops); i++)
+               ksmbd_genl_ops[i].validate = GENL_DONT_VALIDATE_STRICT |
+                                               GENL_DONT_VALIDATE_DUMP;
+
+       ksmbd_genl_family.policy = ksmbd_nl_policy;
+}
+
+static int rpc_context_flags(struct ksmbd_session *sess)
+{
+       if (user_guest(sess->user))
+               return KSMBD_RPC_RESTRICTED_CONTEXT;
+       return 0;
+}
+
+static void ipc_update_last_active(void)
+{
+       if (server_conf.ipc_timeout)
+               server_conf.ipc_last_active = jiffies;
+}
+
+static struct ksmbd_ipc_msg *ipc_msg_alloc(size_t sz)
+{
+       struct ksmbd_ipc_msg *msg;
+       size_t msg_sz = sz + sizeof(struct ksmbd_ipc_msg);
+
+       msg = kvmalloc(msg_sz, GFP_KERNEL | __GFP_ZERO);
+       if (msg)
+               msg->sz = sz;
+       return msg;
+}
+
+static void ipc_msg_free(struct ksmbd_ipc_msg *msg)
+{
+       kvfree(msg);
+}
+
+static void ipc_msg_handle_free(int handle)
+{
+       if (handle >= 0)
+               ksmbd_release_id(&ipc_ida, handle);
+}
+
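+/*
+ * Match an incoming response to the waiting request by handle, copy the
+ * payload and wake the sleeper in ipc_msg_send_request().
+ */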
+static int handle_response(int type, void *payload, size_t sz)
+{
+       unsigned int handle = *(unsigned int *)payload;
+       struct ipc_msg_table_entry *entry;
+       int ret = 0;
+
+       ipc_update_last_active();
+       down_read(&ipc_msg_table_lock);
+       hash_for_each_possible(ipc_msg_table, entry, ipc_table_hlist, handle) {
+               if (handle != entry->handle)
+                       continue;
+
+               entry->response = NULL;
+               /*
+                * Response message type value should be equal to
+                * request message type + 1.
+                */
+               if (entry->type + 1 != type) {
+                       pr_err("Waiting for IPC type %d, got %d. Ignore.\n",
+                              entry->type + 1, type);
+               }
+
+               entry->response = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
+               if (!entry->response) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               memcpy(entry->response, payload, sz);
+               wake_up_interruptible(&entry->wait);
+               ret = 0;
+               break;
+       }
+       up_read(&ipc_msg_table_lock);
+
+       return ret;
+}
+
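+/* Apply the server configuration carried in the STARTING_UP message. */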
+static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
+{
+       int ret;
+
+       ksmbd_set_fd_limit(req->file_max);
+       server_conf.flags = req->flags;
+       server_conf.signing = req->signing;
+       server_conf.tcp_port = req->tcp_port;
+       server_conf.ipc_timeout = req->ipc_timeout * HZ;
+       server_conf.deadtime = req->deadtime * SMB_ECHO_INTERVAL;
+       server_conf.share_fake_fscaps = req->share_fake_fscaps;
+       ksmbd_init_domain(req->sub_auth);
+
+       if (req->smb2_max_read)
+               init_smb2_max_read_size(req->smb2_max_read);
+       if (req->smb2_max_write)
+               init_smb2_max_write_size(req->smb2_max_write);
+       if (req->smb2_max_trans)
+               init_smb2_max_trans_size(req->smb2_max_trans);
+
+       ret = ksmbd_set_netbios_name(req->netbios_name);
+       ret |= ksmbd_set_server_string(req->server_string);
+       ret |= ksmbd_set_work_group(req->work_group);
+       ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req),
+                                       req->ifc_list_sz);
+       if (ret) {
+               pr_err("Server configuration error: %s %s %s\n",
+                      req->netbios_name, req->server_string,
+                      req->work_group);
+               return ret;
+       }
+
+       if (req->min_prot[0]) {
+               ret = ksmbd_lookup_protocol_idx(req->min_prot);
+               if (ret >= 0)
+                       server_conf.min_protocol = ret;
+       }
+       if (req->max_prot[0]) {
+               ret = ksmbd_lookup_protocol_idx(req->max_prot);
+               if (ret >= 0)
+                       server_conf.max_protocol = ret;
+       }
+
+       if (server_conf.ipc_timeout)
+               schedule_delayed_work(&ipc_timer_work, server_conf.ipc_timeout);
+       return 0;
+}
+
+static int handle_startup_event(struct sk_buff *skb, struct genl_info *info)
+{
+       int ret = 0;
+
+#ifdef CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
+               return -EPERM;
+#endif
+
+       if (!ksmbd_ipc_validate_version(info))
+               return -EINVAL;
+
+       if (!info->attrs[KSMBD_EVENT_STARTING_UP])
+               return -EINVAL;
+
+       mutex_lock(&startup_lock);
+       if (!ksmbd_server_configurable()) {
+               mutex_unlock(&startup_lock);
+               pr_err("Server reset is in progress, can't start daemon\n");
+               return -EINVAL;
+       }
+
+       if (ksmbd_tools_pid) {
+               if (ksmbd_ipc_heartbeat_request() == 0) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               pr_err("Reconnect to a new user space daemon\n");
+       } else {
+               struct ksmbd_startup_request *req;
+
+               req = nla_data(info->attrs[info->genlhdr->cmd]);
+               ret = ipc_server_config_on_startup(req);
+               if (ret)
+                       goto out;
+               server_queue_ctrl_init_work();
+       }
+
+       ksmbd_tools_pid = info->snd_portid;
+       ipc_update_last_active();
+
+out:
+       mutex_unlock(&startup_lock);
+       return ret;
+}
+
+static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info)
+{
+       pr_err("Unknown IPC event: %d, ignore.\n", info->genlhdr->cmd);
+       return -EINVAL;
+}
+
+static int handle_generic_event(struct sk_buff *skb, struct genl_info *info)
+{
+       void *payload;
+       int sz;
+       int type = info->genlhdr->cmd;
+
+#ifdef CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
+               return -EPERM;
+#endif
+
+       if (type >= KSMBD_EVENT_MAX) {
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       if (!ksmbd_ipc_validate_version(info))
+               return -EINVAL;
+
+       if (!info->attrs[type])
+               return -EINVAL;
+
+       payload = nla_data(info->attrs[info->genlhdr->cmd]);
+       sz = nla_len(info->attrs[info->genlhdr->cmd]);
+       return handle_response(type, payload, sz);
+}
+
+static int ipc_msg_send(struct ksmbd_ipc_msg *msg)
+{
+       struct genlmsghdr *nlh;
+       struct sk_buff *skb;
+       int ret = -EINVAL;
+
+       if (!ksmbd_tools_pid)
+               return ret;
+
+       skb = genlmsg_new(msg->sz, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       nlh = genlmsg_put(skb, 0, 0, &ksmbd_genl_family, 0, msg->type);
+       if (!nlh)
+               goto out;
+
+       ret = nla_put(skb, msg->type, msg->sz, msg->payload);
+       if (ret) {
+               genlmsg_cancel(skb, nlh);
+               goto out;
+       }
+
+       genlmsg_end(skb, nlh);
+       ret = genlmsg_unicast(&init_net, skb, ksmbd_tools_pid);
+       if (!ret)
+               ipc_update_last_active();
+       return ret;
+
+out:
+       nlmsg_free(skb);
+       return ret;
+}
+
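+/*
+ * Send a request and wait up to IPC_WAIT_TIMEOUT for the matching response.
+ * Returns a kvmalloc'ed payload (which the caller is expected to free) or
+ * NULL on send failure, timeout or interruption.
+ */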
+static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle)
+{
+       struct ipc_msg_table_entry entry;
+       int ret;
+
+       if ((int)handle < 0)
+               return NULL;
+
+       entry.type = msg->type;
+       entry.response = NULL;
+       init_waitqueue_head(&entry.wait);
+
+       down_write(&ipc_msg_table_lock);
+       entry.handle = handle;
+       hash_add(ipc_msg_table, &entry.ipc_table_hlist, entry.handle);
+       up_write(&ipc_msg_table_lock);
+
+       ret = ipc_msg_send(msg);
+       if (ret)
+               goto out;
+
+       ret = wait_event_interruptible_timeout(entry.wait,
+                                              entry.response != NULL,
+                                              IPC_WAIT_TIMEOUT);
+out:
+       down_write(&ipc_msg_table_lock);
+       hash_del(&entry.ipc_table_hlist);
+       up_write(&ipc_msg_table_lock);
+       return entry.response;
+}
+
+static int ksmbd_ipc_heartbeat_request(void)
+{
+       struct ksmbd_ipc_msg *msg;
+       int ret;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_heartbeat));
+       if (!msg)
+               return -EINVAL;
+
+       msg->type = KSMBD_EVENT_HEARTBEAT_REQUEST;
+       ret = ipc_msg_send(msg);
+       ipc_msg_free(msg);
+       return ret;
+}
+
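+/*
+ * Ask ksmbd.tools to look up @account; returns the response payload or NULL.
+ */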
+struct ksmbd_login_response *ksmbd_ipc_login_request(const char *account)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_login_request *req;
+       struct ksmbd_login_response *resp;
+
+       if (strlen(account) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
+               return NULL;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_login_request));
+       if (!msg)
+               return NULL;
+
+       msg->type = KSMBD_EVENT_LOGIN_REQUEST;
+       req = (struct ksmbd_login_request *)msg->payload;
+       req->handle = ksmbd_acquire_id(&ipc_ida);
+       strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
+
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_handle_free(req->handle);
+       ipc_msg_free(msg);
+       return resp;
+}
+
+struct ksmbd_spnego_authen_response *
+ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_spnego_authen_request *req;
+       struct ksmbd_spnego_authen_response *resp;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_spnego_authen_request) +
+                       blob_len + 1);
+       if (!msg)
+               return NULL;
+
+       msg->type = KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST;
+       req = (struct ksmbd_spnego_authen_request *)msg->payload;
+       req->handle = ksmbd_acquire_id(&ipc_ida);
+       req->spnego_blob_len = blob_len;
+       memcpy(req->spnego_blob, spnego_blob, blob_len);
+
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_handle_free(req->handle);
+       ipc_msg_free(msg);
+       return resp;
+}
+
+struct ksmbd_tree_connect_response *
+ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess,
+                              struct ksmbd_share_config *share,
+                              struct ksmbd_tree_connect *tree_conn,
+                              struct sockaddr *peer_addr)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_tree_connect_request *req;
+       struct ksmbd_tree_connect_response *resp;
+
+       if (strlen(user_name(sess->user)) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
+               return NULL;
+
+       if (strlen(share->name) >= KSMBD_REQ_MAX_SHARE_NAME)
+               return NULL;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_tree_connect_request));
+       if (!msg)
+               return NULL;
+
+       msg->type = KSMBD_EVENT_TREE_CONNECT_REQUEST;
+       req = (struct ksmbd_tree_connect_request *)msg->payload;
+
+       req->handle = ksmbd_acquire_id(&ipc_ida);
+       req->account_flags = sess->user->flags;
+       req->session_id = sess->id;
+       req->connect_id = tree_conn->id;
+       strscpy(req->account, user_name(sess->user), KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
+       strscpy(req->share, share->name, KSMBD_REQ_MAX_SHARE_NAME);
+       snprintf(req->peer_addr, sizeof(req->peer_addr), "%pIS", peer_addr);
+
+       if (peer_addr->sa_family == AF_INET6)
+               req->flags |= KSMBD_TREE_CONN_FLAG_REQUEST_IPV6;
+       if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2))
+               req->flags |= KSMBD_TREE_CONN_FLAG_REQUEST_SMB2;
+
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_handle_free(req->handle);
+       ipc_msg_free(msg);
+       return resp;
+}
+
+int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
+                                     unsigned long long connect_id)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_tree_disconnect_request *req;
+       int ret;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_tree_disconnect_request));
+       if (!msg)
+               return -ENOMEM;
+
+       msg->type = KSMBD_EVENT_TREE_DISCONNECT_REQUEST;
+       req = (struct ksmbd_tree_disconnect_request *)msg->payload;
+       req->session_id = session_id;
+       req->connect_id = connect_id;
+
+       ret = ipc_msg_send(msg);
+       ipc_msg_free(msg);
+       return ret;
+}
+
+int ksmbd_ipc_logout_request(const char *account)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_logout_request *req;
+       int ret;
+
+       if (strlen(account) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
+               return -EINVAL;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_logout_request));
+       if (!msg)
+               return -ENOMEM;
+
+       msg->type = KSMBD_EVENT_LOGOUT_REQUEST;
+       req = (struct ksmbd_logout_request *)msg->payload;
+       strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
+
+       ret = ipc_msg_send(msg);
+       ipc_msg_free(msg);
+       return ret;
+}
+
+struct ksmbd_share_config_response *
+ksmbd_ipc_share_config_request(const char *name)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_share_config_request *req;
+       struct ksmbd_share_config_response *resp;
+
+       if (strlen(name) >= KSMBD_REQ_MAX_SHARE_NAME)
+               return NULL;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_share_config_request));
+       if (!msg)
+               return NULL;
+
+       msg->type = KSMBD_EVENT_SHARE_CONFIG_REQUEST;
+       req = (struct ksmbd_share_config_request *)msg->payload;
+       req->handle = ksmbd_acquire_id(&ipc_ida);
+       strscpy(req->share_name, name, KSMBD_REQ_MAX_SHARE_NAME);
+
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_handle_free(req->handle);
+       ipc_msg_free(msg);
+       return resp;
+}
+
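+/*
+ * The RPC helpers below all follow the same pattern: allocate an IPC
+ * message carrying a struct ksmbd_rpc_command, combine the session's
+ * registered RPC method with the specific method flag (open, close,
+ * read, write, ioctl or rap), then send the request and return the
+ * response received over IPC.
+ */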
+struct ksmbd_rpc_command *ksmbd_rpc_open(struct ksmbd_session *sess, int handle)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_rpc_command *req;
+       struct ksmbd_rpc_command *resp;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
+       if (!msg)
+               return NULL;
+
+       msg->type = KSMBD_EVENT_RPC_REQUEST;
+       req = (struct ksmbd_rpc_command *)msg->payload;
+       req->handle = handle;
+       req->flags = ksmbd_session_rpc_method(sess, handle);
+       req->flags |= KSMBD_RPC_OPEN_METHOD;
+       req->payload_sz = 0;
+
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_free(msg);
+       return resp;
+}
+
+struct ksmbd_rpc_command *ksmbd_rpc_close(struct ksmbd_session *sess, int handle)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_rpc_command *req;
+       struct ksmbd_rpc_command *resp;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
+       if (!msg)
+               return NULL;
+
+       msg->type = KSMBD_EVENT_RPC_REQUEST;
+       req = (struct ksmbd_rpc_command *)msg->payload;
+       req->handle = handle;
+       req->flags = ksmbd_session_rpc_method(sess, handle);
+       req->flags |= KSMBD_RPC_CLOSE_METHOD;
+       req->payload_sz = 0;
+
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_free(msg);
+       return resp;
+}
+
+struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle,
+                                         void *payload, size_t payload_sz)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_rpc_command *req;
+       struct ksmbd_rpc_command *resp;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
+       if (!msg)
+               return NULL;
+
+       msg->type = KSMBD_EVENT_RPC_REQUEST;
+       req = (struct ksmbd_rpc_command *)msg->payload;
+       req->handle = handle;
+       req->flags = ksmbd_session_rpc_method(sess, handle);
+       req->flags |= rpc_context_flags(sess);
+       req->flags |= KSMBD_RPC_WRITE_METHOD;
+       req->payload_sz = payload_sz;
+       memcpy(req->payload, payload, payload_sz);
+
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_free(msg);
+       return resp;
+}
+
+struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_rpc_command *req;
+       struct ksmbd_rpc_command *resp;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
+       if (!msg)
+               return NULL;
+
+       msg->type = KSMBD_EVENT_RPC_REQUEST;
+       req = (struct ksmbd_rpc_command *)msg->payload;
+       req->handle = handle;
+       req->flags = ksmbd_session_rpc_method(sess, handle);
+       req->flags |= rpc_context_flags(sess);
+       req->flags |= KSMBD_RPC_READ_METHOD;
+       req->payload_sz = 0;
+
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_free(msg);
+       return resp;
+}
+
+struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle,
+                                         void *payload, size_t payload_sz)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_rpc_command *req;
+       struct ksmbd_rpc_command *resp;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
+       if (!msg)
+               return NULL;
+
+       msg->type = KSMBD_EVENT_RPC_REQUEST;
+       req = (struct ksmbd_rpc_command *)msg->payload;
+       req->handle = handle;
+       req->flags = ksmbd_session_rpc_method(sess, handle);
+       req->flags |= rpc_context_flags(sess);
+       req->flags |= KSMBD_RPC_IOCTL_METHOD;
+       req->payload_sz = payload_sz;
+       memcpy(req->payload, payload, payload_sz);
+
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_free(msg);
+       return resp;
+}
+
+struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload,
+                                       size_t payload_sz)
+{
+       struct ksmbd_ipc_msg *msg;
+       struct ksmbd_rpc_command *req;
+       struct ksmbd_rpc_command *resp;
+
+       msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
+       if (!msg)
+               return NULL;
+
+       msg->type = KSMBD_EVENT_RPC_REQUEST;
+       req = (struct ksmbd_rpc_command *)msg->payload;
+       req->handle = ksmbd_acquire_id(&ipc_ida);
+       req->flags = rpc_context_flags(sess);
+       req->flags |= KSMBD_RPC_RAP_METHOD;
+       req->payload_sz = payload_sz;
+       memcpy(req->payload, payload, payload_sz);
+
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_handle_free(req->handle);
+       ipc_msg_free(msg);
+       return resp;
+}
+
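+/*
+ * Heartbeat the IPC daemon. If there has been recent IPC activity, just
+ * rearm the timer; once the connection has been idle for ipc_timeout,
+ * send a heartbeat request. If even that gets no answer, mark the server
+ * as resetting and return an error so the caller can queue a full reset.
+ */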
+static int __ipc_heartbeat(void)
+{
+       unsigned long delta;
+
+       if (!ksmbd_server_running())
+               return 0;
+
+       if (time_after(jiffies, server_conf.ipc_last_active)) {
+               delta = (jiffies - server_conf.ipc_last_active);
+       } else {
+               ipc_update_last_active();
+               schedule_delayed_work(&ipc_timer_work,
+                                     server_conf.ipc_timeout);
+               return 0;
+       }
+
+       if (delta < server_conf.ipc_timeout) {
+               schedule_delayed_work(&ipc_timer_work,
+                                     server_conf.ipc_timeout - delta);
+               return 0;
+       }
+
+       if (ksmbd_ipc_heartbeat_request() == 0) {
+               schedule_delayed_work(&ipc_timer_work,
+                                     server_conf.ipc_timeout);
+               return 0;
+       }
+
+       mutex_lock(&startup_lock);
+       WRITE_ONCE(server_conf.state, SERVER_STATE_RESETTING);
+       server_conf.ipc_last_active = 0;
+       ksmbd_tools_pid = 0;
+       pr_err("No IPC daemon response for %lus\n", delta / HZ);
+       mutex_unlock(&startup_lock);
+       return -EINVAL;
+}
+
+static void ipc_timer_heartbeat(struct work_struct *w)
+{
+       if (__ipc_heartbeat())
+               server_queue_ctrl_reset_work();
+}
+
+int ksmbd_ipc_id_alloc(void)
+{
+       return ksmbd_acquire_id(&ipc_ida);
+}
+
+void ksmbd_rpc_id_free(int handle)
+{
+       ksmbd_release_id(&ipc_ida, handle);
+}
+
+void ksmbd_ipc_release(void)
+{
+       cancel_delayed_work_sync(&ipc_timer_work);
+       genl_unregister_family(&ksmbd_genl_family);
+}
+
+void ksmbd_ipc_soft_reset(void)
+{
+       mutex_lock(&startup_lock);
+       ksmbd_tools_pid = 0;
+       cancel_delayed_work_sync(&ipc_timer_work);
+       mutex_unlock(&startup_lock);
+}
+
+int ksmbd_ipc_init(void)
+{
+       int ret = 0;
+
+       ksmbd_nl_init_fixup();
+       INIT_DELAYED_WORK(&ipc_timer_work, ipc_timer_heartbeat);
+
+       ret = genl_register_family(&ksmbd_genl_family);
+       if (ret) {
+               pr_err("Failed to register KSMBD netlink interface %d\n", ret);
+               cancel_delayed_work_sync(&ipc_timer_work);
+       }
+
+       return ret;
+}
diff --git a/fs/ksmbd/transport_ipc.h b/fs/ksmbd/transport_ipc.h
new file mode 100644 (file)
index 0000000..9eacc89
--- /dev/null
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __KSMBD_TRANSPORT_IPC_H__
+#define __KSMBD_TRANSPORT_IPC_H__
+
+#include <linux/wait.h>
+
+#define KSMBD_IPC_MAX_PAYLOAD  4096
+
+struct ksmbd_login_response *
+ksmbd_ipc_login_request(const char *account);
+
+struct ksmbd_session;
+struct ksmbd_share_config;
+struct ksmbd_tree_connect;
+struct sockaddr;
+
+struct ksmbd_tree_connect_response *
+ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess,
+                              struct ksmbd_share_config *share,
+                              struct ksmbd_tree_connect *tree_conn,
+                              struct sockaddr *peer_addr);
+int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
+                                     unsigned long long connect_id);
+int ksmbd_ipc_logout_request(const char *account);
+struct ksmbd_share_config_response *
+ksmbd_ipc_share_config_request(const char *name);
+struct ksmbd_spnego_authen_response *
+ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len);
+int ksmbd_ipc_id_alloc(void);
+void ksmbd_rpc_id_free(int handle);
+struct ksmbd_rpc_command *ksmbd_rpc_open(struct ksmbd_session *sess, int handle);
+struct ksmbd_rpc_command *ksmbd_rpc_close(struct ksmbd_session *sess, int handle);
+struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle,
+                                         void *payload, size_t payload_sz);
+struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle);
+struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle,
+                                         void *payload, size_t payload_sz);
+struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload,
+                                       size_t payload_sz);
+void ksmbd_ipc_release(void);
+void ksmbd_ipc_soft_reset(void);
+int ksmbd_ipc_init(void);
+#endif /* __KSMBD_TRANSPORT_IPC_H__ */
diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
new file mode 100644 (file)
index 0000000..58f5300
--- /dev/null
@@ -0,0 +1,2058 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2017, Microsoft Corporation.
+ *   Copyright (C) 2018, LG Electronics.
+ *
+ *   Author(s): Long Li <longli@microsoft.com>,
+ *             Hyunchul Lee <hyc.lee@gmail.com>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ */
+
+#define SUBMOD_NAME    "smb_direct"
+
+#include <linux/kthread.h>
+#include <linux/rwlock.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/rw.h>
+
+#include "glob.h"
+#include "connection.h"
+#include "smb_common.h"
+#include "smbstatus.h"
+#include "transport_rdma.h"
+
+#define SMB_DIRECT_PORT        5445
+
+#define SMB_DIRECT_VERSION_LE          cpu_to_le16(0x0100)
+
+/* SMB_DIRECT negotiation timeout in seconds */
+#define SMB_DIRECT_NEGOTIATE_TIMEOUT           120
+
+#define SMB_DIRECT_MAX_SEND_SGES               8
+#define SMB_DIRECT_MAX_RECV_SGES               1
+
+/*
+ * Default maximum number of RDMA read/write operations outstanding on this
+ * connection. This value may be lowered during QP creation if the hardware
+ * imposes a smaller limit.
+ */
+#define SMB_DIRECT_CM_INITIATOR_DEPTH          8
+
+/* Maximum number of retries on data transfer operations */
+#define SMB_DIRECT_CM_RETRY                    6
+/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
+#define SMB_DIRECT_CM_RNR_RETRY                0
+
+/*
+ * User-configurable initial values per SMB_DIRECT transport connection,
+ * as defined in [MS-SMBD] 3.1.1.1.
+ * These may change after SMB_DIRECT negotiation.
+ */
+/* The local peer's maximum number of credits to grant to the peer */
+static int smb_direct_receive_credit_max = 255;
+
+/* The number of credits the remote peer requests from the local peer */
+static int smb_direct_send_credit_target = 255;
+
+/* The maximum size of a single message that can be sent to the remote peer */
+static int smb_direct_max_send_size = 8192;
+
+/* The maximum fragmented upper-layer payload size that can be received */
+static int smb_direct_max_fragmented_recv_size = 1024 * 1024;
+
+/* The maximum size of a single message that can be received */
+static int smb_direct_max_receive_size = 8192;
+
+static int smb_direct_max_read_write_size = 1024 * 1024;
+
+static int smb_direct_max_outstanding_rw_ops = 8;
+
+static struct smb_direct_listener {
+       struct rdma_cm_id       *cm_id;
+} smb_direct_listener;
+
+static struct workqueue_struct *smb_direct_wq;
+
+enum smb_direct_status {
+       SMB_DIRECT_CS_NEW = 0,
+       SMB_DIRECT_CS_CONNECTED,
+       SMB_DIRECT_CS_DISCONNECTING,
+       SMB_DIRECT_CS_DISCONNECTED,
+};
+
+struct smb_direct_transport {
+       struct ksmbd_transport  transport;
+
+       enum smb_direct_status  status;
+       bool                    full_packet_received;
+       wait_queue_head_t       wait_status;
+
+       struct rdma_cm_id       *cm_id;
+       struct ib_cq            *send_cq;
+       struct ib_cq            *recv_cq;
+       struct ib_pd            *pd;
+       struct ib_qp            *qp;
+
+       int                     max_send_size;
+       int                     max_recv_size;
+       int                     max_fragmented_send_size;
+       int                     max_fragmented_recv_size;
+       int                     max_rdma_rw_size;
+
+       spinlock_t              reassembly_queue_lock;
+       struct list_head        reassembly_queue;
+       int                     reassembly_data_length;
+       int                     reassembly_queue_length;
+       int                     first_entry_offset;
+       wait_queue_head_t       wait_reassembly_queue;
+
+       spinlock_t              receive_credit_lock;
+       int                     recv_credits;
+       int                     count_avail_recvmsg;
+       int                     recv_credit_max;
+       int                     recv_credit_target;
+
+       spinlock_t              recvmsg_queue_lock;
+       struct list_head        recvmsg_queue;
+
+       spinlock_t              empty_recvmsg_queue_lock;
+       struct list_head        empty_recvmsg_queue;
+
+       int                     send_credit_target;
+       atomic_t                send_credits;
+       spinlock_t              lock_new_recv_credits;
+       int                     new_recv_credits;
+       atomic_t                rw_avail_ops;
+
+       wait_queue_head_t       wait_send_credits;
+       wait_queue_head_t       wait_rw_avail_ops;
+
+       mempool_t               *sendmsg_mempool;
+       struct kmem_cache       *sendmsg_cache;
+       mempool_t               *recvmsg_mempool;
+       struct kmem_cache       *recvmsg_cache;
+
+       wait_queue_head_t       wait_send_payload_pending;
+       atomic_t                send_payload_pending;
+       wait_queue_head_t       wait_send_pending;
+       atomic_t                send_pending;
+
+       struct delayed_work     post_recv_credits_work;
+       struct work_struct      send_immediate_work;
+       struct work_struct      disconnect_work;
+
+       bool                    negotiation_requested;
+};
+
+#define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))
+
+enum {
+       SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
+       SMB_DIRECT_MSG_DATA_TRANSFER
+};
+
+static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops;
+
+struct smb_direct_send_ctx {
+       struct list_head        msg_list;
+       int                     wr_cnt;
+       bool                    need_invalidate_rkey;
+       unsigned int            remote_key;
+};
+
+struct smb_direct_sendmsg {
+       struct smb_direct_transport     *transport;
+       struct ib_send_wr       wr;
+       struct list_head        list;
+       int                     num_sge;
+       struct ib_sge           sge[SMB_DIRECT_MAX_SEND_SGES];
+       struct ib_cqe           cqe;
+       u8                      packet[];
+};
+
+struct smb_direct_recvmsg {
+       struct smb_direct_transport     *transport;
+       struct list_head        list;
+       int                     type;
+       struct ib_sge           sge;
+       struct ib_cqe           cqe;
+       bool                    first_segment;
+       u8                      packet[];
+};
+
+struct smb_direct_rdma_rw_msg {
+       struct smb_direct_transport     *t;
+       struct ib_cqe           cqe;
+       struct completion       *completion;
+       struct rdma_rw_ctx      rw_ctx;
+       struct sg_table         sgt;
+       struct scatterlist      sg_list[];
+};
+
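+/* Number of pages spanned by the buffer [buf, buf + size) */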
+static inline int get_buf_page_count(void *buf, int size)
+{
+       return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
+               (uintptr_t)buf / PAGE_SIZE;
+}
+
+static void smb_direct_destroy_pools(struct smb_direct_transport *transport);
+static void smb_direct_post_recv_credits(struct work_struct *work);
+static int smb_direct_post_send_data(struct smb_direct_transport *t,
+                                    struct smb_direct_send_ctx *send_ctx,
+                                    struct kvec *iov, int niov,
+                                    int remaining_data_length);
+
+static inline struct smb_direct_transport *
+smb_trans_direct_transfort(struct ksmbd_transport *t)
+{
+       return container_of(t, struct smb_direct_transport, transport);
+}
+
+static inline void
+*smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg)
+{
+       return (void *)recvmsg->packet;
+}
+
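+/*
+ * Post more receive buffers once the granted receive credits have fallen
+ * to 1/8 of the configured maximum and at least a quarter of the
+ * remaining credits' worth of receive messages are available to repost.
+ */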
+static inline bool is_receive_credit_post_required(int receive_credits,
+                                                  int avail_recvmsg_count)
+{
+       return receive_credits <= (smb_direct_receive_credit_max >> 3) &&
+               avail_recvmsg_count >= (receive_credits >> 2);
+}
+
+static struct
+smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
+{
+       struct smb_direct_recvmsg *recvmsg = NULL;
+
+       spin_lock(&t->recvmsg_queue_lock);
+       if (!list_empty(&t->recvmsg_queue)) {
+               recvmsg = list_first_entry(&t->recvmsg_queue,
+                                          struct smb_direct_recvmsg,
+                                          list);
+               list_del(&recvmsg->list);
+       }
+       spin_unlock(&t->recvmsg_queue_lock);
+       return recvmsg;
+}
+
+static void put_recvmsg(struct smb_direct_transport *t,
+                       struct smb_direct_recvmsg *recvmsg)
+{
+       ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
+                           recvmsg->sge.length, DMA_FROM_DEVICE);
+
+       spin_lock(&t->recvmsg_queue_lock);
+       list_add(&recvmsg->list, &t->recvmsg_queue);
+       spin_unlock(&t->recvmsg_queue_lock);
+}
+
+static struct
+smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
+{
+       struct smb_direct_recvmsg *recvmsg = NULL;
+
+       spin_lock(&t->empty_recvmsg_queue_lock);
+       if (!list_empty(&t->empty_recvmsg_queue)) {
+               recvmsg = list_first_entry(&t->empty_recvmsg_queue,
+                                          struct smb_direct_recvmsg, list);
+               list_del(&recvmsg->list);
+       }
+       spin_unlock(&t->empty_recvmsg_queue_lock);
+       return recvmsg;
+}
+
+static void put_empty_recvmsg(struct smb_direct_transport *t,
+                             struct smb_direct_recvmsg *recvmsg)
+{
+       ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
+                           recvmsg->sge.length, DMA_FROM_DEVICE);
+
+       spin_lock(&t->empty_recvmsg_queue_lock);
+       list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
+       spin_unlock(&t->empty_recvmsg_queue_lock);
+}
+
+static void enqueue_reassembly(struct smb_direct_transport *t,
+                              struct smb_direct_recvmsg *recvmsg,
+                              int data_length)
+{
+       spin_lock(&t->reassembly_queue_lock);
+       list_add_tail(&recvmsg->list, &t->reassembly_queue);
+       t->reassembly_queue_length++;
+       /*
+        * Make sure reassembly_data_length is updated only after the list and
+        * reassembly_queue_length have been updated. On the dequeue side
+        * reassembly_data_length is checked without a lock to determine
+        * whether reassembly_queue_length and the list are up to date.
+        */
+       virt_wmb();
+       t->reassembly_data_length += data_length;
+       spin_unlock(&t->reassembly_queue_lock);
+}
+
+static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t)
+{
+       if (!list_empty(&t->reassembly_queue))
+               return list_first_entry(&t->reassembly_queue,
+                               struct smb_direct_recvmsg, list);
+       else
+               return NULL;
+}
+
+static void smb_direct_disconnect_rdma_work(struct work_struct *work)
+{
+       struct smb_direct_transport *t =
+               container_of(work, struct smb_direct_transport,
+                            disconnect_work);
+
+       if (t->status == SMB_DIRECT_CS_CONNECTED) {
+               t->status = SMB_DIRECT_CS_DISCONNECTING;
+               rdma_disconnect(t->cm_id);
+       }
+}
+
+static void
+smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t)
+{
+       if (t->status == SMB_DIRECT_CS_CONNECTED)
+               queue_work(smb_direct_wq, &t->disconnect_work);
+}
+
+static void smb_direct_send_immediate_work(struct work_struct *work)
+{
+       struct smb_direct_transport *t = container_of(work,
+                       struct smb_direct_transport, send_immediate_work);
+
+       if (t->status != SMB_DIRECT_CS_CONNECTED)
+               return;
+
+       smb_direct_post_send_data(t, NULL, NULL, 0, 0);
+}
+
+static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
+{
+       struct smb_direct_transport *t;
+       struct ksmbd_conn *conn;
+
+       t = kzalloc(sizeof(*t), GFP_KERNEL);
+       if (!t)
+               return NULL;
+
+       t->cm_id = cm_id;
+       cm_id->context = t;
+
+       t->status = SMB_DIRECT_CS_NEW;
+       init_waitqueue_head(&t->wait_status);
+
+       spin_lock_init(&t->reassembly_queue_lock);
+       INIT_LIST_HEAD(&t->reassembly_queue);
+       t->reassembly_data_length = 0;
+       t->reassembly_queue_length = 0;
+       init_waitqueue_head(&t->wait_reassembly_queue);
+       init_waitqueue_head(&t->wait_send_credits);
+       init_waitqueue_head(&t->wait_rw_avail_ops);
+
+       spin_lock_init(&t->receive_credit_lock);
+       spin_lock_init(&t->recvmsg_queue_lock);
+       INIT_LIST_HEAD(&t->recvmsg_queue);
+
+       spin_lock_init(&t->empty_recvmsg_queue_lock);
+       INIT_LIST_HEAD(&t->empty_recvmsg_queue);
+
+       init_waitqueue_head(&t->wait_send_payload_pending);
+       atomic_set(&t->send_payload_pending, 0);
+       init_waitqueue_head(&t->wait_send_pending);
+       atomic_set(&t->send_pending, 0);
+
+       spin_lock_init(&t->lock_new_recv_credits);
+
+       INIT_DELAYED_WORK(&t->post_recv_credits_work,
+                         smb_direct_post_recv_credits);
+       INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work);
+       INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work);
+
+       conn = ksmbd_conn_alloc();
+       if (!conn)
+               goto err;
+       conn->transport = KSMBD_TRANS(t);
+       KSMBD_TRANS(t)->conn = conn;
+       KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
+       return t;
+err:
+       kfree(t);
+       return NULL;
+}
+
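+/*
+ * Tear down a transport: wait for all posted sends to complete, stop the
+ * work items, drain and destroy the QP, return the reassembly queue
+ * entries to the receive pool, then release the CQs, PD, CM id, memory
+ * pools and the connection.
+ */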
+static void free_transport(struct smb_direct_transport *t)
+{
+       struct smb_direct_recvmsg *recvmsg;
+
+       wake_up_interruptible(&t->wait_send_credits);
+
+       ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
+       wait_event(t->wait_send_payload_pending,
+                  atomic_read(&t->send_payload_pending) == 0);
+       wait_event(t->wait_send_pending,
+                  atomic_read(&t->send_pending) == 0);
+
+       cancel_work_sync(&t->disconnect_work);
+       cancel_delayed_work_sync(&t->post_recv_credits_work);
+       cancel_work_sync(&t->send_immediate_work);
+
+       if (t->qp) {
+               ib_drain_qp(t->qp);
+               ib_destroy_qp(t->qp);
+       }
+
+       ksmbd_debug(RDMA, "drain the reassembly queue\n");
+       do {
+               spin_lock(&t->reassembly_queue_lock);
+               recvmsg = get_first_reassembly(t);
+               if (recvmsg) {
+                       list_del(&recvmsg->list);
+                       spin_unlock(&t->reassembly_queue_lock);
+                       put_recvmsg(t, recvmsg);
+               } else {
+                       spin_unlock(&t->reassembly_queue_lock);
+               }
+       } while (recvmsg);
+       t->reassembly_data_length = 0;
+
+       if (t->send_cq)
+               ib_free_cq(t->send_cq);
+       if (t->recv_cq)
+               ib_free_cq(t->recv_cq);
+       if (t->pd)
+               ib_dealloc_pd(t->pd);
+       if (t->cm_id)
+               rdma_destroy_id(t->cm_id);
+
+       smb_direct_destroy_pools(t);
+       ksmbd_conn_free(KSMBD_TRANS(t)->conn);
+       kfree(t);
+}
+
+static struct smb_direct_sendmsg
+*smb_direct_alloc_sendmsg(struct smb_direct_transport *t)
+{
+       struct smb_direct_sendmsg *msg;
+
+       msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL);
+       if (!msg)
+               return ERR_PTR(-ENOMEM);
+       msg->transport = t;
+       INIT_LIST_HEAD(&msg->list);
+       msg->num_sge = 0;
+       return msg;
+}
+
+static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
+                                   struct smb_direct_sendmsg *msg)
+{
+       int i;
+
+       if (msg->num_sge > 0) {
+               ib_dma_unmap_single(t->cm_id->device,
+                                   msg->sge[0].addr, msg->sge[0].length,
+                                   DMA_TO_DEVICE);
+               for (i = 1; i < msg->num_sge; i++)
+                       ib_dma_unmap_page(t->cm_id->device,
+                                         msg->sge[i].addr, msg->sge[i].length,
+                                         DMA_TO_DEVICE);
+       }
+       mempool_free(msg, t->sendmsg_mempool);
+}
+
+static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
+{
+       switch (recvmsg->type) {
+       case SMB_DIRECT_MSG_DATA_TRANSFER: {
+               struct smb_direct_data_transfer *req =
+                       (struct smb_direct_data_transfer *)recvmsg->packet;
+               struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
+                               + le32_to_cpu(req->data_offset) - 4);
+               ksmbd_debug(RDMA,
+                           "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
+                           le16_to_cpu(req->credits_granted),
+                           le16_to_cpu(req->credits_requested),
+                           req->data_length, req->remaining_data_length,
+                           hdr->ProtocolId, hdr->Command);
+               break;
+       }
+       case SMB_DIRECT_MSG_NEGOTIATE_REQ: {
+               struct smb_direct_negotiate_req *req =
+                       (struct smb_direct_negotiate_req *)recvmsg->packet;
+               ksmbd_debug(RDMA,
+                           "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n",
+                           le16_to_cpu(req->min_version),
+                           le16_to_cpu(req->max_version),
+                           le16_to_cpu(req->credits_requested),
+                           le32_to_cpu(req->preferred_send_size),
+                           le32_to_cpu(req->max_receive_size),
+                           le32_to_cpu(req->max_fragmented_size));
+               if (le16_to_cpu(req->min_version) > 0x0100 ||
+                   le16_to_cpu(req->max_version) < 0x0100)
+                       return -EOPNOTSUPP;
+               if (le16_to_cpu(req->credits_requested) <= 0 ||
+                   le32_to_cpu(req->max_receive_size) <= 128 ||
+                   le32_to_cpu(req->max_fragmented_size) <=
+                                       128 * 1024)
+                       return -ECONNABORTED;
+
+               break;
+       }
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
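+/*
+ * Receive completion handler. A negotiate request wakes up the thread
+ * waiting on wait_status; a data transfer is queued for reassembly, the
+ * credit counters are updated from the packet header, and fresh receive
+ * buffers are reposted when they run low.
+ */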
+static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       struct smb_direct_recvmsg *recvmsg;
+       struct smb_direct_transport *t;
+
+       recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
+       t = recvmsg->transport;
+
+       if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
+               if (wc->status != IB_WC_WR_FLUSH_ERR) {
+                       pr_err("Recv error. status='%s (%d)' opcode=%d\n",
+                              ib_wc_status_msg(wc->status), wc->status,
+                              wc->opcode);
+                       smb_direct_disconnect_rdma_connection(t);
+               }
+               put_empty_recvmsg(t, recvmsg);
+               return;
+       }
+
+       ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n",
+                   ib_wc_status_msg(wc->status), wc->status,
+                   wc->opcode);
+
+       ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
+                                  recvmsg->sge.length, DMA_FROM_DEVICE);
+
+       switch (recvmsg->type) {
+       case SMB_DIRECT_MSG_NEGOTIATE_REQ:
+               t->negotiation_requested = true;
+               t->full_packet_received = true;
+               wake_up_interruptible(&t->wait_status);
+               break;
+       case SMB_DIRECT_MSG_DATA_TRANSFER: {
+               struct smb_direct_data_transfer *data_transfer =
+                       (struct smb_direct_data_transfer *)recvmsg->packet;
+               int data_length = le32_to_cpu(data_transfer->data_length);
+               int avail_recvmsg_count, receive_credits;
+
+               if (data_length) {
+                       if (t->full_packet_received)
+                               recvmsg->first_segment = true;
+
+                       if (le32_to_cpu(data_transfer->remaining_data_length))
+                               t->full_packet_received = false;
+                       else
+                               t->full_packet_received = true;
+
+                       enqueue_reassembly(t, recvmsg, data_length);
+                       wake_up_interruptible(&t->wait_reassembly_queue);
+
+                       spin_lock(&t->receive_credit_lock);
+                       receive_credits = --(t->recv_credits);
+                       avail_recvmsg_count = t->count_avail_recvmsg;
+                       spin_unlock(&t->receive_credit_lock);
+               } else {
+                       put_empty_recvmsg(t, recvmsg);
+
+                       spin_lock(&t->receive_credit_lock);
+                       receive_credits = --(t->recv_credits);
+                       avail_recvmsg_count = ++(t->count_avail_recvmsg);
+                       spin_unlock(&t->receive_credit_lock);
+               }
+
+               t->recv_credit_target =
+                               le16_to_cpu(data_transfer->credits_requested);
+               atomic_add(le16_to_cpu(data_transfer->credits_granted),
+                          &t->send_credits);
+
+               if (le16_to_cpu(data_transfer->flags) &
+                   SMB_DIRECT_RESPONSE_REQUESTED)
+                       queue_work(smb_direct_wq, &t->send_immediate_work);
+
+               if (atomic_read(&t->send_credits) > 0)
+                       wake_up_interruptible(&t->wait_send_credits);
+
+               if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
+                       mod_delayed_work(smb_direct_wq,
+                                        &t->post_recv_credits_work, 0);
+               break;
+       }
+       default:
+               break;
+       }
+}
+
+static int smb_direct_post_recv(struct smb_direct_transport *t,
+                               struct smb_direct_recvmsg *recvmsg)
+{
+       struct ib_recv_wr wr;
+       int ret;
+
+       recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device,
+                                             recvmsg->packet, t->max_recv_size,
+                                             DMA_FROM_DEVICE);
+       ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr);
+       if (ret)
+               return ret;
+       recvmsg->sge.length = t->max_recv_size;
+       recvmsg->sge.lkey = t->pd->local_dma_lkey;
+       recvmsg->cqe.done = recv_done;
+
+       wr.wr_cqe = &recvmsg->cqe;
+       wr.next = NULL;
+       wr.sg_list = &recvmsg->sge;
+       wr.num_sge = 1;
+
+       ret = ib_post_recv(t->qp, &wr, NULL);
+       if (ret) {
+               pr_err("Can't post recv: %d\n", ret);
+               ib_dma_unmap_single(t->cm_id->device,
+                                   recvmsg->sge.addr, recvmsg->sge.length,
+                                   DMA_FROM_DEVICE);
+               smb_direct_disconnect_rdma_connection(t);
+               return ret;
+       }
+       return ret;
+}
+
+static int smb_direct_read(struct ksmbd_transport *t, char *buf,
+                          unsigned int size)
+{
+       struct smb_direct_recvmsg *recvmsg;
+       struct smb_direct_data_transfer *data_transfer;
+       int to_copy, to_read, data_read, offset;
+       u32 data_length, remaining_data_length, data_offset;
+       int rc;
+       struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+
+again:
+       if (st->status != SMB_DIRECT_CS_CONNECTED) {
+               pr_err("disconnected\n");
+               return -ENOTCONN;
+       }
+
+       /*
+        * No need to hold the reassembly queue lock all the time as we are
+        * the only one reading from the front of the queue. The transport
+        * may add more entries to the back of the queue at the same time
+        */
+       if (st->reassembly_data_length >= size) {
+               int queue_length;
+               int queue_removed = 0;
+
+               /*
+                * Need to make sure reassembly_data_length is read before
+                * reading reassembly_queue_length and calling
+                * get_first_reassembly. This access is lock-free because we
+                * never read the entries at the end of the queue, which are
+                * being updated in softirq context as more data is received.
+                */
+               virt_rmb();
+               queue_length = st->reassembly_queue_length;
+               data_read = 0;
+               to_read = size;
+               offset = st->first_entry_offset;
+               while (data_read < size) {
+                       recvmsg = get_first_reassembly(st);
+                       data_transfer = smb_direct_recvmsg_payload(recvmsg);
+                       data_length = le32_to_cpu(data_transfer->data_length);
+                       remaining_data_length =
+                               le32_to_cpu(data_transfer->remaining_data_length);
+                       data_offset = le32_to_cpu(data_transfer->data_offset);
+
+                       /*
+                        * The upper layer expects the RFC1002 length at the
+                        * beginning of the payload. Return it to indicate the
+                        * total length of the packet. This minimizes the
+                        * changes to the upper-layer packet processing logic
+                        * and will eventually be removed once an intermediate
+                        * transport layer is added.
+                        */
+                       if (recvmsg->first_segment && size == 4) {
+                               unsigned int rfc1002_len =
+                                       data_length + remaining_data_length;
+                               *((__be32 *)buf) = cpu_to_be32(rfc1002_len);
+                               data_read = 4;
+                               recvmsg->first_segment = false;
+                               ksmbd_debug(RDMA,
+                                           "returning rfc1002 length %d\n",
+                                           rfc1002_len);
+                               goto read_rfc1002_done;
+                       }
+
+                       to_copy = min_t(int, data_length - offset, to_read);
+                       memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
+                              to_copy);
+
+                       /* move on to the next buffer? */
+                       if (to_copy == data_length - offset) {
+                               queue_length--;
+                               /*
+                                * No need to lock if we are not at the
+                                * end of the queue
+                                */
+                               if (queue_length) {
+                                       list_del(&recvmsg->list);
+                               } else {
+                                       spin_lock_irq(&st->reassembly_queue_lock);
+                                       list_del(&recvmsg->list);
+                                       spin_unlock_irq(&st->reassembly_queue_lock);
+                               }
+                               queue_removed++;
+                               put_recvmsg(st, recvmsg);
+                               offset = 0;
+                       } else {
+                               offset += to_copy;
+                       }
+
+                       to_read -= to_copy;
+                       data_read += to_copy;
+               }
+
+               spin_lock_irq(&st->reassembly_queue_lock);
+               st->reassembly_data_length -= data_read;
+               st->reassembly_queue_length -= queue_removed;
+               spin_unlock_irq(&st->reassembly_queue_lock);
+
+               spin_lock(&st->receive_credit_lock);
+               st->count_avail_recvmsg += queue_removed;
+               if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
+                       spin_unlock(&st->receive_credit_lock);
+                       mod_delayed_work(smb_direct_wq,
+                                        &st->post_recv_credits_work, 0);
+               } else {
+                       spin_unlock(&st->receive_credit_lock);
+               }
+
+               st->first_entry_offset = offset;
+               ksmbd_debug(RDMA,
+                           "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
+                           data_read, st->reassembly_data_length,
+                           st->first_entry_offset);
+read_rfc1002_done:
+               return data_read;
+       }
+
+       ksmbd_debug(RDMA, "wait_event on more data\n");
+       rc = wait_event_interruptible(st->wait_reassembly_queue,
+                                     st->reassembly_data_length >= size ||
+                                      st->status != SMB_DIRECT_CS_CONNECTED);
+       if (rc)
+               return -EINTR;
+
+       goto again;
+}
+
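+/*
+ * Repost receive buffers until recv_credit_target is reached, preferring
+ * the free recvmsg pool and falling back to the empty-message pool, then
+ * account the newly granted credits and kick an immediate send so the
+ * peer learns about them.
+ */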
+static void smb_direct_post_recv_credits(struct work_struct *work)
+{
+       struct smb_direct_transport *t = container_of(work,
+               struct smb_direct_transport, post_recv_credits_work.work);
+       struct smb_direct_recvmsg *recvmsg;
+       int receive_credits, credits = 0;
+       int ret;
+       int use_free = 1;
+
+       spin_lock(&t->receive_credit_lock);
+       receive_credits = t->recv_credits;
+       spin_unlock(&t->receive_credit_lock);
+
+       if (receive_credits < t->recv_credit_target) {
+               while (true) {
+                       if (use_free)
+                               recvmsg = get_free_recvmsg(t);
+                       else
+                               recvmsg = get_empty_recvmsg(t);
+                       if (!recvmsg) {
+                               if (use_free) {
+                                       use_free = 0;
+                                       continue;
+                               } else {
+                                       break;
+                               }
+                       }
+
+                       recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
+                       recvmsg->first_segment = false;
+
+                       ret = smb_direct_post_recv(t, recvmsg);
+                       if (ret) {
+                               pr_err("Can't post recv: %d\n", ret);
+                               put_recvmsg(t, recvmsg);
+                               break;
+                       }
+                       credits++;
+               }
+       }
+
+       spin_lock(&t->receive_credit_lock);
+       t->recv_credits += credits;
+       t->count_avail_recvmsg -= credits;
+       spin_unlock(&t->receive_credit_lock);
+
+       spin_lock(&t->lock_new_recv_credits);
+       t->new_recv_credits += credits;
+       spin_unlock(&t->lock_new_recv_credits);
+
+       if (credits)
+               queue_work(smb_direct_wq, &t->send_immediate_work);
+}
+
+static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       struct smb_direct_sendmsg *sendmsg, *sibling;
+       struct smb_direct_transport *t;
+       struct list_head *pos, *prev, *end;
+
+       sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
+       t = sendmsg->transport;
+
+       ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
+                   ib_wc_status_msg(wc->status), wc->status,
+                   wc->opcode);
+
+       if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
+               pr_err("Send error. status='%s (%d)', opcode=%d\n",
+                      ib_wc_status_msg(wc->status), wc->status,
+                      wc->opcode);
+               smb_direct_disconnect_rdma_connection(t);
+       }
+
+       if (sendmsg->num_sge > 1) {
+               if (atomic_dec_and_test(&t->send_payload_pending))
+                       wake_up(&t->wait_send_payload_pending);
+       } else {
+               if (atomic_dec_and_test(&t->send_pending))
+                       wake_up(&t->wait_send_pending);
+       }
+
+       /*
+        * Iterate the list of messages in reverse and free each one; the
+        * list head itself is not a valid entry.
+        */
+       for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next;
+            prev != end; pos = prev, prev = prev->prev) {
+               sibling = container_of(pos, struct smb_direct_sendmsg, list);
+               smb_direct_free_sendmsg(t, sibling);
+       }
+
+       sibling = container_of(pos, struct smb_direct_sendmsg, list);
+       smb_direct_free_sendmsg(t, sibling);
+}
+
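+/*
+ * Atomically take the receive credits accumulated since the last send so
+ * they can be granted to the peer in the next packet header.
+ */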
+static int manage_credits_prior_sending(struct smb_direct_transport *t)
+{
+       int new_credits;
+
+       spin_lock(&t->lock_new_recv_credits);
+       new_credits = t->new_recv_credits;
+       t->new_recv_credits = 0;
+       spin_unlock(&t->lock_new_recv_credits);
+
+       return new_credits;
+}
+
+static int smb_direct_post_send(struct smb_direct_transport *t,
+                               struct ib_send_wr *wr)
+{
+       int ret;
+
+       if (wr->num_sge > 1)
+               atomic_inc(&t->send_payload_pending);
+       else
+               atomic_inc(&t->send_pending);
+
+       ret = ib_post_send(t->qp, wr, NULL);
+       if (ret) {
+               pr_err("failed to post send: %d\n", ret);
+               if (wr->num_sge > 1) {
+                       if (atomic_dec_and_test(&t->send_payload_pending))
+                               wake_up(&t->wait_send_payload_pending);
+               } else {
+                       if (atomic_dec_and_test(&t->send_pending))
+                               wake_up(&t->wait_send_pending);
+               }
+               smb_direct_disconnect_rdma_connection(t);
+       }
+       return ret;
+}
+
+static void smb_direct_send_ctx_init(struct smb_direct_transport *t,
+                                    struct smb_direct_send_ctx *send_ctx,
+                                    bool need_invalidate_rkey,
+                                    unsigned int remote_key)
+{
+       INIT_LIST_HEAD(&send_ctx->msg_list);
+       send_ctx->wr_cnt = 0;
+       send_ctx->need_invalidate_rkey = need_invalidate_rkey;
+       send_ctx->remote_key = remote_key;
+}
+
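+/*
+ * Post the batched send list as one chained work request. Only the last
+ * WR is signaled and, for the final flush, carries the remote key
+ * invalidation. On failure the reserved send credits are returned and
+ * the queued messages are freed.
+ */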
+static int smb_direct_flush_send_list(struct smb_direct_transport *t,
+                                     struct smb_direct_send_ctx *send_ctx,
+                                     bool is_last)
+{
+       struct smb_direct_sendmsg *first, *last;
+       int ret;
+
+       if (list_empty(&send_ctx->msg_list))
+               return 0;
+
+       first = list_first_entry(&send_ctx->msg_list,
+                                struct smb_direct_sendmsg,
+                                list);
+       last = list_last_entry(&send_ctx->msg_list,
+                              struct smb_direct_sendmsg,
+                              list);
+
+       last->wr.send_flags = IB_SEND_SIGNALED;
+       last->wr.wr_cqe = &last->cqe;
+       if (is_last && send_ctx->need_invalidate_rkey) {
+               last->wr.opcode = IB_WR_SEND_WITH_INV;
+               last->wr.ex.invalidate_rkey = send_ctx->remote_key;
+       }
+
+       ret = smb_direct_post_send(t, &first->wr);
+       if (!ret) {
+               smb_direct_send_ctx_init(t, send_ctx,
+                                        send_ctx->need_invalidate_rkey,
+                                        send_ctx->remote_key);
+       } else {
+               atomic_add(send_ctx->wr_cnt, &t->send_credits);
+               wake_up(&t->wait_send_credits);
+               list_for_each_entry_safe(first, last, &send_ctx->msg_list,
+                                        list) {
+                       smb_direct_free_sendmsg(t, first);
+               }
+       }
+       return ret;
+}
+
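+/*
+ * Take one credit, or sleep until a credit becomes available or the
+ * connection is torn down. The speculative atomic_dec_return() is undone
+ * before sleeping so the counter does not stay negative across the wait.
+ */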
+static int wait_for_credits(struct smb_direct_transport *t,
+                           wait_queue_head_t *waitq, atomic_t *credits)
+{
+       int ret;
+
+       do {
+               if (atomic_dec_return(credits) >= 0)
+                       return 0;
+
+               atomic_inc(credits);
+               ret = wait_event_interruptible(*waitq,
+                                              atomic_read(credits) > 0 ||
+                                               t->status != SMB_DIRECT_CS_CONNECTED);
+
+               if (t->status != SMB_DIRECT_CS_CONNECTED)
+                       return -ENOTCONN;
+               else if (ret < 0)
+                       return ret;
+       } while (true);
+}
+
+static int wait_for_send_credits(struct smb_direct_transport *t,
+                                struct smb_direct_send_ctx *send_ctx)
+{
+       int ret;
+
+       if (send_ctx &&
+           (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) {
+               ret = smb_direct_flush_send_list(t, send_ctx, false);
+               if (ret)
+                       return ret;
+       }
+
+       return wait_for_credits(t, &t->wait_send_credits, &t->send_credits);
+}
+
+static int smb_direct_create_header(struct smb_direct_transport *t,
+                                   int size, int remaining_data_length,
+                                   struct smb_direct_sendmsg **sendmsg_out)
+{
+       struct smb_direct_sendmsg *sendmsg;
+       struct smb_direct_data_transfer *packet;
+       int header_length;
+       int ret;
+
+       sendmsg = smb_direct_alloc_sendmsg(t);
+       if (IS_ERR(sendmsg))
+               return PTR_ERR(sendmsg);
+
+       /* Fill in the packet header */
+       packet = (struct smb_direct_data_transfer *)sendmsg->packet;
+       packet->credits_requested = cpu_to_le16(t->send_credit_target);
+       packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
+
+       packet->flags = 0;
+       packet->reserved = 0;
+       if (!size)
+               packet->data_offset = 0;
+       else
+               packet->data_offset = cpu_to_le32(24);
+       packet->data_length = cpu_to_le32(size);
+       packet->remaining_data_length = cpu_to_le32(remaining_data_length);
+       packet->padding = 0;
+
+       ksmbd_debug(RDMA,
+                   "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
+                   le16_to_cpu(packet->credits_requested),
+                   le16_to_cpu(packet->credits_granted),
+                   le32_to_cpu(packet->data_offset),
+                   le32_to_cpu(packet->data_length),
+                   le32_to_cpu(packet->remaining_data_length));
+
+       /* Map the packet to DMA */
+       header_length = sizeof(struct smb_direct_data_transfer);
+       /* If this is a packet without payload, don't send padding */
+       if (!size)
+               header_length =
+                       offsetof(struct smb_direct_data_transfer, padding);
+
+       sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
+                                                (void *)packet,
+                                                header_length,
+                                                DMA_TO_DEVICE);
+       ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
+       if (ret) {
+               smb_direct_free_sendmsg(t, sendmsg);
+               return ret;
+       }
+
+       sendmsg->num_sge = 1;
+       sendmsg->sge[0].length = header_length;
+       sendmsg->sge[0].lkey = t->pd->local_dma_lkey;
+
+       *sendmsg_out = sendmsg;
+       return 0;
+}
+
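+/*
+ * Fill @sg_list with one entry per page covered by [buf, buf + size),
+ * resolving vmalloc and kmapped addresses to their backing pages.
+ * Returns the number of entries used, or -EINVAL if @nentries is too
+ * small for the buffer.
+ */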
+static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries)
+{
+       bool high = is_vmalloc_addr(buf);
+       struct page *page;
+       int offset, len;
+       int i = 0;
+
+       if (nentries < get_buf_page_count(buf, size))
+               return -EINVAL;
+
+       offset = offset_in_page(buf);
+       buf -= offset;
+       while (size > 0) {
+               len = min_t(int, PAGE_SIZE - offset, size);
+               if (high)
+                       page = vmalloc_to_page(buf);
+               else
+                       page = kmap_to_page(buf);
+
+               if (!sg_list)
+                       return -EINVAL;
+               sg_set_page(sg_list, page, len, offset);
+               sg_list = sg_next(sg_list);
+
+               buf += PAGE_SIZE;
+               size -= len;
+               offset = 0;
+               i++;
+       }
+       return i;
+}
+
+static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
+                             struct scatterlist *sg_list, int nentries,
+                             enum dma_data_direction dir)
+{
+       int npages;
+
+       npages = get_sg_list(buf, size, sg_list, nentries);
+       if (npages <= 0)
+               return -EINVAL;
+       return ib_dma_map_sg(device, sg_list, npages, dir);
+}
+
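+/*
+ * Queue or post a send. With a send context the WR is only chained onto
+ * the batch and posting is deferred to smb_direct_flush_send_list();
+ * without one it is posted immediately as a signaled send.
+ */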
+static int post_sendmsg(struct smb_direct_transport *t,
+                       struct smb_direct_send_ctx *send_ctx,
+                       struct smb_direct_sendmsg *msg)
+{
+       int i;
+
+       for (i = 0; i < msg->num_sge; i++)
+               ib_dma_sync_single_for_device(t->cm_id->device,
+                                             msg->sge[i].addr, msg->sge[i].length,
+                                             DMA_TO_DEVICE);
+
+       msg->cqe.done = send_done;
+       msg->wr.opcode = IB_WR_SEND;
+       msg->wr.sg_list = &msg->sge[0];
+       msg->wr.num_sge = msg->num_sge;
+       msg->wr.next = NULL;
+
+       if (send_ctx) {
+               msg->wr.wr_cqe = NULL;
+               msg->wr.send_flags = 0;
+               if (!list_empty(&send_ctx->msg_list)) {
+                       struct smb_direct_sendmsg *last;
+
+                       last = list_last_entry(&send_ctx->msg_list,
+                                              struct smb_direct_sendmsg,
+                                              list);
+                       last->wr.next = &msg->wr;
+               }
+               list_add_tail(&msg->list, &send_ctx->msg_list);
+               send_ctx->wr_cnt++;
+               return 0;
+       }
+
+       msg->wr.wr_cqe = &msg->cqe;
+       msg->wr.send_flags = IB_SEND_SIGNALED;
+       return smb_direct_post_send(t, &msg->wr);
+}
+
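+/*
+ * Build one SMB_DIRECT data packet from @iov: consume a send credit,
+ * create the data-transfer header, map each iovec onto SGEs and hand the
+ * message to post_sendmsg(). The send credit is returned on failure.
+ */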
+static int smb_direct_post_send_data(struct smb_direct_transport *t,
+                                    struct smb_direct_send_ctx *send_ctx,
+                                    struct kvec *iov, int niov,
+                                    int remaining_data_length)
+{
+       int i, j, ret;
+       struct smb_direct_sendmsg *msg;
+       int data_length;
+       struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1];
+
+       ret = wait_for_send_credits(t, send_ctx);
+       if (ret)
+               return ret;
+
+       data_length = 0;
+       for (i = 0; i < niov; i++)
+               data_length += iov[i].iov_len;
+
+       ret = smb_direct_create_header(t, data_length, remaining_data_length,
+                                      &msg);
+       if (ret) {
+               atomic_inc(&t->send_credits);
+               return ret;
+       }
+
+       for (i = 0; i < niov; i++) {
+               struct ib_sge *sge;
+               int sg_cnt;
+
+               sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
+               sg_cnt = get_mapped_sg_list(t->cm_id->device,
+                                           iov[i].iov_base, iov[i].iov_len,
+                                           sg, SMB_DIRECT_MAX_SEND_SGES - 1,
+                                           DMA_TO_DEVICE);
+               if (sg_cnt <= 0) {
+                       pr_err("failed to map buffer\n");
+                       ret = -ENOMEM;
+                       goto err;
+               } else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES - 1) {
+                       pr_err("buffer does not fit into SGEs\n");
+                       ret = -E2BIG;
+                       ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
+                                       DMA_TO_DEVICE);
+                       goto err;
+               }
+
+               for (j = 0; j < sg_cnt; j++) {
+                       sge = &msg->sge[msg->num_sge];
+                       sge->addr = sg_dma_address(&sg[j]);
+                       sge->length = sg_dma_len(&sg[j]);
+                       sge->lkey  = t->pd->local_dma_lkey;
+                       msg->num_sge++;
+               }
+       }
+
+       ret = post_sendmsg(t, send_ctx, msg);
+       if (ret)
+               goto err;
+       return 0;
+err:
+       smb_direct_free_sendmsg(t, msg);
+       atomic_inc(&t->send_credits);
+       return ret;
+}
+
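+/*
+ * Send an SMB response over the RDMA connection. The kvecs are packed so
+ * that each SMB_DIRECT message payload stays within max_iov_size; a single
+ * kvec larger than that is split across multiple messages.
+ */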
+static int smb_direct_writev(struct ksmbd_transport *t,
+                            struct kvec *iov, int niovs, int buflen,
+                            bool need_invalidate, unsigned int remote_key)
+{
+       struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+       int remaining_data_length;
+       int start, i, j;
+       int max_iov_size = st->max_send_size -
+                       sizeof(struct smb_direct_data_transfer);
+       int ret;
+       struct kvec vec;
+       struct smb_direct_send_ctx send_ctx;
+
+       if (st->status != SMB_DIRECT_CS_CONNECTED)
+               return -ENOTCONN;
+
+       //FIXME: skip RFC1002 header..
+       buflen -= 4;
+       iov[0].iov_base += 4;
+       iov[0].iov_len -= 4;
+
+       remaining_data_length = buflen;
+       ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);
+
+       smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
+       start = i = 0;
+       buflen = 0;
+       while (true) {
+               buflen += iov[i].iov_len;
+               if (buflen > max_iov_size) {
+                       if (i > start) {
+                               remaining_data_length -=
+                                       (buflen - iov[i].iov_len);
+                               ret = smb_direct_post_send_data(st, &send_ctx,
+                                                               &iov[start], i - start,
+                                                               remaining_data_length);
+                               if (ret)
+                                       goto done;
+                       } else {
+                               /* iov[start] is too big, break it */
+                               int nvec  = (buflen + max_iov_size - 1) /
+                                               max_iov_size;
+
+                               for (j = 0; j < nvec; j++) {
+                                       vec.iov_base =
+                                               (char *)iov[start].iov_base +
+                                               j * max_iov_size;
+                                       vec.iov_len =
+                                               min_t(int, max_iov_size,
+                                                     buflen - max_iov_size * j);
+                                       remaining_data_length -= vec.iov_len;
+                                       ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1,
+                                                                       remaining_data_length);
+                                       if (ret)
+                                               goto done;
+                               }
+                               i++;
+                               if (i == niovs)
+                                       break;
+                       }
+                       start = i;
+                       buflen = 0;
+               } else {
+                       i++;
+                       if (i == niovs) {
+                               /* send out all remaining vecs */
+                               remaining_data_length -= buflen;
+                               ret = smb_direct_post_send_data(st, &send_ctx,
+                                                               &iov[start], i - start,
+                                                               remaining_data_length);
+                               if (ret)
+                                       goto done;
+                               break;
+                       }
+               }
+       }
+
+done:
+       ret = smb_direct_flush_send_list(st, &send_ctx, true);
+
+       /*
+        * As an optimization, we do not wait for each I/O to finish before
+        * posting the next one. Post them all, then wait for the pending
+        * send count to reach zero, which means every I/O has gone out and
+        * we can safely return.
+        */
+
+       wait_event(st->wait_send_payload_pending,
+                  atomic_read(&st->send_payload_pending) == 0);
+       return ret;
+}
+
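+/* Completion handler shared by RDMA read and write work requests. */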
+static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
+                           enum dma_data_direction dir)
+{
+       struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe,
+                                                         struct smb_direct_rdma_rw_msg, cqe);
+       struct smb_direct_transport *t = msg->t;
+
+       if (wc->status != IB_WC_SUCCESS) {
+               pr_err("read/write error. opcode = %d, status = %s(%d)\n",
+                      wc->opcode, ib_wc_status_msg(wc->status), wc->status);
+               smb_direct_disconnect_rdma_connection(t);
+       }
+
+       if (atomic_inc_return(&t->rw_avail_ops) > 0)
+               wake_up(&t->wait_rw_avail_ops);
+
+       rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
+                           msg->sg_list, msg->sgt.nents, dir);
+       sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+       complete(msg->completion);
+       kfree(msg);
+}
+
+static void read_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       read_write_done(cq, wc, DMA_FROM_DEVICE);
+}
+
+static void write_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       read_write_done(cq, wc, DMA_TO_DEVICE);
+}
+
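+/*
+ * Perform a single RDMA read or write of the local buffer @buf against the
+ * remote memory described by @remote_key and @remote_offset, and wait for
+ * the operation to complete before returning.
+ */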
+static int smb_direct_rdma_xmit(struct smb_direct_transport *t, void *buf,
+                               int buf_len, u32 remote_key, u64 remote_offset,
+                               u32 remote_len, bool is_read)
+{
+       struct smb_direct_rdma_rw_msg *msg;
+       int ret;
+       DECLARE_COMPLETION_ONSTACK(completion);
+       struct ib_send_wr *first_wr = NULL;
+
+       ret = wait_for_credits(t, &t->wait_rw_avail_ops, &t->rw_avail_ops);
+       if (ret < 0)
+               return ret;
+
+       /* TODO: mempool */
+       msg = kmalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
+                     sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
+       if (!msg) {
+               atomic_inc(&t->rw_avail_ops);
+               return -ENOMEM;
+       }
+
+       msg->sgt.sgl = &msg->sg_list[0];
+       ret = sg_alloc_table_chained(&msg->sgt,
+                                    get_buf_page_count(buf, buf_len),
+                                    msg->sg_list, SG_CHUNK_SIZE);
+       if (ret) {
+               atomic_inc(&t->rw_avail_ops);
+               kfree(msg);
+               return -ENOMEM;
+       }
+
+       ret = get_sg_list(buf, buf_len, msg->sgt.sgl, msg->sgt.orig_nents);
+       if (ret <= 0) {
+               pr_err("failed to get pages\n");
+               goto err;
+       }
+
+       ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
+                              msg->sg_list, get_buf_page_count(buf, buf_len),
+                              0, remote_offset, remote_key,
+                              is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+       if (ret < 0) {
+               pr_err("failed to init rdma_rw_ctx: %d\n", ret);
+               goto err;
+       }
+
+       msg->t = t;
+       msg->cqe.done = is_read ? read_done : write_done;
+       msg->completion = &completion;
+       first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
+                                  &msg->cqe, NULL);
+
+       ret = ib_post_send(t->qp, first_wr, NULL);
+       if (ret) {
+               pr_err("failed to post send wr: %d\n", ret);
+               goto err;
+       }
+
+       wait_for_completion(&completion);
+       return 0;
+
+err:
+       atomic_inc(&t->rw_avail_ops);
+       if (first_wr)
+               rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
+                                   msg->sg_list, msg->sgt.nents,
+                                   is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+       sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+       kfree(msg);
+       return ret;
+}
+
+static int smb_direct_rdma_write(struct ksmbd_transport *t, void *buf,
+                                unsigned int buflen, u32 remote_key,
+                                u64 remote_offset, u32 remote_len)
+{
+       return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
+                                   remote_key, remote_offset,
+                                   remote_len, false);
+}
+
+static int smb_direct_rdma_read(struct ksmbd_transport *t, void *buf,
+                               unsigned int buflen, u32 remote_key,
+                               u64 remote_offset, u32 remote_len)
+{
+       return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
+                                   remote_key, remote_offset,
+                                   remote_len, true);
+}
+
+static void smb_direct_disconnect(struct ksmbd_transport *t)
+{
+       struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+
+       ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id);
+
+       smb_direct_disconnect_rdma_work(&st->disconnect_work);
+       wait_event_interruptible(st->wait_status,
+                                st->status == SMB_DIRECT_CS_DISCONNECTED);
+       free_transport(st);
+}
+
+static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
+                                struct rdma_cm_event *event)
+{
+       struct smb_direct_transport *t = cm_id->context;
+
+       ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
+                   cm_id, rdma_event_msg(event->event), event->event);
+
+       switch (event->event) {
+       case RDMA_CM_EVENT_ESTABLISHED: {
+               t->status = SMB_DIRECT_CS_CONNECTED;
+               wake_up_interruptible(&t->wait_status);
+               break;
+       }
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+       case RDMA_CM_EVENT_DISCONNECTED: {
+               t->status = SMB_DIRECT_CS_DISCONNECTED;
+               wake_up_interruptible(&t->wait_status);
+               wake_up_interruptible(&t->wait_reassembly_queue);
+               wake_up(&t->wait_send_credits);
+               break;
+       }
+       case RDMA_CM_EVENT_CONNECT_ERROR: {
+               t->status = SMB_DIRECT_CS_DISCONNECTED;
+               wake_up_interruptible(&t->wait_status);
+               break;
+       }
+       default:
+               pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n",
+                      cm_id, rdma_event_msg(event->event),
+                      event->event);
+               break;
+       }
+       return 0;
+}
+
+static void smb_direct_qpair_handler(struct ib_event *event, void *context)
+{
+       struct smb_direct_transport *t = context;
+
+       ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n",
+                   t->cm_id, ib_event_msg(event->event), event->event);
+
+       switch (event->event) {
+       case IB_EVENT_CQ_ERR:
+       case IB_EVENT_QP_FATAL:
+               smb_direct_disconnect_rdma_connection(t);
+               break;
+       default:
+               break;
+       }
+}
+
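+/*
+ * Build and send the SMB_DIRECT negotiate response, then wait until the
+ * send has completed.
+ */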
+static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
+                                             int failed)
+{
+       struct smb_direct_sendmsg *sendmsg;
+       struct smb_direct_negotiate_resp *resp;
+       int ret;
+
+       sendmsg = smb_direct_alloc_sendmsg(t);
+       if (IS_ERR(sendmsg))
+               return -ENOMEM;
+
+       resp = (struct smb_direct_negotiate_resp *)sendmsg->packet;
+       if (failed) {
+               memset(resp, 0, sizeof(*resp));
+               resp->min_version = cpu_to_le16(0x0100);
+               resp->max_version = cpu_to_le16(0x0100);
+               resp->status = STATUS_NOT_SUPPORTED;
+       } else {
+               resp->status = STATUS_SUCCESS;
+               resp->min_version = SMB_DIRECT_VERSION_LE;
+               resp->max_version = SMB_DIRECT_VERSION_LE;
+               resp->negotiated_version = SMB_DIRECT_VERSION_LE;
+               resp->reserved = 0;
+               resp->credits_requested =
+                               cpu_to_le16(t->send_credit_target);
+               resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
+               resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size);
+               resp->preferred_send_size = cpu_to_le32(t->max_send_size);
+               resp->max_receive_size = cpu_to_le32(t->max_recv_size);
+               resp->max_fragmented_size =
+                               cpu_to_le32(t->max_fragmented_recv_size);
+       }
+
+       sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
+                                                (void *)resp, sizeof(*resp),
+                                                DMA_TO_DEVICE);
+       ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
+       if (ret) {
+               smb_direct_free_sendmsg(t, sendmsg);
+               return ret;
+       }
+
+       sendmsg->num_sge = 1;
+       sendmsg->sge[0].length = sizeof(*resp);
+       sendmsg->sge[0].lkey = t->pd->local_dma_lkey;
+
+       ret = post_sendmsg(t, NULL, sendmsg);
+       if (ret) {
+               smb_direct_free_sendmsg(t, sendmsg);
+               return ret;
+       }
+
+       wait_event(t->wait_send_pending,
+                  atomic_read(&t->send_pending) == 0);
+       return 0;
+}
+
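+/*
+ * Accept the pending RDMA connection request and wait until the CM handler
+ * reports that the connection has been established.
+ */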
+static int smb_direct_accept_client(struct smb_direct_transport *t)
+{
+       struct rdma_conn_param conn_param;
+       struct ib_port_immutable port_immutable;
+       u32 ird_ord_hdr[2];
+       int ret;
+
+       memset(&conn_param, 0, sizeof(conn_param));
+       conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
+                                          SMB_DIRECT_CM_INITIATOR_DEPTH);
+       conn_param.responder_resources = 0;
+
+       t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
+                                                t->cm_id->port_num,
+                                                &port_immutable);
+       if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
+               ird_ord_hdr[0] = conn_param.responder_resources;
+               ird_ord_hdr[1] = 1;
+               conn_param.private_data = ird_ord_hdr;
+               conn_param.private_data_len = sizeof(ird_ord_hdr);
+       } else {
+               conn_param.private_data = NULL;
+               conn_param.private_data_len = 0;
+       }
+       conn_param.retry_count = SMB_DIRECT_CM_RETRY;
+       conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY;
+       conn_param.flow_control = 0;
+
+       ret = rdma_accept(t->cm_id, &conn_param);
+       if (ret) {
+               pr_err("error at rdma_accept: %d\n", ret);
+               return ret;
+       }
+
+       wait_event_interruptible(t->wait_status,
+                                t->status != SMB_DIRECT_CS_NEW);
+       if (t->status != SMB_DIRECT_CS_CONNECTED)
+               return -ENOTCONN;
+       return 0;
+}
+
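+/*
+ * Run the SMB_DIRECT negotiation: post a receive for the client's negotiate
+ * request, accept the connection, wait for the request (with a timeout),
+ * and reply with the server's negotiated parameters.
+ */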
+static int smb_direct_negotiate(struct smb_direct_transport *t)
+{
+       int ret;
+       struct smb_direct_recvmsg *recvmsg;
+       struct smb_direct_negotiate_req *req;
+
+       recvmsg = get_free_recvmsg(t);
+       if (!recvmsg)
+               return -ENOMEM;
+       recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ;
+
+       ret = smb_direct_post_recv(t, recvmsg);
+       if (ret) {
+               pr_err("Can't post recv: %d\n", ret);
+               goto out;
+       }
+
+       t->negotiation_requested = false;
+       ret = smb_direct_accept_client(t);
+       if (ret) {
+               pr_err("Can't accept client\n");
+               goto out;
+       }
+
+       smb_direct_post_recv_credits(&t->post_recv_credits_work.work);
+
+       ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
+       ret = wait_event_interruptible_timeout(t->wait_status,
+                                              t->negotiation_requested ||
+                                               t->status == SMB_DIRECT_CS_DISCONNECTED,
+                                              SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
+       if (ret <= 0 || t->status == SMB_DIRECT_CS_DISCONNECTED) {
+               ret = ret < 0 ? ret : -ETIMEDOUT;
+               goto out;
+       }
+
+       ret = smb_direct_check_recvmsg(recvmsg);
+       if (ret == -ECONNABORTED)
+               goto out;
+
+       req = (struct smb_direct_negotiate_req *)recvmsg->packet;
+       t->max_recv_size = min_t(int, t->max_recv_size,
+                                le32_to_cpu(req->preferred_send_size));
+       t->max_send_size = min_t(int, t->max_send_size,
+                                le32_to_cpu(req->max_receive_size));
+       t->max_fragmented_send_size =
+                       le32_to_cpu(req->max_fragmented_size);
+
+       ret = smb_direct_send_negotiate_response(t, ret);
+out:
+       if (recvmsg)
+               put_recvmsg(t, recvmsg);
+       return ret;
+}
+
+static int smb_direct_init_params(struct smb_direct_transport *t,
+                                 struct ib_qp_cap *cap)
+{
+       struct ib_device *device = t->cm_id->device;
+       int max_send_sges, max_pages, max_rw_wrs, max_send_wrs;
+
+       /* Two extra SGEs are needed: one because an SMB_DIRECT header will be
+        * mapped, and one because a send buffer may not be page aligned.
+        */
+       t->max_send_size = smb_direct_max_send_size;
+       max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 2;
+       if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
+               pr_err("max_send_size %d is too large\n", t->max_send_size);
+               return -EINVAL;
+       }
+
+       /*
+        * Allow up to smb_direct_max_outstanding_rw_ops in-flight RDMA
+        * reads/writes. The HCA guarantees at least max_send_sge SGEs for
+        * an RDMA read/write work request, and if memory registration is
+        * used, additional reg_mr and local_inv WRs are needed per
+        * read/write.
+        */
+       t->max_rdma_rw_size = smb_direct_max_read_write_size;
+       max_pages = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
+       max_rw_wrs = DIV_ROUND_UP(max_pages, SMB_DIRECT_MAX_SEND_SGES);
+       max_rw_wrs += rdma_rw_mr_factor(device, t->cm_id->port_num,
+                       max_pages) * 2;
+       max_rw_wrs *= smb_direct_max_outstanding_rw_ops;
+
+       max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
+       if (max_send_wrs > device->attrs.max_cqe ||
+           max_send_wrs > device->attrs.max_qp_wr) {
+               pr_err("consider lowering send_credit_target = %d, or max_outstanding_rw_ops = %d\n",
+                      smb_direct_send_credit_target,
+                      smb_direct_max_outstanding_rw_ops);
+               pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+                      device->attrs.max_cqe, device->attrs.max_qp_wr);
+               return -EINVAL;
+       }
+
+       if (smb_direct_receive_credit_max > device->attrs.max_cqe ||
+           smb_direct_receive_credit_max > device->attrs.max_qp_wr) {
+               pr_err("consider lowering receive_credit_max = %d\n",
+                      smb_direct_receive_credit_max);
+               pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+                      device->attrs.max_cqe, device->attrs.max_qp_wr);
+               return -EINVAL;
+       }
+
+       if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) {
+               pr_err("warning: device max_send_sge = %d too small\n",
+                      device->attrs.max_send_sge);
+               return -EINVAL;
+       }
+       if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
+               pr_err("warning: device max_recv_sge = %d too small\n",
+                      device->attrs.max_recv_sge);
+               return -EINVAL;
+       }
+
+       t->recv_credits = 0;
+       t->count_avail_recvmsg = 0;
+
+       t->recv_credit_max = smb_direct_receive_credit_max;
+       t->recv_credit_target = 10;
+       t->new_recv_credits = 0;
+
+       t->send_credit_target = smb_direct_send_credit_target;
+       atomic_set(&t->send_credits, 0);
+       atomic_set(&t->rw_avail_ops, smb_direct_max_outstanding_rw_ops);
+
+       t->max_send_size = smb_direct_max_send_size;
+       t->max_recv_size = smb_direct_max_receive_size;
+       t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;
+
+       cap->max_send_wr = max_send_wrs;
+       cap->max_recv_wr = t->recv_credit_max;
+       cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
+       cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
+       cap->max_inline_data = 0;
+       cap->max_rdma_ctxs = 0;
+       return 0;
+}
+
+static void smb_direct_destroy_pools(struct smb_direct_transport *t)
+{
+       struct smb_direct_recvmsg *recvmsg;
+
+       while ((recvmsg = get_free_recvmsg(t)))
+               mempool_free(recvmsg, t->recvmsg_mempool);
+       while ((recvmsg = get_empty_recvmsg(t)))
+               mempool_free(recvmsg, t->recvmsg_mempool);
+
+       mempool_destroy(t->recvmsg_mempool);
+       t->recvmsg_mempool = NULL;
+
+       kmem_cache_destroy(t->recvmsg_cache);
+       t->recvmsg_cache = NULL;
+
+       mempool_destroy(t->sendmsg_mempool);
+       t->sendmsg_mempool = NULL;
+
+       kmem_cache_destroy(t->sendmsg_cache);
+       t->sendmsg_cache = NULL;
+}
+
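+/* Allocate the slab caches and mempools backing send and receive messages. */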
+static int smb_direct_create_pools(struct smb_direct_transport *t)
+{
+       char name[80];
+       int i;
+       struct smb_direct_recvmsg *recvmsg;
+
+       snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t);
+       t->sendmsg_cache = kmem_cache_create(name,
+                                            sizeof(struct smb_direct_sendmsg) +
+                                             sizeof(struct smb_direct_negotiate_resp),
+                                            0, SLAB_HWCACHE_ALIGN, NULL);
+       if (!t->sendmsg_cache)
+               return -ENOMEM;
+
+       t->sendmsg_mempool = mempool_create(t->send_credit_target,
+                                           mempool_alloc_slab, mempool_free_slab,
+                                           t->sendmsg_cache);
+       if (!t->sendmsg_mempool)
+               goto err;
+
+       snprintf(name, sizeof(name), "smb_direct_resp_%p", t);
+       t->recvmsg_cache = kmem_cache_create(name,
+                                            sizeof(struct smb_direct_recvmsg) +
+                                             t->max_recv_size,
+                                            0, SLAB_HWCACHE_ALIGN, NULL);
+       if (!t->recvmsg_cache)
+               goto err;
+
+       t->recvmsg_mempool =
+               mempool_create(t->recv_credit_max, mempool_alloc_slab,
+                              mempool_free_slab, t->recvmsg_cache);
+       if (!t->recvmsg_mempool)
+               goto err;
+
+       INIT_LIST_HEAD(&t->recvmsg_queue);
+
+       for (i = 0; i < t->recv_credit_max; i++) {
+               recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL);
+               if (!recvmsg)
+                       goto err;
+               recvmsg->transport = t;
+               list_add(&recvmsg->list, &t->recvmsg_queue);
+       }
+       t->count_avail_recvmsg = t->recv_credit_max;
+
+       return 0;
+err:
+       smb_direct_destroy_pools(t);
+       return -ENOMEM;
+}
+
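+/*
+ * Allocate the protection domain, the send/receive completion queues and
+ * the RC queue pair for this connection.
+ */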
+static int smb_direct_create_qpair(struct smb_direct_transport *t,
+                                  struct ib_qp_cap *cap)
+{
+       int ret;
+       struct ib_qp_init_attr qp_attr;
+
+       t->pd = ib_alloc_pd(t->cm_id->device, 0);
+       if (IS_ERR(t->pd)) {
+               pr_err("Can't create RDMA PD\n");
+               ret = PTR_ERR(t->pd);
+               t->pd = NULL;
+               return ret;
+       }
+
+       t->send_cq = ib_alloc_cq(t->cm_id->device, t,
+                                t->send_credit_target, 0, IB_POLL_WORKQUEUE);
+       if (IS_ERR(t->send_cq)) {
+               pr_err("Can't create RDMA send CQ\n");
+               ret = PTR_ERR(t->send_cq);
+               t->send_cq = NULL;
+               goto err;
+       }
+
+       t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
+                                cap->max_send_wr + cap->max_rdma_ctxs,
+                                0, IB_POLL_WORKQUEUE);
+       if (IS_ERR(t->recv_cq)) {
+               pr_err("Can't create RDMA recv CQ\n");
+               ret = PTR_ERR(t->recv_cq);
+               t->recv_cq = NULL;
+               goto err;
+       }
+
+       memset(&qp_attr, 0, sizeof(qp_attr));
+       qp_attr.event_handler = smb_direct_qpair_handler;
+       qp_attr.qp_context = t;
+       qp_attr.cap = *cap;
+       qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+       qp_attr.qp_type = IB_QPT_RC;
+       qp_attr.send_cq = t->send_cq;
+       qp_attr.recv_cq = t->recv_cq;
+       qp_attr.port_num = ~0;
+
+       ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr);
+       if (ret) {
+               pr_err("Can't create RDMA QP: %d\n", ret);
+               goto err;
+       }
+
+       t->qp = t->cm_id->qp;
+       t->cm_id->event_handler = smb_direct_cm_handler;
+
+       return 0;
+err:
+       if (t->qp) {
+               ib_destroy_qp(t->qp);
+               t->qp = NULL;
+       }
+       if (t->recv_cq) {
+               ib_destroy_cq(t->recv_cq);
+               t->recv_cq = NULL;
+       }
+       if (t->send_cq) {
+               ib_destroy_cq(t->send_cq);
+               t->send_cq = NULL;
+       }
+       if (t->pd) {
+               ib_dealloc_pd(t->pd);
+               t->pd = NULL;
+       }
+       return ret;
+}
+
+static int smb_direct_prepare(struct ksmbd_transport *t)
+{
+       struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+       int ret;
+       struct ib_qp_cap qp_cap;
+
+       ret = smb_direct_init_params(st, &qp_cap);
+       if (ret) {
+               pr_err("Can't configure RDMA parameters\n");
+               return ret;
+       }
+
+       ret = smb_direct_create_pools(st);
+       if (ret) {
+               pr_err("Can't init RDMA pool: %d\n", ret);
+               return ret;
+       }
+
+       ret = smb_direct_create_qpair(st, &qp_cap);
+       if (ret) {
+               pr_err("Can't accept RDMA client: %d\n", ret);
+               return ret;
+       }
+
+       ret = smb_direct_negotiate(st);
+       if (ret) {
+               pr_err("Can't negotiate: %d\n", ret);
+               return ret;
+       }
+
+       st->status = SMB_DIRECT_CS_CONNECTED;
+       return 0;
+}
+
+static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
+{
+       if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
+               return false;
+       if (attrs->max_fast_reg_page_list_len == 0)
+               return false;
+       return true;
+}
+
+static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
+{
+       struct smb_direct_transport *t;
+
+       if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
+               ksmbd_debug(RDMA,
+                           "Fast Registration Work Requests are not supported. device capabilities=%llx\n",
+                           new_cm_id->device->attrs.device_cap_flags);
+               return -EPROTONOSUPPORT;
+       }
+
+       t = alloc_transport(new_cm_id);
+       if (!t)
+               return -ENOMEM;
+
+       KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
+                                             KSMBD_TRANS(t)->conn, "ksmbd:r%u",
+                                             SMB_DIRECT_PORT);
+       if (IS_ERR(KSMBD_TRANS(t)->handler)) {
+               int ret = PTR_ERR(KSMBD_TRANS(t)->handler);
+
+               pr_err("Can't start thread\n");
+               free_transport(t);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
+                                    struct rdma_cm_event *event)
+{
+       switch (event->event) {
+       case RDMA_CM_EVENT_CONNECT_REQUEST: {
+               int ret = smb_direct_handle_connect_request(cm_id);
+
+               if (ret) {
+                       pr_err("Can't create transport: %d\n", ret);
+                       return ret;
+               }
+
+               ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n",
+                           cm_id);
+               break;
+       }
+       default:
+               pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n",
+                      cm_id, rdma_event_msg(event->event), event->event);
+               break;
+       }
+       return 0;
+}
+
+static int smb_direct_listen(int port)
+{
+       int ret;
+       struct rdma_cm_id *cm_id;
+       struct sockaddr_in sin = {
+               .sin_family             = AF_INET,
+               .sin_addr.s_addr        = htonl(INADDR_ANY),
+               .sin_port               = htons(port),
+       };
+
+       cm_id = rdma_create_id(&init_net, smb_direct_listen_handler,
+                              &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC);
+       if (IS_ERR(cm_id)) {
+               pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id));
+               return PTR_ERR(cm_id);
+       }
+
+       ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+       if (ret) {
+               pr_err("Can't bind: %d\n", ret);
+               goto err;
+       }
+
+       smb_direct_listener.cm_id = cm_id;
+
+       ret = rdma_listen(cm_id, 10);
+       if (ret) {
+               pr_err("Can't listen: %d\n", ret);
+               goto err;
+       }
+       return 0;
+err:
+       smb_direct_listener.cm_id = NULL;
+       rdma_destroy_id(cm_id);
+       return ret;
+}
+
+int ksmbd_rdma_init(void)
+{
+       int ret;
+
+       smb_direct_listener.cm_id = NULL;
+
+       /* When a client is running out of send credits, the server grants
+        * new credits by sending a packet through this queue. This prevents
+        * a client from being unable to send packets for lack of credits.
+        */
+       smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq",
+                                       WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
+       if (!smb_direct_wq)
+               return -ENOMEM;
+
+       ret = smb_direct_listen(SMB_DIRECT_PORT);
+       if (ret) {
+               destroy_workqueue(smb_direct_wq);
+               smb_direct_wq = NULL;
+               pr_err("Can't listen: %d\n", ret);
+               return ret;
+       }
+
+       ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n",
+                   smb_direct_listener.cm_id);
+       return 0;
+}
+
+int ksmbd_rdma_destroy(void)
+{
+       if (smb_direct_listener.cm_id)
+               rdma_destroy_id(smb_direct_listener.cm_id);
+       smb_direct_listener.cm_id = NULL;
+
+       if (smb_direct_wq) {
+               flush_workqueue(smb_direct_wq);
+               destroy_workqueue(smb_direct_wq);
+               smb_direct_wq = NULL;
+       }
+       return 0;
+}
+
+bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
+{
+       struct ib_device *ibdev;
+       bool rdma_capable = false;
+
+       ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
+       if (ibdev) {
+               if (rdma_frwr_is_supported(&ibdev->attrs))
+                       rdma_capable = true;
+               ib_device_put(ibdev);
+       }
+       return rdma_capable;
+}
+
+static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
+       .prepare        = smb_direct_prepare,
+       .disconnect     = smb_direct_disconnect,
+       .writev         = smb_direct_writev,
+       .read           = smb_direct_read,
+       .rdma_read      = smb_direct_rdma_read,
+       .rdma_write     = smb_direct_rdma_write,
+};
diff --git a/fs/ksmbd/transport_rdma.h b/fs/ksmbd/transport_rdma.h
new file mode 100644 (file)
index 0000000..0fa8adc
--- /dev/null
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2017, Microsoft Corporation.
+ *   Copyright (C) 2018, LG Electronics.
+ */
+
+#ifndef __KSMBD_TRANSPORT_RDMA_H__
+#define __KSMBD_TRANSPORT_RDMA_H__
+
+#define SMB_DIRECT_PORT        5445
+
+/* SMB DIRECT negotiation request packet [MS-SMBD] 2.2.1 */
+struct smb_direct_negotiate_req {
+       __le16 min_version;
+       __le16 max_version;
+       __le16 reserved;
+       __le16 credits_requested;
+       __le32 preferred_send_size;
+       __le32 max_receive_size;
+       __le32 max_fragmented_size;
+} __packed;
+
+/* SMB DIRECT negotiation response packet [MS-SMBD] 2.2.2 */
+struct smb_direct_negotiate_resp {
+       __le16 min_version;
+       __le16 max_version;
+       __le16 negotiated_version;
+       __le16 reserved;
+       __le16 credits_requested;
+       __le16 credits_granted;
+       __le32 status;
+       __le32 max_readwrite_size;
+       __le32 preferred_send_size;
+       __le32 max_receive_size;
+       __le32 max_fragmented_size;
+} __packed;
+
+#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
+
+/* SMB DIRECT data transfer packet with payload [MS-SMBD] 2.2.3 */
+struct smb_direct_data_transfer {
+       __le16 credits_requested;
+       __le16 credits_granted;
+       __le16 flags;
+       __le16 reserved;
+       __le32 remaining_data_length;
+       __le32 data_offset;
+       __le32 data_length;
+       __le32 padding;
+       __u8 buffer[];
+} __packed;
+
+#ifdef CONFIG_SMB_SERVER_SMBDIRECT
+int ksmbd_rdma_init(void);
+int ksmbd_rdma_destroy(void);
+bool ksmbd_rdma_capable_netdev(struct net_device *netdev);
+#else
+static inline int ksmbd_rdma_init(void) { return 0; }
+static inline int ksmbd_rdma_destroy(void) { return 0; }
+static inline bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { return false; }
+#endif
+
+#endif /* __KSMBD_TRANSPORT_RDMA_H__ */
diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
new file mode 100644 (file)
index 0000000..dc15a5e
--- /dev/null
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/freezer.h>
+
+#include "smb_common.h"
+#include "server.h"
+#include "auth.h"
+#include "connection.h"
+#include "transport_tcp.h"
+
+#define IFACE_STATE_DOWN               BIT(0)
+#define IFACE_STATE_CONFIGURED         BIT(1)
+
+struct interface {
+       struct task_struct      *ksmbd_kthread;
+       struct socket           *ksmbd_socket;
+       struct list_head        entry;
+       char                    *name;
+       struct mutex            sock_release_lock;
+       int                     state;
+};
+
+static LIST_HEAD(iface_list);
+
+static int bind_additional_ifaces;
+
+struct tcp_transport {
+       struct ksmbd_transport          transport;
+       struct socket                   *sock;
+       struct kvec                     *iov;
+       unsigned int                    nr_iov;
+};
+
+static struct ksmbd_transport_ops ksmbd_tcp_transport_ops;
+
+static void tcp_stop_kthread(struct task_struct *kthread);
+static struct interface *alloc_iface(char *ifname);
+
+#define KSMBD_TRANS(t) (&(t)->transport)
+#define TCP_TRANS(t)   ((struct tcp_transport *)container_of(t, \
+                               struct tcp_transport, transport))
+
+static inline void ksmbd_tcp_nodelay(struct socket *sock)
+{
+       tcp_sock_set_nodelay(sock->sk);
+}
+
+static inline void ksmbd_tcp_reuseaddr(struct socket *sock)
+{
+       sock_set_reuseaddr(sock->sk);
+}
+
+static inline void ksmbd_tcp_rcv_timeout(struct socket *sock, s64 secs)
+{
+       lock_sock(sock->sk);
+       if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
+               sock->sk->sk_rcvtimeo = secs * HZ;
+       else
+               sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+       release_sock(sock->sk);
+}
+
+static inline void ksmbd_tcp_snd_timeout(struct socket *sock, s64 secs)
+{
+       sock_set_sndtimeo(sock->sk, secs);
+}
+
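+/*
+ * Allocate a TCP transport and its ksmbd connection for a newly accepted
+ * client socket.
+ */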
+static struct tcp_transport *alloc_transport(struct socket *client_sk)
+{
+       struct tcp_transport *t;
+       struct ksmbd_conn *conn;
+
+       t = kzalloc(sizeof(*t), GFP_KERNEL);
+       if (!t)
+               return NULL;
+       t->sock = client_sk;
+
+       conn = ksmbd_conn_alloc();
+       if (!conn) {
+               kfree(t);
+               return NULL;
+       }
+
+       conn->transport = KSMBD_TRANS(t);
+       KSMBD_TRANS(t)->conn = conn;
+       KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops;
+       return t;
+}
+
+static void free_transport(struct tcp_transport *t)
+{
+       kernel_sock_shutdown(t->sock, SHUT_RDWR);
+       sock_release(t->sock);
+       t->sock = NULL;
+
+       ksmbd_conn_free(KSMBD_TRANS(t)->conn);
+       kfree(t->iov);
+       kfree(t);
+}
+
+/**
+ * kvec_array_init() - initialize an IO vector segment
+ * @new:       IO vector to be initialized
+ * @iov:       base IO vector
+ * @nr_segs:   number of segments in base iov
+ * @bytes:     total iovec length so far for read
+ *
+ * Return:     Number of IO segments
+ */
+static unsigned int kvec_array_init(struct kvec *new, struct kvec *iov,
+                                   unsigned int nr_segs, size_t bytes)
+{
+       size_t base = 0;
+
+       while (bytes || !iov->iov_len) {
+               int copy = min(bytes, iov->iov_len);
+
+               bytes -= copy;
+               base += copy;
+               if (iov->iov_len == base) {
+                       iov++;
+                       nr_segs--;
+                       base = 0;
+               }
+       }
+
+       memcpy(new, iov, sizeof(*iov) * nr_segs);
+       new->iov_base += base;
+       new->iov_len -= base;
+       return nr_segs;
+}
+
+/**
+ * get_conn_iovec() - get connection iovec for reading from socket
+ * @t:         TCP transport instance
+ * @nr_segs:   number of segments in iov
+ *
+ * Return:     existing or newly allocated iovec
+ */
+static struct kvec *get_conn_iovec(struct tcp_transport *t, unsigned int nr_segs)
+{
+       struct kvec *new_iov;
+
+       if (t->iov && nr_segs <= t->nr_iov)
+               return t->iov;
+
+       /* not big enough -- allocate a new one and release the old */
+       new_iov = kmalloc_array(nr_segs, sizeof(*new_iov), GFP_KERNEL);
+       if (new_iov) {
+               kfree(t->iov);
+               t->iov = new_iov;
+               t->nr_iov = nr_segs;
+       }
+       return new_iov;
+}
+
+static unsigned short ksmbd_tcp_get_port(const struct sockaddr *sa)
+{
+       switch (sa->sa_family) {
+       case AF_INET:
+               return ntohs(((struct sockaddr_in *)sa)->sin_port);
+       case AF_INET6:
+               return ntohs(((struct sockaddr_in6 *)sa)->sin6_port);
+       }
+       return 0;
+}
+
+/**
+ * ksmbd_tcp_new_connection() - create a new tcp session on mount
+ * @client_sk: socket associated with new connection
+ *
+ * Whenever a new connection is requested, create a connection thread
+ * (session thread) to handle incoming SMB requests from that connection.
+ *
+ * Return:     0 on success, otherwise error
+ */
+static int ksmbd_tcp_new_connection(struct socket *client_sk)
+{
+       struct sockaddr *csin;
+       int rc = 0;
+       struct tcp_transport *t;
+
+       t = alloc_transport(client_sk);
+       if (!t)
+               return -ENOMEM;
+
+       csin = KSMBD_TCP_PEER_SOCKADDR(KSMBD_TRANS(t)->conn);
+       if (kernel_getpeername(client_sk, csin) < 0) {
+               pr_err("client ip resolution failed\n");
+               rc = -EINVAL;
+               goto out_error;
+       }
+
+       KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
+                                             KSMBD_TRANS(t)->conn,
+                                             "ksmbd:%u",
+                                             ksmbd_tcp_get_port(csin));
+       if (IS_ERR(KSMBD_TRANS(t)->handler)) {
+               pr_err("cannot start conn thread\n");
+               rc = PTR_ERR(KSMBD_TRANS(t)->handler);
+               free_transport(t);
+       }
+       return rc;
+
+out_error:
+       free_transport(t);
+       return rc;
+}
+
+/**
+ * ksmbd_kthread_fn() - listen for new SMB connections and create connection threads
+ * @p:         arguments to forker thread
+ *
+ * Return:     0 when the listener thread stops
+ */
+static int ksmbd_kthread_fn(void *p)
+{
+       struct socket *client_sk = NULL;
+       struct interface *iface = (struct interface *)p;
+       int ret;
+
+       while (!kthread_should_stop()) {
+               mutex_lock(&iface->sock_release_lock);
+               if (!iface->ksmbd_socket) {
+                       mutex_unlock(&iface->sock_release_lock);
+                       break;
+               }
+               ret = kernel_accept(iface->ksmbd_socket, &client_sk,
+                                   O_NONBLOCK);
+               mutex_unlock(&iface->sock_release_lock);
+               if (ret) {
+                       if (ret == -EAGAIN)
+                               /* check for new connections every 100 msecs */
+                               schedule_timeout_interruptible(HZ / 10);
+                       continue;
+               }
+
+               ksmbd_debug(CONN, "connect success: accepted new connection\n");
+               client_sk->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT;
+               client_sk->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT;
+
+               ksmbd_tcp_new_connection(client_sk);
+       }
+
+       ksmbd_debug(CONN, "releasing socket\n");
+       return 0;
+}
+
+/**
+ * ksmbd_tcp_run_kthread() - start forker thread
+ * @iface: pointer to struct interface
+ *
+ * Start the forker thread (ksmbd/0) at module init time to listen
+ * on port 445 for new SMB connection requests. It creates per-connection
+ * server threads (ksmbd/x).
+ *
+ * Return:     0 on success or error number
+ */
+static int ksmbd_tcp_run_kthread(struct interface *iface)
+{
+       int rc;
+       struct task_struct *kthread;
+
+       kthread = kthread_run(ksmbd_kthread_fn, (void *)iface, "ksmbd-%s",
+                             iface->name);
+       if (IS_ERR(kthread)) {
+               rc = PTR_ERR(kthread);
+               return rc;
+       }
+       iface->ksmbd_kthread = kthread;
+
+       return 0;
+}
+
+/**
+ * ksmbd_tcp_readv() - read data from socket in given iovec
+ * @t:         TCP transport instance
+ * @iov_orig:  base IO vector
+ * @nr_segs:   number of segments in base iov
+ * @to_read:   number of bytes to read from socket
+ *
+ * Return:     on success return number of bytes read from socket,
+ *             otherwise return error number
+ */
+static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+                          unsigned int nr_segs, unsigned int to_read)
+{
+       int length = 0;
+       int total_read;
+       unsigned int segs;
+       struct msghdr ksmbd_msg;
+       struct kvec *iov;
+       struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn;
+
+       iov = get_conn_iovec(t, nr_segs);
+       if (!iov)
+               return -ENOMEM;
+
+       ksmbd_msg.msg_control = NULL;
+       ksmbd_msg.msg_controllen = 0;
+
+       for (total_read = 0; to_read; total_read += length, to_read -= length) {
+               try_to_freeze();
+
+               if (!ksmbd_conn_alive(conn)) {
+                       total_read = -ESHUTDOWN;
+                       break;
+               }
+               segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
+
+               length = kernel_recvmsg(t->sock, &ksmbd_msg,
+                                       iov, segs, to_read, 0);
+
+               if (length == -EINTR) {
+                       total_read = -ESHUTDOWN;
+                       break;
+               } else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
+                       total_read = -EAGAIN;
+                       break;
+               } else if (length == -ERESTARTSYS || length == -EAGAIN) {
+                       usleep_range(1000, 2000);
+                       length = 0;
+                       continue;
+               } else if (length <= 0) {
+                       total_read = -EAGAIN;
+                       break;
+               }
+       }
+       return total_read;
+}
+
+/**
+ * ksmbd_tcp_read() - read data from socket in given buffer
+ * @t:         TCP transport instance
+ * @buf:       buffer to store read data from socket
+ * @to_read:   number of bytes to read from socket
+ *
+ * Return:     on success return number of bytes read from socket,
+ *             otherwise return error number
+ */
+static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf, unsigned int to_read)
+{
+       struct kvec iov;
+
+       iov.iov_base = buf;
+       iov.iov_len = to_read;
+
+       return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read);
+}
+
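+/*
+ * Write the given kvecs to the TCP socket. The invalidation arguments are
+ * only meaningful for the RDMA transport and are ignored here.
+ */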
+static int ksmbd_tcp_writev(struct ksmbd_transport *t, struct kvec *iov,
+                           int nvecs, int size, bool need_invalidate,
+                           unsigned int remote_key)
+
+{
+       struct msghdr smb_msg = {.msg_flags = MSG_NOSIGNAL};
+
+       return kernel_sendmsg(TCP_TRANS(t)->sock, &smb_msg, iov, nvecs, size);
+}
+
+static void ksmbd_tcp_disconnect(struct ksmbd_transport *t)
+{
+       free_transport(TCP_TRANS(t));
+}
+
+static void tcp_destroy_socket(struct socket *ksmbd_socket)
+{
+       int ret;
+
+       if (!ksmbd_socket)
+               return;
+
+       /* disable the send/receive timeouts before shutting down */
+       ksmbd_tcp_rcv_timeout(ksmbd_socket, 0);
+       ksmbd_tcp_snd_timeout(ksmbd_socket, 0);
+
+       ret = kernel_sock_shutdown(ksmbd_socket, SHUT_RDWR);
+       if (ret)
+               pr_err("Failed to shutdown socket: %d\n", ret);
+       sock_release(ksmbd_socket);
+}
+
+/**
+ * create_socket() - create a listening socket for an interface
+ * @iface:     interface to bind the listening socket to
+ *
+ * Return:     0 on success, otherwise error
+ */
+static int create_socket(struct interface *iface)
+{
+       int ret;
+       struct sockaddr_in6 sin6;
+       struct sockaddr_in sin;
+       struct socket *ksmbd_socket;
+       bool ipv4 = false;
+
+       ret = sock_create(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &ksmbd_socket);
+       if (ret) {
+               pr_err("Can't create socket for ipv6, try ipv4: %d\n", ret);
+               ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP,
+                                 &ksmbd_socket);
+               if (ret) {
+                       pr_err("Can't create socket for ipv4: %d\n", ret);
+                       goto out_error;
+               }
+
+               sin.sin_family = PF_INET;
+               sin.sin_addr.s_addr = htonl(INADDR_ANY);
+               sin.sin_port = htons(server_conf.tcp_port);
+               ipv4 = true;
+       } else {
+               sin6.sin6_family = PF_INET6;
+               sin6.sin6_addr = in6addr_any;
+               sin6.sin6_port = htons(server_conf.tcp_port);
+       }
+
+       ksmbd_tcp_nodelay(ksmbd_socket);
+       ksmbd_tcp_reuseaddr(ksmbd_socket);
+
+       ret = sock_setsockopt(ksmbd_socket,
+                             SOL_SOCKET,
+                             SO_BINDTODEVICE,
+                             KERNEL_SOCKPTR(iface->name),
+                             strlen(iface->name));
+       if (ret != -ENODEV && ret < 0) {
+               pr_err("Failed to set SO_BINDTODEVICE: %d\n", ret);
+               goto out_error;
+       }
+
+       if (ipv4)
+               ret = kernel_bind(ksmbd_socket, (struct sockaddr *)&sin,
+                                 sizeof(sin));
+       else
+               ret = kernel_bind(ksmbd_socket, (struct sockaddr *)&sin6,
+                                 sizeof(sin6));
+       if (ret) {
+               pr_err("Failed to bind socket: %d\n", ret);
+               goto out_error;
+       }
+
+       ksmbd_socket->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT;
+       ksmbd_socket->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT;
+
+       ret = kernel_listen(ksmbd_socket, KSMBD_SOCKET_BACKLOG);
+       if (ret) {
+               pr_err("Port listen() error: %d\n", ret);
+               goto out_error;
+       }
+
+       iface->ksmbd_socket = ksmbd_socket;
+       ret = ksmbd_tcp_run_kthread(iface);
+       if (ret) {
+               pr_err("Can't start ksmbd main kthread: %d\n", ret);
+               goto out_error;
+       }
+       iface->state = IFACE_STATE_CONFIGURED;
+
+       return 0;
+
+out_error:
+       tcp_destroy_socket(ksmbd_socket);
+       iface->ksmbd_socket = NULL;
+       return ret;
+}
+
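+/*
+ * Netdevice notifier: bring the listener up when a configured interface
+ * (or, with bind_additional_ifaces, any new interface) comes up, and tear
+ * it down when the interface goes down.
+ */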
+static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event,
+                             void *ptr)
+{
+       struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+       struct interface *iface;
+       int ret, found = 0;
+
+       switch (event) {
+       case NETDEV_UP:
+               if (netdev->priv_flags & IFF_BRIDGE_PORT)
+                       return NOTIFY_OK;
+
+               list_for_each_entry(iface, &iface_list, entry) {
+                       if (!strcmp(iface->name, netdev->name)) {
+                               found = 1;
+                               if (iface->state != IFACE_STATE_DOWN)
+                                       break;
+                               ret = create_socket(iface);
+                               if (ret)
+                                       return NOTIFY_OK;
+                               break;
+                       }
+               }
+               if (!found && bind_additional_ifaces) {
+                       iface = alloc_iface(kstrdup(netdev->name, GFP_KERNEL));
+                       if (!iface)
+                               return NOTIFY_OK;
+                       ret = create_socket(iface);
+                       if (ret)
+                               break;
+               }
+               break;
+       case NETDEV_DOWN:
+               list_for_each_entry(iface, &iface_list, entry) {
+                       if (!strcmp(iface->name, netdev->name) &&
+                           iface->state == IFACE_STATE_CONFIGURED) {
+                               tcp_stop_kthread(iface->ksmbd_kthread);
+                               iface->ksmbd_kthread = NULL;
+                               mutex_lock(&iface->sock_release_lock);
+                               tcp_destroy_socket(iface->ksmbd_socket);
+                               iface->ksmbd_socket = NULL;
+                               mutex_unlock(&iface->sock_release_lock);
+
+                               iface->state = IFACE_STATE_DOWN;
+                               break;
+                       }
+               }
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block ksmbd_netdev_notifier = {
+       .notifier_call = ksmbd_netdev_event,
+};
+
+int ksmbd_tcp_init(void)
+{
+       register_netdevice_notifier(&ksmbd_netdev_notifier);
+
+       return 0;
+}
+
+static void tcp_stop_kthread(struct task_struct *kthread)
+{
+       int ret;
+
+       if (!kthread)
+               return;
+
+       ret = kthread_stop(kthread);
+       if (ret)
+               pr_err("failed to stop forker thread\n");
+}
+
+void ksmbd_tcp_destroy(void)
+{
+       struct interface *iface, *tmp;
+
+       unregister_netdevice_notifier(&ksmbd_netdev_notifier);
+
+       list_for_each_entry_safe(iface, tmp, &iface_list, entry) {
+               list_del(&iface->entry);
+               kfree(iface->name);
+               kfree(iface);
+       }
+}
+
+static struct interface *alloc_iface(char *ifname)
+{
+       struct interface *iface;
+
+       if (!ifname)
+               return NULL;
+
+       iface = kzalloc(sizeof(struct interface), GFP_KERNEL);
+       if (!iface) {
+               kfree(ifname);
+               return NULL;
+       }
+
+       iface->name = ifname;
+       iface->state = IFACE_STATE_DOWN;
+       list_add(&iface->entry, &iface_list);
+       mutex_init(&iface->sock_release_lock);
+       return iface;
+}
+
+int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
+{
+       int sz = 0;
+
+       if (!ifc_list_sz) {
+               struct net_device *netdev;
+
+               rtnl_lock();
+               for_each_netdev(&init_net, netdev) {
+                       if (netdev->priv_flags & IFF_BRIDGE_PORT)
+                               continue;
+                       if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL)))
+                               return -ENOMEM;
+               }
+               rtnl_unlock();
+               bind_additional_ifaces = 1;
+               return 0;
+       }
+
+       while (ifc_list_sz > 0) {
+               if (!alloc_iface(kstrdup(ifc_list, GFP_KERNEL)))
+                       return -ENOMEM;
+
+               sz = strlen(ifc_list);
+               if (!sz)
+                       break;
+
+               ifc_list += sz + 1;
+               ifc_list_sz -= (sz + 1);
+       }
+
+       bind_additional_ifaces = 0;
+
+       return 0;
+}
+
+static struct ksmbd_transport_ops ksmbd_tcp_transport_ops = {
+       .read           = ksmbd_tcp_read,
+       .writev         = ksmbd_tcp_writev,
+       .disconnect     = ksmbd_tcp_disconnect,
+};
diff --git a/fs/ksmbd/transport_tcp.h b/fs/ksmbd/transport_tcp.h
new file mode 100644 (file)
index 0000000..e338beb
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __KSMBD_TRANSPORT_TCP_H__
+#define __KSMBD_TRANSPORT_TCP_H__
+
+int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz);
+int ksmbd_tcp_init(void);
+void ksmbd_tcp_destroy(void);
+
+#endif /* __KSMBD_TRANSPORT_TCP_H__ */
diff --git a/fs/ksmbd/unicode.c b/fs/ksmbd/unicode.c
new file mode 100644 (file)
index 0000000..a0db699
--- /dev/null
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Some of the source code in this file came from fs/cifs/cifs_unicode.c
+ *
+ *   Copyright (c) International Business Machines  Corp., 2000,2009
+ *   Modified by Steve French (sfrench@us.ibm.com)
+ *   Modified by Namjae Jeon (linkinjeon@kernel.org)
+ */
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include "glob.h"
+#include "unicode.h"
+#include "uniupr.h"
+#include "smb_common.h"
+
+/*
+ * smb_utf16_bytes() - how long will a string be after conversion?
+ * @from:      pointer to input string
+ * @maxbytes:  don't go past this many bytes of input string
+ * @codepage:  destination codepage
+ *
+ * Walk a utf16le string and return the number of bytes that the string will
+ * be after being converted to the given charset, not including any null
+ * termination required. Don't walk past maxbytes in the source buffer.
+ *
+ * Return:     string length after conversion
+ */
+static int smb_utf16_bytes(const __le16 *from, int maxbytes,
+                          const struct nls_table *codepage)
+{
+       int i;
+       int charlen, outlen = 0;
+       int maxwords = maxbytes / 2;
+       char tmp[NLS_MAX_CHARSET_SIZE];
+       __u16 ftmp;
+
+       for (i = 0; i < maxwords; i++) {
+               ftmp = get_unaligned_le16(&from[i]);
+               if (ftmp == 0)
+                       break;
+
+               charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
+               if (charlen > 0)
+                       outlen += charlen;
+               else
+                       outlen++;
+       }
+
+       return outlen;
+}
+
+/*
+ * cifs_mapchar() - convert a host-endian char to proper char in codepage
+ * @target:    where converted character should be copied
+ * @src_char:  2 byte host-endian source character
+ * @cp:                codepage to which character should be converted
+ * @mapchar:   should character be mapped according to mapchars mount option?
+ *
+ * This function handles the conversion of a single character. It is the
+ * responsibility of the caller to ensure that the target buffer is large
+ * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
+ *
+ * Return:     string length after conversion
+ */
+static int
+cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
+            bool mapchar)
+{
+       int len = 1;
+
+       if (!mapchar)
+               goto cp_convert;
+
+       /*
+        * BB: Cannot handle remapping UNI_SLASH until all the calls to
+        *     build_path_from_dentry are modified, as they use slash as
+        *     separator.
+        */
+       switch (src_char) {
+       case UNI_COLON:
+               *target = ':';
+               break;
+       case UNI_ASTERISK:
+               *target = '*';
+               break;
+       case UNI_QUESTION:
+               *target = '?';
+               break;
+       case UNI_PIPE:
+               *target = '|';
+               break;
+       case UNI_GRTRTHAN:
+               *target = '>';
+               break;
+       case UNI_LESSTHAN:
+               *target = '<';
+               break;
+       default:
+               goto cp_convert;
+       }
+
+out:
+       return len;
+
+cp_convert:
+       len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
+       if (len <= 0) {
+               *target = '?';
+               len = 1;
+       }
+
+       goto out;
+}
+
+/*
+ * is_char_allowed() - check for valid character
+ * @ch:                input character to be checked
+ *
+ * Return:     1 if char is allowed, otherwise 0
+ */
+static inline int is_char_allowed(char *ch)
+{
+       /* check for control chars, wildcards etc. */
+       if (!(*ch & 0x80) &&
+           (*ch <= 0x1f ||
+            *ch == '?' || *ch == '"' || *ch == '<' ||
+            *ch == '>' || *ch == '|'))
+               return 0;
+
+       return 1;
+}
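+
+/*
+ * For example, '"' and control characters below 0x20 are rejected, while
+ * ordinary ASCII letters and any byte with the high bit set (part of a
+ * multi-byte sequence) are allowed.
+ */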
+
+/*
+ * smb_from_utf16() - convert utf16le string to local charset
+ * @to:                destination buffer
+ * @from:      source buffer
+ * @tolen:     destination buffer size (in bytes)
+ * @fromlen:   source buffer size (in bytes)
+ * @codepage:  codepage to which characters should be converted
+ * @mapchar:   should characters be remapped according to the mapchars option?
+ *
+ * Convert a little-endian utf16le string (as received on the wire) to a string
+ * in the provided codepage. The tolen and fromlen parameters are to ensure
+ * that the code doesn't walk off the end of the buffer (which is always
+ * a danger if the alignment of the source buffer is off). The destination
+ * string is always properly null terminated and fits in the destination
+ * buffer. Returns the length of the destination string in bytes (including
+ * null terminator).
+ *
+ * Note that some Windows versions actually send multiword UTF-16 characters
+ * (surrogate pairs) instead of plain 2-byte UTF-16 code units. The Linux nls
+ * routines, however, aren't able to deal with those characters properly. In
+ * the event that we get some of those characters, they won't be translated
+ * properly.
+ *
+ * Return:     string length after conversion
+ */
+static int smb_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+                         const struct nls_table *codepage, bool mapchar)
+{
+       int i, charlen, safelen;
+       int outlen = 0;
+       int nullsize = nls_nullsize(codepage);
+       int fromwords = fromlen / 2;
+       char tmp[NLS_MAX_CHARSET_SIZE];
+       __u16 ftmp;
+
+       /*
+        * because the chars can be of varying widths, we need to take care
+        * not to overflow the destination buffer when we get close to the
+        * end of it. Until we get to this offset, we don't need to check
+        * for overflow however.
+        */
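+       /*
+        * For example, with NLS_MAX_CHARSET_SIZE == 6 and a one-byte null
+        * terminator, a 100-byte destination gives safelen == 93; past that
+        * offset each character is size-checked before being copied.
+        */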
+       safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
+
+       for (i = 0; i < fromwords; i++) {
+               ftmp = get_unaligned_le16(&from[i]);
+               if (ftmp == 0)
+                       break;
+
+               /*
+                * check to see if converting this character might make the
+                * conversion bleed into the null terminator
+                */
+               if (outlen >= safelen) {
+                       charlen = cifs_mapchar(tmp, ftmp, codepage, mapchar);
+                       if ((outlen + charlen) > (tolen - nullsize))
+                               break;
+               }
+
+               /* put converted char into 'to' buffer */
+               charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar);
+               outlen += charlen;
+       }
+
+       /* properly null-terminate string */
+       for (i = 0; i < nullsize; i++)
+               to[outlen++] = 0;
+
+       return outlen;
+}
+
+/*
+ * smb_strtoUTF16() - Convert character string to unicode string
+ * @to:                destination buffer
+ * @from:      source buffer
+ * @len:       destination buffer size (in bytes)
+ * @codepage:  codepage to which characters should be converted
+ *
+ * Return:     length of the converted string in 16-bit units (excluding the
+ *             null terminator)
+ */
+int smb_strtoUTF16(__le16 *to, const char *from, int len,
+                  const struct nls_table *codepage)
+{
+       int charlen;
+       int i;
+       wchar_t wchar_to; /* needed to quiet sparse */
+
+       /* special case for utf8 to handle no plane0 chars */
+       if (!strcmp(codepage->charset, "utf8")) {
+               /*
+                * Convert utf8 -> utf16; we assume the destination has enough
+                * space, as the caller should have ensured the conversion does
+                * not overflow. For the destination, len is the length in
+                * wchar_t units (16 bits).
+                */
+               i  = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN,
+                                    (wchar_t *)to, len);
+
+               /* on success, terminate the string and exit */
+               if (i >= 0)
+                       goto success;
+               /*
+                * If it fails, fall back to UCS encoding; this function
+                * should not return negative values. Currently it can fail
+                * only if the source contains invalidly encoded characters.
+                */
+       }
+
+       for (i = 0; len > 0 && *from; i++, from += charlen, len -= charlen) {
+               charlen = codepage->char2uni(from, len, &wchar_to);
+               if (charlen < 1) {
+                       /* A question mark */
+                       wchar_to = 0x003f;
+                       charlen = 1;
+               }
+               put_unaligned_le16(wchar_to, &to[i]);
+       }
+
+success:
+       put_unaligned_le16(0, &to[i]);
+       return i;
+}
+
+/*
+ * smb_strndup_from_utf16() - copy a string from wire format to the local
+ *             codepage
+ * @src:       source string
+ * @maxlen:    don't walk past this many bytes in the source string
+ * @is_unicode:        is this a unicode string?
+ * @codepage:  destination codepage
+ *
+ * Take a string received off the wire, convert it to the local codepage and
+ * put it in a new buffer. Returns a pointer to the new string or NULL on
+ * error.
+ *
+ * Return:     destination string buffer or error ptr
+ */
+char *smb_strndup_from_utf16(const char *src, const int maxlen,
+                            const bool is_unicode,
+                            const struct nls_table *codepage)
+{
+       int len, ret;
+       char *dst;
+
+       if (is_unicode) {
+               len = smb_utf16_bytes((__le16 *)src, maxlen, codepage);
+               len += nls_nullsize(codepage);
+               dst = kmalloc(len, GFP_KERNEL);
+               if (!dst)
+                       return ERR_PTR(-ENOMEM);
+               ret = smb_from_utf16(dst, (__le16 *)src, len, maxlen, codepage,
+                                    false);
+               if (ret < 0) {
+                       kfree(dst);
+                       return ERR_PTR(-EINVAL);
+               }
+       } else {
+               len = strnlen(src, maxlen);
+               len++;
+               dst = kmalloc(len, GFP_KERNEL);
+               if (!dst)
+                       return ERR_PTR(-ENOMEM);
+               strscpy(dst, src, len);
+       }
+
+       return dst;
+}
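+
+/*
+ * A typical caller feeds this a name taken straight from a request buffer,
+ * for instance (a minimal sketch; req_buf and name_len are illustrative):
+ *
+ *     name = smb_strndup_from_utf16(req_buf, name_len, true, codepage);
+ *     if (IS_ERR(name))
+ *             return PTR_ERR(name);
+ *     ...
+ *     kfree(name);
+ */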
+
+/*
+ * smbConvertToUTF16() - convert string from local charset to utf16
+ * @target:    destination buffer
+ * @source:    source buffer
+ * @srclen:    source buffer size (in bytes)
+ * @cp:                codepage to which characters should be converted
+ * @mapchars:  should characters be remapped according to the mapchars option?
+ *
+ * Convert a string in the current code page to a 16 bit Unicode pathname in
+ * wire format. Conversion may involve remapping up to the six characters that
+ * are only legal in POSIX-like OSes (if they are present in the string). Path
+ * names are little-endian 16 bit Unicode on the wire.
+ *
+ * Return:     char length after conversion
+ */
+int smbConvertToUTF16(__le16 *target, const char *source, int srclen,
+                     const struct nls_table *cp, int mapchars)
+{
+       int i, j, charlen;
+       char src_char;
+       __le16 dst_char;
+       wchar_t tmp;
+
+       if (!mapchars)
+               return smb_strtoUTF16(target, source, srclen, cp);
+
+       for (i = 0, j = 0; i < srclen; j++) {
+               src_char = source[i];
+               charlen = 1;
+               switch (src_char) {
+               case 0:
+                       put_unaligned(0, &target[j]);
+                       return j;
+               case ':':
+                       dst_char = cpu_to_le16(UNI_COLON);
+                       break;
+               case '*':
+                       dst_char = cpu_to_le16(UNI_ASTERISK);
+                       break;
+               case '?':
+                       dst_char = cpu_to_le16(UNI_QUESTION);
+                       break;
+               case '<':
+                       dst_char = cpu_to_le16(UNI_LESSTHAN);
+                       break;
+               case '>':
+                       dst_char = cpu_to_le16(UNI_GRTRTHAN);
+                       break;
+               case '|':
+                       dst_char = cpu_to_le16(UNI_PIPE);
+                       break;
+               /*
+                * FIXME: We can not handle remapping backslash (UNI_SLASH)
+                * until all the calls to build_path_from_dentry are modified,
+                * as they use backslash as separator.
+                */
+               default:
+                       charlen = cp->char2uni(source + i, srclen - i, &tmp);
+                       dst_char = cpu_to_le16(tmp);
+
+                       /*
+                        * if no match, use question mark, which at least in
+                        * some cases serves as wild card
+                        */
+                       if (charlen < 1) {
+                               dst_char = cpu_to_le16(0x003f);
+                               charlen = 1;
+                       }
+               }
+               /*
+                * character may take more than one byte in the source string,
+                * but will take exactly two bytes in the target string
+                */
+               i += charlen;
+               put_unaligned(dst_char, &target[j]);
+       }
+
+       return j;
+}
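+
+/*
+ * For example, with mapchars enabled and srclen == 3, the local name "a:b"
+ * becomes the three 16-bit code units 0x0061, 0xf03a (UNI_COLON) and 0x0062
+ * on the wire, and the function returns 3.
+ */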
diff --git a/fs/ksmbd/unicode.h b/fs/ksmbd/unicode.h
new file mode 100644 (file)
index 0000000..5593024
--- /dev/null
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Some of the source code in this file came from fs/cifs/cifs_unicode.c
+ * cifs_unicode:  Unicode kernel case support
+ *
+ * Function:
+ *     Convert a unicode character to upper or lower case using
+ *     compressed tables.
+ *
+ *   Copyright (c) International Business Machines  Corp., 2000,2009
+ *
+ *
+ * Notes:
+ *     These APIs are based on the C library functions.  The semantics
+ *     should match the C functions but with expanded size operands.
+ *
+ *     The upper/lower functions are based on a table created by mkupr.
+ *     This is a compressed table of upper and lower case conversion.
+ *
+ */
+#ifndef _CIFS_UNICODE_H
+#define _CIFS_UNICODE_H
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <linux/nls.h>
+
+#define  UNIUPR_NOLOWER                /* Example to not expand lower case tables */
+
+/*
+ * Windows maps these to the user defined 16 bit Unicode range since they are
+ * reserved symbols (along with \ and /), otherwise illegal to store
+ * in filenames in NTFS
+ */
+#define UNI_ASTERISK    ((__u16)('*' + 0xF000))
+#define UNI_QUESTION    ((__u16)('?' + 0xF000))
+#define UNI_COLON       ((__u16)(':' + 0xF000))
+#define UNI_GRTRTHAN    ((__u16)('>' + 0xF000))
+#define UNI_LESSTHAN    ((__u16)('<' + 0xF000))
+#define UNI_PIPE        ((__u16)('|' + 0xF000))
+#define UNI_SLASH       ((__u16)('\\' + 0xF000))
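+
+/*
+ * For example, a ':' (0x003a) in a name is carried as the reserved code
+ * point 0xf03a (':' + 0xF000); smbConvertToUTF16() and cifs_mapchar()
+ * translate between the two forms when remapping is requested.
+ */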
+
+/* Just define what we want from uniupr.h.  We don't want to define the tables
+ * in each source file.
+ */
+#ifndef        UNICASERANGE_DEFINED
+struct UniCaseRange {
+       wchar_t start;
+       wchar_t end;
+       signed char *table;
+};
+#endif                         /* UNICASERANGE_DEFINED */
+
+#ifndef UNIUPR_NOUPPER
+extern signed char SmbUniUpperTable[512];
+extern const struct UniCaseRange SmbUniUpperRange[];
+#endif                         /* UNIUPR_NOUPPER */
+
+#ifndef UNIUPR_NOLOWER
+extern signed char CifsUniLowerTable[512];
+extern const struct UniCaseRange CifsUniLowerRange[];
+#endif                         /* UNIUPR_NOLOWER */
+
+#ifdef __KERNEL__
+int smb_strtoUTF16(__le16 *to, const char *from, int len,
+                  const struct nls_table *codepage);
+char *smb_strndup_from_utf16(const char *src, const int maxlen,
+                            const bool is_unicode,
+                            const struct nls_table *codepage);
+int smbConvertToUTF16(__le16 *target, const char *source, int srclen,
+                     const struct nls_table *cp, int mapchars);
+char *ksmbd_extract_sharename(char *treename);
+#endif
+
+/*
+ * UniStrcat:  Concatenate the second string to the first
+ *
+ * Returns:
+ *     Address of the first string
+ */
+static inline wchar_t *UniStrcat(wchar_t *ucs1, const wchar_t *ucs2)
+{
+       wchar_t *anchor = ucs1; /* save a pointer to start of ucs1 */
+
+       while (*ucs1++)
+       /*NULL*/;       /* To end of first string */
+       ucs1--;                 /* Return to the null */
+       while ((*ucs1++ = *ucs2++))
+       /*NULL*/;       /* copy string 2 over */
+       return anchor;
+}
+
+/*
+ * UniStrchr:  Find a character in a string
+ *
+ * Returns:
+ *     Address of first occurrence of character in string
+ *     or NULL if the character is not in the string
+ */
+static inline wchar_t *UniStrchr(const wchar_t *ucs, wchar_t uc)
+{
+       while ((*ucs != uc) && *ucs)
+               ucs++;
+
+       if (*ucs == uc)
+               return (wchar_t *)ucs;
+       return NULL;
+}
+
+/*
+ * UniStrcmp:  Compare two strings
+ *
+ * Returns:
+ *     < 0:  First string is less than second
+ *     = 0:  Strings are equal
+ *     > 0:  First string is greater than second
+ */
+static inline int UniStrcmp(const wchar_t *ucs1, const wchar_t *ucs2)
+{
+       while ((*ucs1 == *ucs2) && *ucs1) {
+               ucs1++;
+               ucs2++;
+       }
+       return (int)*ucs1 - (int)*ucs2;
+}
+
+/*
+ * UniStrcpy:  Copy a string
+ */
+static inline wchar_t *UniStrcpy(wchar_t *ucs1, const wchar_t *ucs2)
+{
+       wchar_t *anchor = ucs1; /* save the start of result string */
+
+       while ((*ucs1++ = *ucs2++))
+       /*NULL*/;
+       return anchor;
+}
+
+/*
+ * UniStrlen:  Return the length of a string (in 16 bit Unicode chars not bytes)
+ */
+static inline size_t UniStrlen(const wchar_t *ucs1)
+{
+       int i = 0;
+
+       while (*ucs1++)
+               i++;
+       return i;
+}
+
+/*
+ * UniStrnlen:  Return the length (in 16 bit Unicode chars not bytes) of a
+ *             string (length limited)
+ */
+static inline size_t UniStrnlen(const wchar_t *ucs1, int maxlen)
+{
+       int i = 0;
+
+       while (*ucs1++) {
+               i++;
+               if (i >= maxlen)
+                       break;
+       }
+       return i;
+}
+
+/*
+ * UniStrncat:  Concatenate length limited string
+ */
+static inline wchar_t *UniStrncat(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+{
+       wchar_t *anchor = ucs1; /* save pointer to string 1 */
+
+       while (*ucs1++)
+       /*NULL*/;
+       ucs1--;                 /* point to null terminator of s1 */
+       while (n-- && (*ucs1 = *ucs2)) {        /* copy s2 after s1 */
+               ucs1++;
+               ucs2++;
+       }
+       *ucs1 = 0;              /* Null terminate the result */
+       return anchor;
+}
+
+/*
+ * UniStrncmp:  Compare length limited string
+ */
+static inline int UniStrncmp(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+{
+       if (!n)
+               return 0;       /* Null strings are equal */
+       while ((*ucs1 == *ucs2) && *ucs1 && --n) {
+               ucs1++;
+               ucs2++;
+       }
+       return (int)*ucs1 - (int)*ucs2;
+}
+
+/*
+ * UniStrncmp_le:  Compare length limited string - native to little-endian
+ */
+static inline int
+UniStrncmp_le(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+{
+       if (!n)
+               return 0;       /* Null strings are equal */
+       while ((*ucs1 == __le16_to_cpu(*ucs2)) && *ucs1 && --n) {
+               ucs1++;
+               ucs2++;
+       }
+       return (int)*ucs1 - (int)__le16_to_cpu(*ucs2);
+}
+
+/*
+ * UniStrncpy:  Copy length limited string with pad
+ */
+static inline wchar_t *UniStrncpy(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+{
+       wchar_t *anchor = ucs1;
+
+       while (n-- && *ucs2)    /* Copy the strings */
+               *ucs1++ = *ucs2++;
+
+       n++;
+       while (n--)             /* Pad with nulls */
+               *ucs1++ = 0;
+       return anchor;
+}
+
+/*
+ * UniStrncpy_le:  Copy length limited string with pad to little-endian
+ */
+static inline wchar_t *UniStrncpy_le(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+{
+       wchar_t *anchor = ucs1;
+
+       while (n-- && *ucs2)    /* Copy the strings */
+               *ucs1++ = __le16_to_cpu(*ucs2++);
+
+       n++;
+       while (n--)             /* Pad with nulls */
+               *ucs1++ = 0;
+       return anchor;
+}
+
+/*
+ * UniStrstr:  Find a string in a string
+ *
+ * Returns:
+ *     Address of first match found
+ *     NULL if no matching string is found
+ */
+static inline wchar_t *UniStrstr(const wchar_t *ucs1, const wchar_t *ucs2)
+{
+       const wchar_t *anchor1 = ucs1;
+       const wchar_t *anchor2 = ucs2;
+
+       while (*ucs1) {
+               if (*ucs1 == *ucs2) {
+                       /* Partial match found */
+                       ucs1++;
+                       ucs2++;
+               } else {
+                       if (!*ucs2)     /* Match found */
+                               return (wchar_t *)anchor1;
+                       ucs1 = ++anchor1;       /* No match */
+                       ucs2 = anchor2;
+               }
+       }
+
+       if (!*ucs2)             /* Both end together */
+               return (wchar_t *)anchor1;      /* Match found */
+       return NULL;            /* No match */
+}
+
+#ifndef UNIUPR_NOUPPER
+/*
+ * UniToupper:  Convert a unicode character to upper case
+ */
+static inline wchar_t UniToupper(register wchar_t uc)
+{
+       register const struct UniCaseRange *rp;
+
+       if (uc < sizeof(SmbUniUpperTable)) {
+               /* Latin characters */
+               return uc + SmbUniUpperTable[uc];       /* Use base tables */
+       }
+
+       rp = SmbUniUpperRange;  /* Use range tables */
+       while (rp->start) {
+               if (uc < rp->start)     /* Before start of range */
+                       return uc;      /* Uppercase = input */
+               if (uc <= rp->end)      /* In range */
+                       return uc + rp->table[uc - rp->start];
+               rp++;   /* Try next range */
+       }
+       return uc;              /* Past last range */
+}
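+
+/*
+ * For example, 'a' (0x0061) is below 512 and SmbUniUpperTable[0x61] is -32,
+ * so UniToupper() returns 0x0041 ('A'); Cyrillic 0x0430 falls in the
+ * 0x0430-0x045f range and is shifted by -32 to 0x0410.
+ */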
+
+/*
+ * UniStrupr:  Upper case a unicode string
+ */
+static inline __le16 *UniStrupr(register __le16 *upin)
+{
+       register __le16 *up;
+
+       up = upin;
+       while (*up) {           /* For all characters */
+               *up = cpu_to_le16(UniToupper(le16_to_cpu(*up)));
+               up++;
+       }
+       return upin;            /* Return input pointer */
+}
+#endif                         /* UNIUPR_NOUPPER */
+
+#ifndef UNIUPR_NOLOWER
+/*
+ * UniTolower:  Convert a unicode character to lower case
+ */
+static inline wchar_t UniTolower(register wchar_t uc)
+{
+       register const struct UniCaseRange *rp;
+
+       if (uc < sizeof(CifsUniLowerTable)) {
+               /* Latin characters */
+               return uc + CifsUniLowerTable[uc];      /* Use base tables */
+       }
+
+       rp = CifsUniLowerRange; /* Use range tables */
+       while (rp->start) {
+               if (uc < rp->start)     /* Before start of range */
+                       return uc;      /* Lowercase = input */
+               if (uc <= rp->end)      /* In range */
+                       return uc + rp->table[uc - rp->start];
+               rp++;   /* Try next range */
+       }
+       return uc;              /* Past last range */
+}
+
+/*
+ * UniStrlwr:  Lower case a unicode string
+ */
+static inline wchar_t *UniStrlwr(register wchar_t *upin)
+{
+       register wchar_t *up;
+
+       up = upin;
+       while (*up) {           /* For all characters */
+               *up = UniTolower(*up);
+               up++;
+       }
+       return upin;            /* Return input pointer */
+}
+
+#endif
+
+#endif /* _CIFS_UNICODE_H */
diff --git a/fs/ksmbd/uniupr.h b/fs/ksmbd/uniupr.h
new file mode 100644 (file)
index 0000000..26583b7
--- /dev/null
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Some of the source code in this file came from fs/cifs/uniupr.h
+ *   Copyright (c) International Business Machines  Corp., 2000,2002
+ *
+ * uniupr.h - Unicode compressed case ranges
+ *
+ */
+#ifndef __KSMBD_UNIUPR_H
+#define __KSMBD_UNIUPR_H
+
+#ifndef UNIUPR_NOUPPER
+/*
+ * Latin upper case
+ */
+signed char SmbUniUpperTable[512] = {
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 000-00f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 010-01f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 020-02f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 030-03f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 040-04f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 050-05f */
+       0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+                               -32, -32, -32, -32, -32,        /* 060-06f */
+       -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+                               -32, 0, 0, 0, 0, 0,     /* 070-07f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 080-08f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 090-09f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0a0-0af */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0b0-0bf */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0c0-0cf */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0d0-0df */
+       -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+                        -32, -32, -32, -32, -32, -32,  /* 0e0-0ef */
+       -32, -32, -32, -32, -32, -32, -32, 0, -32, -32,
+                        -32, -32, -32, -32, -32, 121,  /* 0f0-0ff */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 100-10f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 110-11f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 120-12f */
+       0, 0, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,   /* 130-13f */
+       -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, /* 140-14f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 150-15f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 160-16f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,  /* 170-17f */
+       0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0,     /* 180-18f */
+       0, 0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0,       /* 190-19f */
+       0, -1, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0,    /* 1a0-1af */
+       -1, 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0,    /* 1b0-1bf */
+       0, 0, 0, 0, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, -1, 0,  /* 1c0-1cf */
+       -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -79, 0, -1, /* 1d0-1df */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e0-1ef */
+       0, 0, -1, -2, 0, -1, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1,  /* 1f0-1ff */
+};
+
+/* Upper case range - Greek */
+static signed char UniCaseRangeU03a0[47] = {
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -38, -37, -37, -37, /* 3a0-3af */
+       0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+                                        -32, -32, -32, -32,    /* 3b0-3bf */
+       -32, -32, -31, -32, -32, -32, -32, -32, -32, -32, -32, -32, -64,
+       -63, -63,
+};
+
+/* Upper case range - Cyrillic */
+static signed char UniCaseRangeU0430[48] = {
+       -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+                                        -32, -32, -32, -32,    /* 430-43f */
+       -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+                                        -32, -32, -32, -32,    /* 440-44f */
+       0, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80,
+                                        -80, -80, 0, -80, -80, /* 450-45f */
+};
+
+/* Upper case range - Extended cyrillic */
+static signed char UniCaseRangeU0490[61] = {
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 490-49f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 4a0-4af */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 4b0-4bf */
+       0, 0, -1, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1,
+};
+
+/* Upper case range - Extended latin and greek */
+static signed char UniCaseRangeU1e00[509] = {
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e00-1e0f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e10-1e1f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e20-1e2f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e30-1e3f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e40-1e4f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e50-1e5f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e60-1e6f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e70-1e7f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e80-1e8f */
+       0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, -59, 0, -1, 0, -1,  /* 1e90-1e9f */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ea0-1eaf */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1eb0-1ebf */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ec0-1ecf */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ed0-1edf */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ee0-1eef */
+       0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0,    /* 1ef0-1eff */
+       8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f00-1f0f */
+       8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f10-1f1f */
+       8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f20-1f2f */
+       8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f30-1f3f */
+       8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f40-1f4f */
+       0, 8, 0, 8, 0, 8, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f50-1f5f */
+       8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f60-1f6f */
+       74, 74, 86, 86, 86, 86, 100, 100, 0, 0, 112, 112,
+                                126, 126, 0, 0,        /* 1f70-1f7f */
+       8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f80-1f8f */
+       8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f90-1f9f */
+       8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fa0-1faf */
+       8, 8, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fb0-1fbf */
+       0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fc0-1fcf */
+       8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fd0-1fdf */
+       8, 8, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fe0-1fef */
+       0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* Upper case range - Wide latin */
+static signed char UniCaseRangeUff40[27] = {
+       0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+                        -32, -32, -32, -32, -32,       /* ff40-ff4f */
+       -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+};
+
+/*
+ * Upper Case Range
+ */
+const struct UniCaseRange SmbUniUpperRange[] = {
+       {0x03a0, 0x03ce, UniCaseRangeU03a0},
+       {0x0430, 0x045f, UniCaseRangeU0430},
+       {0x0490, 0x04cc, UniCaseRangeU0490},
+       {0x1e00, 0x1ffc, UniCaseRangeU1e00},
+       {0xff40, 0xff5a, UniCaseRangeUff40},
+       {0}
+};
+#endif
+
+#ifndef UNIUPR_NOLOWER
+/*
+ * Latin lower case
+ */
+signed char CifsUniLowerTable[512] = {
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 000-00f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 010-01f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 020-02f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 030-03f */
+       0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+                                        32, 32, 32,    /* 040-04f */
+       32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 0, 0,
+                                        0, 0, 0,       /* 050-05f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 060-06f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 070-07f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 080-08f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 090-09f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0a0-0af */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0b0-0bf */
+       32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+                                32, 32, 32, 32,        /* 0c0-0cf */
+       32, 32, 32, 32, 32, 32, 32, 0, 32, 32, 32, 32,
+                                        32, 32, 32, 0, /* 0d0-0df */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0e0-0ef */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0f0-0ff */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 100-10f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 110-11f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 120-12f */
+       0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, /* 130-13f */
+       0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, /* 140-14f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 150-15f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 160-16f */
+       1, 0, 1, 0, 1, 0, 1, 0, -121, 1, 0, 1, 0, 1, 0,
+                                                0,     /* 170-17f */
+       0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 79,
+                                                0,     /* 180-18f */
+       0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, /* 190-19f */
+       1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, /* 1a0-1af */
+       0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, /* 1b0-1bf */
+       0, 0, 0, 0, 2, 1, 0, 2, 1, 0, 2, 1, 0, 1, 0, 1, /* 1c0-1cf */
+       0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, /* 1d0-1df */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e0-1ef */
+       0, 2, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1f0-1ff */
+};
+
+/* Lower case range - Greek */
+static signed char UniCaseRangeL0380[44] = {
+       0, 0, 0, 0, 0, 0, 38, 0, 37, 37, 37, 0, 64, 0, 63, 63,  /* 380-38f */
+       0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+                                                32, 32, 32,    /* 390-39f */
+       32, 32, 0, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+};
+
+/* Lower case range - Cyrillic */
+static signed char UniCaseRangeL0400[48] = {
+       0, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+                                        0, 80, 80,     /* 400-40f */
+       32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+                                        32, 32, 32,    /* 410-41f */
+       32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+                                        32, 32, 32,    /* 420-42f */
+};
+
+/* Lower case range - Extended cyrillic */
+static signed char UniCaseRangeL0490[60] = {
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 490-49f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 4a0-4af */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 4b0-4bf */
+       0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
+};
+
+/* Lower case range - Extended latin and greek */
+static signed char UniCaseRangeL1e00[504] = {
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e00-1e0f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e10-1e1f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e20-1e2f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e30-1e3f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e40-1e4f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e50-1e5f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e60-1e6f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e70-1e7f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e80-1e8f */
+       1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, /* 1e90-1e9f */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1ea0-1eaf */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1eb0-1ebf */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1ec0-1ecf */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1ed0-1edf */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1ee0-1eef */
+       1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, /* 1ef0-1eff */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f00-1f0f */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,   /* 1f10-1f1f */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f20-1f2f */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f30-1f3f */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,   /* 1f40-1f4f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, -8, 0, -8, 0, -8, 0, -8,     /* 1f50-1f5f */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f60-1f6f */
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f70-1f7f */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f80-1f8f */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f90-1f9f */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1fa0-1faf */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -74, -74, -9, 0, 0, 0,  /* 1fb0-1fbf */
+       0, 0, 0, 0, 0, 0, 0, 0, -86, -86, -86, -86, -9, 0,
+                                                        0, 0,  /* 1fc0-1fcf */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -100, -100, 0, 0, 0, 0, /* 1fd0-1fdf */
+       0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -112, -112, -7, 0,
+                                                        0, 0,  /* 1fe0-1fef */
+       0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* Lower case range - Wide latin */
+static signed char UniCaseRangeLff20[27] = {
+       0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+                                                        32,    /* ff20-ff2f */
+       32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+};
+
+/*
+ * Lower Case Range
+ */
+const struct UniCaseRange CifsUniLowerRange[] = {
+       {0x0380, 0x03ab, UniCaseRangeL0380},
+       {0x0400, 0x042f, UniCaseRangeL0400},
+       {0x0490, 0x04cb, UniCaseRangeL0490},
+       {0x1e00, 0x1ff7, UniCaseRangeL1e00},
+       {0xff20, 0xff3a, UniCaseRangeLff20},
+       {0}
+};
+#endif
+
+#endif /* __KSMBD_UNIUPR_H */
diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
new file mode 100644 (file)
index 0000000..aee28ee
--- /dev/null
@@ -0,0 +1,1895 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/backing-dev.h>
+#include <linux/writeback.h>
+#include <linux/xattr.h>
+#include <linux/falloc.h>
+#include <linux/genhd.h>
+#include <linux/fsnotify.h>
+#include <linux/dcache.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/xacct.h>
+#include <linux/crc32c.h>
+
+#include "glob.h"
+#include "oplock.h"
+#include "connection.h"
+#include "vfs.h"
+#include "vfs_cache.h"
+#include "smbacl.h"
+#include "ndr.h"
+#include "auth.h"
+#include "misc.h"
+
+#include "smb_common.h"
+#include "mgmt/share_config.h"
+#include "mgmt/tree_connect.h"
+#include "mgmt/user_session.h"
+#include "mgmt/user_config.h"
+
+static char *extract_last_component(char *path)
+{
+       char *p = strrchr(path, '/');
+
+       if (p && p[1] != '\0') {
+               *p = '\0';
+               p++;
+       } else {
+               p = NULL;
+               pr_err("Invalid path %s\n", path);
+       }
+       return p;
+}
+
+static void ksmbd_vfs_inherit_owner(struct ksmbd_work *work,
+                                   struct inode *parent_inode,
+                                   struct inode *inode)
+{
+       if (!test_share_config_flag(work->tcon->share_conf,
+                                   KSMBD_SHARE_FLAG_INHERIT_OWNER))
+               return;
+
+       i_uid_write(inode, i_uid_read(parent_inode));
+}
+
+/**
+ * ksmbd_vfs_lock_parent() - lock parent dentry if it is stable
+ *
+ * The parent dentry obtained via dget_parent(), or passed in as @parent,
+ * could be unstable, so lock the parent inode and look up the child
+ * dentry again to confirm that it is still attached to @parent.
+ *
+ * The reference count of @parent isn't incremented.
+ */
+int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child)
+{
+       struct dentry *dentry;
+       int ret = 0;
+
+       inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
+       dentry = lookup_one_len(child->d_name.name, parent,
+                               child->d_name.len);
+       if (IS_ERR(dentry)) {
+               ret = PTR_ERR(dentry);
+               goto out_err;
+       }
+
+       if (dentry != child) {
+               ret = -ESTALE;
+               dput(dentry);
+               goto out_err;
+       }
+
+       dput(dentry);
+       return 0;
+out_err:
+       inode_unlock(d_inode(parent));
+       return ret;
+}
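+
+/*
+ * Callers pair this with dget_parent(), e.g. (a minimal sketch mirroring
+ * ksmbd_vfs_may_delete() below):
+ *
+ *     parent = dget_parent(dentry);
+ *     ret = ksmbd_vfs_lock_parent(parent, dentry);
+ *     if (ret) {
+ *             dput(parent);
+ *             return ret;
+ *     }
+ *     ...use d_inode(parent)...
+ *     inode_unlock(d_inode(parent));
+ *     dput(parent);
+ */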
+
+int ksmbd_vfs_may_delete(struct user_namespace *user_ns,
+                        struct dentry *dentry)
+{
+       struct dentry *parent;
+       int ret;
+
+       parent = dget_parent(dentry);
+       ret = ksmbd_vfs_lock_parent(parent, dentry);
+       if (ret) {
+               dput(parent);
+               return ret;
+       }
+
+       ret = inode_permission(user_ns, d_inode(parent),
+                              MAY_EXEC | MAY_WRITE);
+
+       inode_unlock(d_inode(parent));
+       dput(parent);
+       return ret;
+}
+
+int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
+                                  struct dentry *dentry, __le32 *daccess)
+{
+       struct dentry *parent;
+       int ret = 0;
+
+       *daccess = cpu_to_le32(FILE_READ_ATTRIBUTES | READ_CONTROL);
+
+       if (!inode_permission(user_ns, d_inode(dentry), MAY_OPEN | MAY_WRITE))
+               *daccess |= cpu_to_le32(WRITE_DAC | WRITE_OWNER | SYNCHRONIZE |
+                               FILE_WRITE_DATA | FILE_APPEND_DATA |
+                               FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES |
+                               FILE_DELETE_CHILD);
+
+       if (!inode_permission(user_ns, d_inode(dentry), MAY_OPEN | MAY_READ))
+               *daccess |= FILE_READ_DATA_LE | FILE_READ_EA_LE;
+
+       if (!inode_permission(user_ns, d_inode(dentry), MAY_OPEN | MAY_EXEC))
+               *daccess |= FILE_EXECUTE_LE;
+
+       parent = dget_parent(dentry);
+       ret = ksmbd_vfs_lock_parent(parent, dentry);
+       if (ret) {
+               dput(parent);
+               return ret;
+       }
+
+       if (!inode_permission(user_ns, d_inode(parent), MAY_EXEC | MAY_WRITE))
+               *daccess |= FILE_DELETE_LE;
+
+       inode_unlock(d_inode(parent));
+       dput(parent);
+       return ret;
+}
+
+/**
+ * ksmbd_vfs_create() - vfs helper for smb create file
+ * @work:      work
+ * @name:      file name
+ * @mode:      file create mode
+ *
+ * Return:     0 on success, otherwise error
+ */
+int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
+{
+       struct path path;
+       struct dentry *dentry;
+       int err;
+
+       dentry = kern_path_create(AT_FDCWD, name, &path, 0);
+       if (IS_ERR(dentry)) {
+               err = PTR_ERR(dentry);
+               if (err != -ENOENT)
+                       pr_err("path create failed for %s, err %d\n",
+                              name, err);
+               return err;
+       }
+
+       mode |= S_IFREG;
+       err = vfs_create(mnt_user_ns(path.mnt), d_inode(path.dentry),
+                        dentry, mode, true);
+       if (!err) {
+               ksmbd_vfs_inherit_owner(work, d_inode(path.dentry),
+                                       d_inode(dentry));
+       } else {
+               pr_err("File(%s): creation failed (err:%d)\n", name, err);
+       }
+       done_path_create(&path, dentry);
+       return err;
+}
+
+/**
+ * ksmbd_vfs_mkdir() - vfs helper for smb create directory
+ * @work:      work
+ * @name:      directory name
+ * @mode:      directory create mode
+ *
+ * Return:     0 on success, otherwise error
+ */
+int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
+{
+       struct path path;
+       struct dentry *dentry;
+       int err;
+
+       dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+       if (IS_ERR(dentry)) {
+               err = PTR_ERR(dentry);
+               if (err != -EEXIST)
+                       ksmbd_debug(VFS, "path create failed for %s, err %d\n",
+                                   name, err);
+               return err;
+       }
+
+       mode |= S_IFDIR;
+       err = vfs_mkdir(mnt_user_ns(path.mnt), d_inode(path.dentry),
+                       dentry, mode);
+       if (err) {
+               goto out;
+       } else if (d_unhashed(dentry)) {
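+               /*
+                * Some filesystems instantiate a different dentry and leave
+                * the one passed to vfs_mkdir() unhashed, so look the name up
+                * again to get the real dentry before inheriting the owner.
+                */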
+               struct dentry *d;
+
+               d = lookup_one_len(dentry->d_name.name, dentry->d_parent,
+                                  dentry->d_name.len);
+               if (IS_ERR(d)) {
+                       err = PTR_ERR(d);
+                       goto out;
+               }
+               if (unlikely(d_is_negative(d))) {
+                       dput(d);
+                       err = -ENOENT;
+                       goto out;
+               }
+
+               ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d));
+               dput(d);
+       }
+out:
+       done_path_create(&path, dentry);
+       if (err)
+               pr_err("mkdir(%s): creation failed (err:%d)\n", name, err);
+       return err;
+}
+
+static ssize_t ksmbd_vfs_getcasexattr(struct user_namespace *user_ns,
+                                     struct dentry *dentry, char *attr_name,
+                                     int attr_name_len, char **attr_value)
+{
+       char *name, *xattr_list = NULL;
+       ssize_t value_len = -ENOENT, xattr_list_len;
+
+       xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
+       if (xattr_list_len <= 0)
+               goto out;
+
+       for (name = xattr_list; name - xattr_list < xattr_list_len;
+                       name += strlen(name) + 1) {
+               ksmbd_debug(VFS, "%s, len %zd\n", name, strlen(name));
+               if (strncasecmp(attr_name, name, attr_name_len))
+                       continue;
+
+               value_len = ksmbd_vfs_getxattr(user_ns,
+                                              dentry,
+                                              name,
+                                              attr_value);
+               if (value_len < 0)
+                       pr_err("failed to get xattr in file\n");
+               break;
+       }
+
+out:
+       kvfree(xattr_list);
+       return value_len;
+}
+
+static int ksmbd_vfs_stream_read(struct ksmbd_file *fp, char *buf, loff_t *pos,
+                                size_t count)
+{
+       ssize_t v_len;
+       char *stream_buf = NULL;
+
+       ksmbd_debug(VFS, "read stream data pos : %llu, count : %zd\n",
+                   *pos, count);
+
+       v_len = ksmbd_vfs_getcasexattr(file_mnt_user_ns(fp->filp),
+                                      fp->filp->f_path.dentry,
+                                      fp->stream.name,
+                                      fp->stream.size,
+                                      &stream_buf);
+       if ((int)v_len <= 0)
+               return (int)v_len;
+
+       if (v_len <= *pos) {
+               count = -EINVAL;
+               goto free_buf;
+       }
+
+       if (v_len - *pos < count)
+               count = v_len - *pos;
+
+       memcpy(buf, &stream_buf[*pos], count);
+
+free_buf:
+       kvfree(stream_buf);
+       return count;
+}
+
+/**
+ * check_lock_range() - vfs helper for smb byte range file locking
+ * @filp:      the file to apply the lock to
+ * @start:     lock start byte offset
+ * @end:       lock end byte offset
+ * @type:      byte range type read/write
+ *
+ * Return:     0 on success, otherwise error
+ */
+static int check_lock_range(struct file *filp, loff_t start, loff_t end,
+                           unsigned char type)
+{
+       struct file_lock *flock;
+       struct file_lock_context *ctx = file_inode(filp)->i_flctx;
+       int error = 0;
+
+       if (!ctx || list_empty_careful(&ctx->flc_posix))
+               return 0;
+
+       spin_lock(&ctx->flc_lock);
+       list_for_each_entry(flock, &ctx->flc_posix, fl_list) {
+               /* check conflict locks */
+               if (flock->fl_end >= start && end >= flock->fl_start) {
+                       if (flock->fl_type == F_RDLCK) {
+                               if (type == WRITE) {
+                                       pr_err("not allow write by shared lock\n");
+                                       error = 1;
+                                       goto out;
+                               }
+                       } else if (flock->fl_type == F_WRLCK) {
+                               /* check owner in lock */
+                               if (flock->fl_file != filp) {
+                                       error = 1;
+                                       pr_err("not allow rw access by exclusive lock from other opens\n");
+                                       goto out;
+                               }
+                       }
+               }
+       }
+out:
+       spin_unlock(&ctx->flc_lock);
+       return error;
+}
+
+/**
+ * ksmbd_vfs_read() - vfs helper for smb file read
+ * @work:      smb work
+ * @fp:        ksmbd file pointer of the open file
+ * @count:     read byte count
+ * @pos:       file pos
+ *
+ * Return:     number of read bytes on success, otherwise error
+ */
+int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
+                  loff_t *pos)
+{
+       struct file *filp = fp->filp;
+       ssize_t nbytes = 0;
+       char *rbuf = work->aux_payload_buf;
+       struct inode *inode = file_inode(filp);
+
+       if (S_ISDIR(inode->i_mode))
+               return -EISDIR;
+
+       if (unlikely(count == 0))
+               return 0;
+
+       if (work->conn->connection_type) {
+               if (!(fp->daccess & (FILE_READ_DATA_LE | FILE_EXECUTE_LE))) {
+                       pr_err("no right to read(%pd)\n",
+                              fp->filp->f_path.dentry);
+                       return -EACCES;
+               }
+       }
+
+       if (ksmbd_stream_fd(fp))
+               return ksmbd_vfs_stream_read(fp, rbuf, pos, count);
+
+       if (!work->tcon->posix_extensions) {
+               int ret;
+
+               ret = check_lock_range(filp, *pos, *pos + count - 1, READ);
+               if (ret) {
+                       pr_err("unable to read due to lock\n");
+                       return -EAGAIN;
+               }
+       }
+
+       nbytes = kernel_read(filp, rbuf, count, pos);
+       if (nbytes < 0) {
+               pr_err("smb read failed for (%s), err = %zd\n",
+                      fp->filename, nbytes);
+               return nbytes;
+       }
+
+       filp->f_pos = *pos;
+       return nbytes;
+}
+
+static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+                                 size_t count)
+{
+       char *stream_buf = NULL, *wbuf;
+       struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
+       size_t size, v_len;
+       int err = 0;
+
+       ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n",
+                   *pos, count);
+
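+       /* stream data is stored in an xattr, so cap the size at XATTR_SIZE_MAX */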
+       size = *pos + count;
+       if (size > XATTR_SIZE_MAX) {
+               size = XATTR_SIZE_MAX;
+               count = (*pos + count) - XATTR_SIZE_MAX;
+       }
+
+       v_len = ksmbd_vfs_getcasexattr(user_ns,
+                                      fp->filp->f_path.dentry,
+                                      fp->stream.name,
+                                      fp->stream.size,
+                                      &stream_buf);
+       if ((int)v_len < 0) {
+               pr_err("not found stream in xattr : %zd\n", v_len);
+               err = (int)v_len;
+               goto out;
+       }
+
+       if (v_len < size) {
+               wbuf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
+               if (!wbuf) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+
+               if (v_len > 0)
+                       memcpy(wbuf, stream_buf, v_len);
+               kvfree(stream_buf);
+               stream_buf = wbuf;
+       }
+
+       memcpy(&stream_buf[*pos], buf, count);
+
+       err = ksmbd_vfs_setxattr(user_ns,
+                                fp->filp->f_path.dentry,
+                                fp->stream.name,
+                                (void *)stream_buf,
+                                size,
+                                0);
+       if (err < 0)
+               goto out;
+
+       fp->filp->f_pos = *pos;
+       err = 0;
+out:
+       kvfree(stream_buf);
+       return err;
+}
+
+/**
+ * ksmbd_vfs_write() - vfs helper for smb file write
+ * @work:      work
+ * @fp:        ksmbd file pointer of the open file
+ * @buf:       buf containing data for writing
+ * @count:     byte count to write
+ * @pos:       file pos
+ * @sync:      fsync after write
+ * @written:   number of bytes written
+ *
+ * Return:     0 on success, otherwise error
+ */
+int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
+                   char *buf, size_t count, loff_t *pos, bool sync,
+                   ssize_t *written)
+{
+       struct ksmbd_session *sess = work->sess;
+       struct file *filp;
+       loff_t  offset = *pos;
+       int err = 0;
+
+       if (sess->conn->connection_type) {
+               if (!(fp->daccess & FILE_WRITE_DATA_LE)) {
+                       pr_err("no right to write(%pd)\n",
+                              fp->filp->f_path.dentry);
+                       err = -EACCES;
+                       goto out;
+               }
+       }
+
+       filp = fp->filp;
+
+       if (ksmbd_stream_fd(fp)) {
+               err = ksmbd_vfs_stream_write(fp, buf, pos, count);
+               if (!err)
+                       *written = count;
+               goto out;
+       }
+
+       if (!work->tcon->posix_extensions) {
+               err = check_lock_range(filp, *pos, *pos + count - 1, WRITE);
+               if (err) {
+                       pr_err("unable to write due to lock\n");
+                       err = -EAGAIN;
+                       goto out;
+               }
+       }
+
+       /* Do we need to break any of a levelII oplock? */
+       smb_break_all_levII_oplock(work, fp, 1);
+
+       err = kernel_write(filp, buf, count, pos);
+       if (err < 0) {
+               ksmbd_debug(VFS, "smb write failed, err = %d\n", err);
+               goto out;
+       }
+
+       filp->f_pos = *pos;
+       *written = err;
+       err = 0;
+       if (sync) {
+               err = vfs_fsync_range(filp, offset, offset + *written, 0);
+               if (err < 0)
+                       pr_err("fsync failed for filename = %pd, err = %d\n",
+                              fp->filp->f_path.dentry, err);
+       }
+
+out:
+       return err;
+}
+
+/**
+ * ksmbd_vfs_getattr() - vfs helper for smb getattr
+ * @path:      path of the file
+ * @stat:      kstat structure to fill in
+ *
+ * Return:     0 on success, otherwise error
+ */
+int ksmbd_vfs_getattr(struct path *path, struct kstat *stat)
+{
+       int err;
+
+       err = vfs_getattr(path, stat, STATX_BTIME, AT_STATX_SYNC_AS_STAT);
+       if (err)
+               pr_err("getattr failed, err %d\n", err);
+       return err;
+}
+
+/**
+ * ksmbd_vfs_fsync() - vfs helper for smb fsync
+ * @work:      work
+ * @fid:       file id of open file
+ * @p_id:      persistent file id of open file
+ *
+ * Return:     0 on success, otherwise error
+ */
+int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id)
+{
+       struct ksmbd_file *fp;
+       int err;
+
+       fp = ksmbd_lookup_fd_slow(work, fid, p_id);
+       if (!fp) {
+               pr_err("failed to get filp for fid %llu\n", fid);
+               return -ENOENT;
+       }
+       err = vfs_fsync(fp->filp, 0);
+       if (err < 0)
+               pr_err("smb fsync failed, err = %d\n", err);
+       ksmbd_fd_put(work, fp);
+       return err;
+}
+
+/**
+ * ksmbd_vfs_remove_file() - vfs helper for smb rmdir or unlink
+ * @work:      smb work
+ * @name:      absolute directory or file name
+ *
+ * Return:     0 on success, otherwise error
+ */
+int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name)
+{
+       struct path path;
+       struct dentry *parent;
+       int err;
+       int flags = 0;
+
+       if (ksmbd_override_fsids(work))
+               return -ENOMEM;
+
+       if (test_share_config_flag(work->tcon->share_conf,
+                                  KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
+               flags = LOOKUP_FOLLOW;
+
+       err = kern_path(name, flags, &path);
+       if (err) {
+               ksmbd_debug(VFS, "can't get %s, err %d\n", name, err);
+               ksmbd_revert_fsids(work);
+               return err;
+       }
+
+       parent = dget_parent(path.dentry);
+       err = ksmbd_vfs_lock_parent(parent, path.dentry);
+       if (err) {
+               dput(parent);
+               path_put(&path);
+               ksmbd_revert_fsids(work);
+               return err;
+       }
+
+       if (!d_inode(path.dentry)->i_nlink) {
+               err = -ENOENT;
+               goto out_err;
+       }
+
+       if (S_ISDIR(d_inode(path.dentry)->i_mode)) {
+               err = vfs_rmdir(mnt_user_ns(path.mnt), d_inode(parent),
+                               path.dentry);
+               if (err && err != -ENOTEMPTY)
+                       ksmbd_debug(VFS, "%s: rmdir failed, err %d\n", name,
+                                   err);
+       } else {
+               err = vfs_unlink(mnt_user_ns(path.mnt), d_inode(parent),
+                                path.dentry, NULL);
+               if (err)
+                       ksmbd_debug(VFS, "%s: unlink failed, err %d\n", name,
+                                   err);
+       }
+
+out_err:
+       inode_unlock(d_inode(parent));
+       dput(parent);
+       path_put(&path);
+       ksmbd_revert_fsids(work);
+       return err;
+}
+
+/**
+ * ksmbd_vfs_link() - vfs helper for creating smb hardlink
+ * @work:      smb work
+ * @oldname:   source file name
+ * @newname:   hardlink name
+ *
+ * Return:     0 on success, otherwise error
+ */
+int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
+                  const char *newname)
+{
+       struct path oldpath, newpath;
+       struct dentry *dentry;
+       int err;
+       int flags = 0;
+
+       if (ksmbd_override_fsids(work))
+               return -ENOMEM;
+
+       if (test_share_config_flag(work->tcon->share_conf,
+                                  KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
+               flags = LOOKUP_FOLLOW;
+
+       err = kern_path(oldname, flags, &oldpath);
+       if (err) {
+               pr_err("cannot get linux path for %s, err = %d\n",
+                      oldname, err);
+               goto out1;
+       }
+
+       dentry = kern_path_create(AT_FDCWD, newname, &newpath,
+                                 flags | LOOKUP_REVAL);
+       if (IS_ERR(dentry)) {
+               err = PTR_ERR(dentry);
+               pr_err("path create err for %s, err %d\n", newname, err);
+               goto out2;
+       }
+
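+       /* Hard links cannot cross mount boundaries. */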
+       err = -EXDEV;
+       if (oldpath.mnt != newpath.mnt) {
+               pr_err("vfs_link failed err %d\n", err);
+               goto out3;
+       }
+
+       err = vfs_link(oldpath.dentry, mnt_user_ns(newpath.mnt),
+                      d_inode(newpath.dentry),
+                      dentry, NULL);
+       if (err)
+               ksmbd_debug(VFS, "vfs_link failed err %d\n", err);
+
+out3:
+       done_path_create(&newpath, dentry);
+out2:
+       path_put(&oldpath);
+out1:
+       ksmbd_revert_fsids(work);
+       return err;
+}
+
+static int ksmbd_validate_entry_in_use(struct dentry *src_dent)
+{
+       struct dentry *dst_dent;
+
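+       /*
+        * Walk the children of the source dentry and refuse the rename if
+        * any of them is still open through a ksmbd file handle.
+        */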
+       spin_lock(&src_dent->d_lock);
+       list_for_each_entry(dst_dent, &src_dent->d_subdirs, d_child) {
+               struct ksmbd_file *child_fp;
+
+               if (d_really_is_negative(dst_dent))
+                       continue;
+
+               child_fp = ksmbd_lookup_fd_inode(d_inode(dst_dent));
+               if (child_fp) {
+                       spin_unlock(&src_dent->d_lock);
+                       ksmbd_debug(VFS, "Forbid rename, sub file/dir is in use\n");
+                       return -EACCES;
+               }
+       }
+       spin_unlock(&src_dent->d_lock);
+
+       return 0;
+}
+
+static int __ksmbd_vfs_rename(struct ksmbd_work *work,
+                             struct user_namespace *src_user_ns,
+                             struct dentry *src_dent_parent,
+                             struct dentry *src_dent,
+                             struct user_namespace *dst_user_ns,
+                             struct dentry *dst_dent_parent,
+                             struct dentry *trap_dent,
+                             char *dst_name)
+{
+       struct dentry *dst_dent;
+       int err;
+
+       if (!work->tcon->posix_extensions) {
+               err = ksmbd_validate_entry_in_use(src_dent);
+               if (err)
+                       return err;
+       }
+
+       if (d_really_is_negative(src_dent_parent))
+               return -ENOENT;
+       if (d_really_is_negative(dst_dent_parent))
+               return -ENOENT;
+       if (d_really_is_negative(src_dent))
+               return -ENOENT;
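+       /*
+        * trap_dent is the common ancestor returned by lock_rename(); if the
+        * source is the trap, the rename would move a directory into its own
+        * subtree, so reject it.
+        */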
+       if (src_dent == trap_dent)
+               return -EINVAL;
+
+       if (ksmbd_override_fsids(work))
+               return -ENOMEM;
+
+       dst_dent = lookup_one_len(dst_name, dst_dent_parent, strlen(dst_name));
+       err = PTR_ERR(dst_dent);
+       if (IS_ERR(dst_dent)) {
+               pr_err("lookup failed %s [%d]\n", dst_name, err);
+               goto out;
+       }
+
+       err = -ENOTEMPTY;
+       if (dst_dent != trap_dent && !d_really_is_positive(dst_dent)) {
+               struct renamedata rd = {
+                       .old_mnt_userns = src_user_ns,
+                       .old_dir        = d_inode(src_dent_parent),
+                       .old_dentry     = src_dent,
+                       .new_mnt_userns = dst_user_ns,
+                       .new_dir        = d_inode(dst_dent_parent),
+                       .new_dentry     = dst_dent,
+               };
+               err = vfs_rename(&rd);
+       }
+       if (err)
+               pr_err("vfs_rename failed err %d\n", err);
+       if (dst_dent)
+               dput(dst_dent);
+out:
+       ksmbd_revert_fsids(work);
+       return err;
+}
+
+int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
+                       char *newname)
+{
+       struct path dst_path;
+       struct dentry *src_dent_parent, *dst_dent_parent;
+       struct dentry *src_dent, *trap_dent, *src_child;
+       char *dst_name;
+       int err;
+       int flags;
+
+       dst_name = extract_last_component(newname);
+       if (!dst_name)
+               return -EINVAL;
+
+       src_dent_parent = dget_parent(fp->filp->f_path.dentry);
+       src_dent = fp->filp->f_path.dentry;
+
+       flags = LOOKUP_DIRECTORY;
+       if (test_share_config_flag(work->tcon->share_conf,
+                                  KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
+               flags |= LOOKUP_FOLLOW;
+
+       err = kern_path(newname, flags, &dst_path);
+       if (err) {
+               ksmbd_debug(VFS, "Cannot get path for %s [%d]\n", newname, err);
+               goto out;
+       }
+       dst_dent_parent = dst_path.dentry;
+
+       trap_dent = lock_rename(src_dent_parent, dst_dent_parent);
+       dget(src_dent);
+       dget(dst_dent_parent);
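+       /*
+        * Re-lookup the source name under the rename lock to make sure the
+        * open file was not renamed or unlinked in the meantime.
+        */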
+       src_child = lookup_one_len(src_dent->d_name.name, src_dent_parent,
+                                  src_dent->d_name.len);
+       if (IS_ERR(src_child)) {
+               err = PTR_ERR(src_child);
+               goto out_lock;
+       }
+
+       if (src_child != src_dent) {
+               err = -ESTALE;
+               dput(src_child);
+               goto out_lock;
+       }
+       dput(src_child);
+
+       err = __ksmbd_vfs_rename(work,
+                                file_mnt_user_ns(fp->filp),
+                                src_dent_parent,
+                                src_dent,
+                                mnt_user_ns(dst_path.mnt),
+                                dst_dent_parent,
+                                trap_dent,
+                                dst_name);
+out_lock:
+       dput(src_dent);
+       dput(dst_dent_parent);
+       unlock_rename(src_dent_parent, dst_dent_parent);
+       path_put(&dst_path);
+out:
+       dput(src_dent_parent);
+       return err;
+}
+
+/**
+ * ksmbd_vfs_truncate() - vfs helper for smb file truncate
+ * @work:      work
+ * @name:      pathname to truncate, or NULL to truncate @fp
+ * @fp:        ksmbd file pointer used when @name is NULL
+ * @size:      truncate to given size
+ *
+ * Return:     0 on success, otherwise error
+ */
+int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,
+                      struct ksmbd_file *fp, loff_t size)
+{
+       struct path path;
+       int err = 0;
+
+       if (name) {
+               err = kern_path(name, 0, &path);
+               if (err) {
+                       pr_err("cannot get linux path for %s, err %d\n",
+                              name, err);
+                       return err;
+               }
+               err = vfs_truncate(&path, size);
+               if (err)
+                       pr_err("truncate failed for %s err %d\n",
+                              name, err);
+               path_put(&path);
+       } else {
+               struct file *filp;
+
+               filp = fp->filp;
+
+               /* Do we need to break any of a levelII oplock? */
+               smb_break_all_levII_oplock(work, fp, 1);
+
+               if (!work->tcon->posix_extensions) {
+                       struct inode *inode = file_inode(filp);
+
+                       if (size < inode->i_size) {
+                               err = check_lock_range(filp, size,
+                                                      inode->i_size - 1, WRITE);
+                       } else {
+                               err = check_lock_range(filp, inode->i_size,
+                                                      size - 1, WRITE);
+                       }
+
+                       if (err) {
+                               pr_err("failed due to lock\n");
+                               return -EAGAIN;
+                       }
+               }
+
+               err = vfs_truncate(&filp->f_path, size);
+               if (err)
+                       pr_err("truncate failed for filename : %s err %d\n",
+                              fp->filename, err);
+       }
+
+       return err;
+}
+
+/**
+ * ksmbd_vfs_listxattr() - vfs helper for smb list extended attributes
+ * @dentry:    dentry of file for listing xattrs
+ * @list:      on success, set to a buffer allocated here holding the xattr list
+ *
+ * Return:     xattr list length on success, otherwise error
+ */
+ssize_t ksmbd_vfs_listxattr(struct dentry *dentry, char **list)
+{
+       ssize_t size;
+       char *vlist = NULL;
+
+       size = vfs_listxattr(dentry, NULL, 0);
+       if (size <= 0)
+               return size;
+
+       vlist = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
+       if (!vlist)
+               return -ENOMEM;
+
+       *list = vlist;
+       size = vfs_listxattr(dentry, vlist, size);
+       if (size < 0) {
+               ksmbd_debug(VFS, "listxattr failed\n");
+               kvfree(vlist);
+               *list = NULL;
+       }
+
+       return size;
+}
+
+static ssize_t ksmbd_vfs_xattr_len(struct user_namespace *user_ns,
+                                  struct dentry *dentry, char *xattr_name)
+{
+       return vfs_getxattr(user_ns, dentry, xattr_name, NULL, 0);
+}
+
+/**
+ * ksmbd_vfs_getxattr() - vfs helper for smb get extended attributes value
+ * @user_ns:   user namespace
+ * @dentry:    dentry of file for getting xattrs
+ * @xattr_name:        name of the xattr to query
+ * @xattr_buf: on success, set to a buffer holding the xattr value
+ *
+ * Return:     read xattr value length on success, otherwise error
+ */
+ssize_t ksmbd_vfs_getxattr(struct user_namespace *user_ns,
+                          struct dentry *dentry,
+                          char *xattr_name, char **xattr_buf)
+{
+       ssize_t xattr_len;
+       char *buf;
+
+       *xattr_buf = NULL;
+       xattr_len = ksmbd_vfs_xattr_len(user_ns, dentry, xattr_name);
+       if (xattr_len < 0)
+               return xattr_len;
+
+       buf = kmalloc(xattr_len + 1, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       xattr_len = vfs_getxattr(user_ns, dentry, xattr_name,
+                                (void *)buf, xattr_len);
+       if (xattr_len > 0)
+               *xattr_buf = buf;
+       else
+               kfree(buf);
+       return xattr_len;
+}
+
+/**
+ * ksmbd_vfs_setxattr() - vfs helper for smb set extended attributes value
+ * @user_ns:   user namespace
+ * @dentry:    dentry to set XATTR at
+ * @attr_name: xattr name for setxattr
+ * @attr_value:        xattr value to set
+ * @attr_size: size of xattr value
+ * @flags:     setxattr flags (e.g. XATTR_CREATE, XATTR_REPLACE)
+ *
+ * Return:     0 on success, otherwise error
+ */
+int ksmbd_vfs_setxattr(struct user_namespace *user_ns,
+                      struct dentry *dentry, const char *attr_name,
+                      const void *attr_value, size_t attr_size, int flags)
+{
+       int err;
+
+       err = vfs_setxattr(user_ns,
+                          dentry,
+                          attr_name,
+                          attr_value,
+                          attr_size,
+                          flags);
+       if (err)
+               ksmbd_debug(VFS, "setxattr failed, err %d\n", err);
+       return err;
+}
+
+/**
+ * ksmbd_vfs_set_fadvise() - convert smb IO caching options to linux options
+ * @filp:      file pointer for IO
+ * @option:    smb IO options
+ */
+void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option)
+{
+       struct address_space *mapping;
+
+       mapping = filp->f_mapping;
+
+       if (!option || !mapping)
+               return;
+
+       if (option & FILE_WRITE_THROUGH_LE) {
+               filp->f_flags |= O_SYNC;
+       } else if (option & FILE_SEQUENTIAL_ONLY_LE) {
+               filp->f_ra.ra_pages = inode_to_bdi(mapping->host)->ra_pages * 2;
+               spin_lock(&filp->f_lock);
+               filp->f_mode &= ~FMODE_RANDOM;
+               spin_unlock(&filp->f_lock);
+       } else if (option & FILE_RANDOM_ACCESS_LE) {
+               spin_lock(&filp->f_lock);
+               filp->f_mode |= FMODE_RANDOM;
+               spin_unlock(&filp->f_lock);
+       }
+}
+
+int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
+                       loff_t off, loff_t len)
+{
+       smb_break_all_levII_oplock(work, fp, 1);
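+       /* Sparse files get a hole punched; otherwise the range is explicitly zeroed. */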
+       if (fp->f_ci->m_fattr & ATTR_SPARSE_FILE_LE)
+               return vfs_fallocate(fp->filp,
+                                    FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+                                    off, len);
+
+       return vfs_fallocate(fp->filp, FALLOC_FL_ZERO_RANGE, off, len);
+}
+
+int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
+                        struct file_allocated_range_buffer *ranges,
+                        int in_count, int *out_count)
+{
+       struct file *f = fp->filp;
+       struct inode *inode = file_inode(fp->filp);
+       loff_t maxbytes = (u64)inode->i_sb->s_maxbytes, end;
+       loff_t extent_start, extent_end;
+       int ret = 0;
+
+       if (start > maxbytes)
+               return -EFBIG;
+
+       if (!in_count)
+               return 0;
+
+       /*
+        * Shrink request scope to what the fs can actually handle.
+        */
+       if (length > maxbytes || (maxbytes - length) < start)
+               length = maxbytes - start;
+
+       if (start + length > inode->i_size)
+               length = inode->i_size - start;
+
+       *out_count = 0;
+       end = start + length;
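+       /*
+        * Walk the allocated extents with llseek(SEEK_DATA)/llseek(SEEK_HOLE)
+        * pairs, filling one range entry per extent until the requested
+        * region or the caller's buffer is exhausted.
+        */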
+       while (start < end && *out_count < in_count) {
+               extent_start = f->f_op->llseek(f, start, SEEK_DATA);
+               if (extent_start < 0) {
+                       if (extent_start != -ENXIO)
+                               ret = (int)extent_start;
+                       break;
+               }
+
+               if (extent_start >= end)
+                       break;
+
+               extent_end = f->f_op->llseek(f, extent_start, SEEK_HOLE);
+               if (extent_end < 0) {
+                       if (extent_end != -ENXIO)
+                               ret = (int)extent_end;
+                       break;
+               } else if (extent_start >= extent_end) {
+                       break;
+               }
+
+               ranges[*out_count].file_offset = cpu_to_le64(extent_start);
+               ranges[(*out_count)++].length =
+                       cpu_to_le64(min(extent_end, end) - extent_start);
+
+               start = extent_end;
+       }
+
+       return ret;
+}
+
+int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
+                          struct dentry *dentry, char *attr_name)
+{
+       return vfs_removexattr(user_ns, dentry, attr_name);
+}
+
+int ksmbd_vfs_unlink(struct user_namespace *user_ns,
+                    struct dentry *dir, struct dentry *dentry)
+{
+       int err = 0;
+
+       err = ksmbd_vfs_lock_parent(dir, dentry);
+       if (err)
+               return err;
+       dget(dentry);
+
+       if (S_ISDIR(d_inode(dentry)->i_mode))
+               err = vfs_rmdir(user_ns, d_inode(dir), dentry);
+       else
+               err = vfs_unlink(user_ns, d_inode(dir), dentry, NULL);
+
+       dput(dentry);
+       inode_unlock(d_inode(dir));
+       if (err)
+               ksmbd_debug(VFS, "failed to delete, err %d\n", err);
+
+       return err;
+}
+
+static int __dir_empty(struct dir_context *ctx, const char *name, int namlen,
+                      loff_t offset, u64 ino, unsigned int d_type)
+{
+       struct ksmbd_readdir_data *buf;
+
+       buf = container_of(ctx, struct ksmbd_readdir_data, ctx);
+       buf->dirent_count++;
+
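+       /* "." and ".." are always present; anything beyond them means non-empty. */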
+       if (buf->dirent_count > 2)
+               return -ENOTEMPTY;
+       return 0;
+}
+
+/**
+ * ksmbd_vfs_empty_dir() - check for empty directory
+ * @fp:        ksmbd file pointer
+ *
+ * Return:     0 if the directory is empty, otherwise -ENOTEMPTY
+ */
+int ksmbd_vfs_empty_dir(struct ksmbd_file *fp)
+{
+       int err;
+       struct ksmbd_readdir_data readdir_data;
+
+       memset(&readdir_data, 0, sizeof(struct ksmbd_readdir_data));
+
+       set_ctx_actor(&readdir_data.ctx, __dir_empty);
+       readdir_data.dirent_count = 0;
+
+       err = iterate_dir(fp->filp, &readdir_data.ctx);
+       if (readdir_data.dirent_count > 2)
+               err = -ENOTEMPTY;
+       else
+               err = 0;
+       return err;
+}
+
+static int __caseless_lookup(struct dir_context *ctx, const char *name,
+                            int namlen, loff_t offset, u64 ino,
+                            unsigned int d_type)
+{
+       struct ksmbd_readdir_data *buf;
+
+       buf = container_of(ctx, struct ksmbd_readdir_data, ctx);
+
+       if (buf->used != namlen)
+               return 0;
+       if (!strncasecmp((char *)buf->private, name, namlen)) {
+               memcpy((char *)buf->private, name, namlen);
+               buf->dirent_count = 1;
+               return -EEXIST;
+       }
+       return 0;
+}
+
+/**
+ * ksmbd_vfs_lookup_in_dir() - lookup a file in a directory
+ * @dir:       path of the directory to search
+ * @name:      filename to look up (case-insensitively)
+ * @namelen:   filename length
+ *
+ * Return:     0 on success, otherwise error
+ */
+static int ksmbd_vfs_lookup_in_dir(struct path *dir, char *name, size_t namelen)
+{
+       int ret;
+       struct file *dfilp;
+       int flags = O_RDONLY | O_LARGEFILE;
+       struct ksmbd_readdir_data readdir_data = {
+               .ctx.actor      = __caseless_lookup,
+               .private        = name,
+               .used           = namelen,
+               .dirent_count   = 0,
+       };
+
+       dfilp = dentry_open(dir, flags, current_cred());
+       if (IS_ERR(dfilp))
+               return PTR_ERR(dfilp);
+
+       ret = iterate_dir(dfilp, &readdir_data.ctx);
+       if (readdir_data.dirent_count > 0)
+               ret = 0;
+       fput(dfilp);
+       return ret;
+}
+
+/**
+ * ksmbd_vfs_kern_path() - lookup a file and get path info
+ * @name:      name of file for lookup
+ * @flags:     lookup flags
+ * @path:      on success, holds the resolved path
+ * @caseless:  caseless filename lookup
+ *
+ * Return:     0 on success, otherwise error
+ */
+int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
+                       bool caseless)
+{
+       int err;
+
+       if (name[0] != '/')
+               return -EINVAL;
+
+       err = kern_path(name, flags, path);
+       if (!err)
+               return 0;
+
+       if (caseless) {
+               char *filepath;
+               struct path parent;
+               size_t path_len, remain_len;
+
+               filepath = kstrdup(name, GFP_KERNEL);
+               if (!filepath)
+                       return -ENOMEM;
+
+               path_len = strlen(filepath);
+               remain_len = path_len - 1;
+
+               err = kern_path("/", flags, &parent);
+               if (err)
+                       goto out;
+
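+               /*
+                * Resolve the path one component at a time: scan the parent
+                * directory case-insensitively, rewrite the component with the
+                * on-disk spelling that was found, then continue from there.
+                */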
+               while (d_can_lookup(parent.dentry)) {
+                       char *filename = filepath + path_len - remain_len;
+                       char *next = strchrnul(filename, '/');
+                       size_t filename_len = next - filename;
+                       bool is_last = !next[0];
+
+                       if (filename_len == 0)
+                               break;
+
+                       err = ksmbd_vfs_lookup_in_dir(&parent, filename,
+                                                     filename_len);
+                       if (err) {
+                               path_put(&parent);
+                               goto out;
+                       }
+
+                       path_put(&parent);
+                       next[0] = '\0';
+
+                       err = kern_path(filepath, flags, &parent);
+                       if (err)
+                               goto out;
+
+                       if (is_last) {
+                               path->mnt = parent.mnt;
+                               path->dentry = parent.dentry;
+                               goto out;
+                       }
+
+                       next[0] = '/';
+                       remain_len -= filename_len + 1;
+               }
+
+               path_put(&parent);
+               err = -EINVAL;
+out:
+               kfree(filepath);
+       }
+       return err;
+}
+
+int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
+                               struct dentry *dentry)
+{
+       char *name, *xattr_list = NULL;
+       ssize_t xattr_list_len;
+       int err = 0;
+
+       xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
+       if (xattr_list_len < 0) {
+               goto out;
+       } else if (!xattr_list_len) {
+               ksmbd_debug(SMB, "empty xattr in the file\n");
+               goto out;
+       }
+
+       for (name = xattr_list; name - xattr_list < xattr_list_len;
+            name += strlen(name) + 1) {
+               ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
+
+               if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
+                            sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1) ||
+                   !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
+                            sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1)) {
+                       err = ksmbd_vfs_remove_xattr(user_ns, dentry, name);
+                       if (err)
+                               ksmbd_debug(SMB,
+                                           "remove acl xattr failed : %s\n", name);
+               }
+       }
+out:
+       kvfree(xattr_list);
+       return err;
+}
+
+int ksmbd_vfs_remove_sd_xattrs(struct user_namespace *user_ns,
+                              struct dentry *dentry)
+{
+       char *name, *xattr_list = NULL;
+       ssize_t xattr_list_len;
+       int err = 0;
+
+       xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
+       if (xattr_list_len < 0) {
+               goto out;
+       } else if (!xattr_list_len) {
+               ksmbd_debug(SMB, "empty xattr in the file\n");
+               goto out;
+       }
+
+       for (name = xattr_list; name - xattr_list < xattr_list_len;
+                       name += strlen(name) + 1) {
+               ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
+
+               if (!strncmp(name, XATTR_NAME_SD, XATTR_NAME_SD_LEN)) {
+                       err = ksmbd_vfs_remove_xattr(user_ns, dentry, name);
+                       if (err)
+                               ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
+               }
+       }
+out:
+       kvfree(xattr_list);
+       return err;
+}
+
+static struct xattr_smb_acl *ksmbd_vfs_make_xattr_posix_acl(struct user_namespace *user_ns,
+                                                           struct inode *inode,
+                                                           int acl_type)
+{
+       struct xattr_smb_acl *smb_acl = NULL;
+       struct posix_acl *posix_acls;
+       struct posix_acl_entry *pa_entry;
+       struct xattr_acl_entry *xa_entry;
+       int i;
+
+       if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
+               return NULL;
+
+       posix_acls = get_acl(inode, acl_type);
+       if (!posix_acls)
+               return NULL;
+
+       smb_acl = kzalloc(sizeof(struct xattr_smb_acl) +
+                         sizeof(struct xattr_acl_entry) * posix_acls->a_count,
+                         GFP_KERNEL);
+       if (!smb_acl)
+               goto out;
+
+       smb_acl->count = posix_acls->a_count;
+       pa_entry = posix_acls->a_entries;
+       xa_entry = smb_acl->entries;
+       for (i = 0; i < posix_acls->a_count; i++, pa_entry++, xa_entry++) {
+               switch (pa_entry->e_tag) {
+               case ACL_USER:
+                       xa_entry->type = SMB_ACL_USER;
+                       xa_entry->uid = from_kuid(user_ns, pa_entry->e_uid);
+                       break;
+               case ACL_USER_OBJ:
+                       xa_entry->type = SMB_ACL_USER_OBJ;
+                       break;
+               case ACL_GROUP:
+                       xa_entry->type = SMB_ACL_GROUP;
+                       xa_entry->gid = from_kgid(user_ns, pa_entry->e_gid);
+                       break;
+               case ACL_GROUP_OBJ:
+                       xa_entry->type = SMB_ACL_GROUP_OBJ;
+                       break;
+               case ACL_OTHER:
+                       xa_entry->type = SMB_ACL_OTHER;
+                       break;
+               case ACL_MASK:
+                       xa_entry->type = SMB_ACL_MASK;
+                       break;
+               default:
+                       pr_err("unknown type : 0x%x\n", pa_entry->e_tag);
+                       goto out;
+               }
+
+               if (pa_entry->e_perm & ACL_READ)
+                       xa_entry->perm |= SMB_ACL_READ;
+               if (pa_entry->e_perm & ACL_WRITE)
+                       xa_entry->perm |= SMB_ACL_WRITE;
+               if (pa_entry->e_perm & ACL_EXECUTE)
+                       xa_entry->perm |= SMB_ACL_EXECUTE;
+       }
+out:
+       posix_acl_release(posix_acls);
+       return smb_acl;
+}
+
+int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+                          struct user_namespace *user_ns,
+                          struct dentry *dentry,
+                          struct smb_ntsd *pntsd, int len)
+{
+       int rc;
+       struct ndr sd_ndr = {0}, acl_ndr = {0};
+       struct xattr_ntacl acl = {0};
+       struct xattr_smb_acl *smb_acl, *def_smb_acl = NULL;
+       struct inode *inode = d_inode(dentry);
+
+       acl.version = 4;
+       acl.hash_type = XATTR_SD_HASH_TYPE_SHA256;
+       acl.current_time = ksmbd_UnixTimeToNT(current_time(inode));
+
+       memcpy(acl.desc, "posix_acl", 9);
+       acl.desc_len = 10;
+
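+       /*
+        * Bias the SID/DACL offsets by NDR_NTSD_OFFSETOF before storing the
+        * descriptor; ksmbd_vfs_get_sd_xattr() subtracts the same bias when
+        * reading it back.
+        */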
+       pntsd->osidoffset =
+               cpu_to_le32(le32_to_cpu(pntsd->osidoffset) + NDR_NTSD_OFFSETOF);
+       pntsd->gsidoffset =
+               cpu_to_le32(le32_to_cpu(pntsd->gsidoffset) + NDR_NTSD_OFFSETOF);
+       pntsd->dacloffset =
+               cpu_to_le32(le32_to_cpu(pntsd->dacloffset) + NDR_NTSD_OFFSETOF);
+
+       acl.sd_buf = (char *)pntsd;
+       acl.sd_size = len;
+
+       rc = ksmbd_gen_sd_hash(conn, acl.sd_buf, acl.sd_size, acl.hash);
+       if (rc) {
+               pr_err("failed to generate hash for ndr acl\n");
+               return rc;
+       }
+
+       smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
+                                                ACL_TYPE_ACCESS);
+       if (S_ISDIR(inode->i_mode))
+               def_smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
+                                                            ACL_TYPE_DEFAULT);
+
+       rc = ndr_encode_posix_acl(&acl_ndr, user_ns, inode,
+                                 smb_acl, def_smb_acl);
+       if (rc) {
+               pr_err("failed to encode ndr to posix acl\n");
+               goto out;
+       }
+
+       rc = ksmbd_gen_sd_hash(conn, acl_ndr.data, acl_ndr.offset,
+                              acl.posix_acl_hash);
+       if (rc) {
+               pr_err("failed to generate hash for ndr acl\n");
+               goto out;
+       }
+
+       rc = ndr_encode_v4_ntacl(&sd_ndr, &acl);
+       if (rc) {
+               pr_err("failed to encode ndr to posix acl\n");
+               goto out;
+       }
+
+       rc = ksmbd_vfs_setxattr(user_ns, dentry,
+                               XATTR_NAME_SD, sd_ndr.data,
+                               sd_ndr.offset, 0);
+       if (rc < 0)
+               pr_err("Failed to store XATTR ntacl :%d\n", rc);
+
+       kfree(sd_ndr.data);
+out:
+       kfree(acl_ndr.data);
+       kfree(smb_acl);
+       kfree(def_smb_acl);
+       return rc;
+}
+
+int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
+                          struct user_namespace *user_ns,
+                          struct dentry *dentry,
+                          struct smb_ntsd **pntsd)
+{
+       int rc;
+       struct ndr n;
+       struct inode *inode = d_inode(dentry);
+       struct ndr acl_ndr = {0};
+       struct xattr_ntacl acl;
+       struct xattr_smb_acl *smb_acl = NULL, *def_smb_acl = NULL;
+       __u8 cmp_hash[XATTR_SD_HASH_SIZE] = {0};
+
+       rc = ksmbd_vfs_getxattr(user_ns, dentry, XATTR_NAME_SD, &n.data);
+       if (rc <= 0)
+               return rc;
+
+       n.length = rc;
+       rc = ndr_decode_v4_ntacl(&n, &acl);
+       if (rc)
+               goto free_n_data;
+
+       smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
+                                                ACL_TYPE_ACCESS);
+       if (S_ISDIR(inode->i_mode))
+               def_smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
+                                                            ACL_TYPE_DEFAULT);
+
+       rc = ndr_encode_posix_acl(&acl_ndr, user_ns, inode, smb_acl,
+                                 def_smb_acl);
+       if (rc) {
+               pr_err("failed to encode ndr to posix acl\n");
+               goto out_free;
+       }
+
+       rc = ksmbd_gen_sd_hash(conn, acl_ndr.data, acl_ndr.offset, cmp_hash);
+       if (rc) {
+               pr_err("failed to generate hash for ndr acl\n");
+               goto out_free;
+       }
+
+       if (memcmp(cmp_hash, acl.posix_acl_hash, XATTR_SD_HASH_SIZE)) {
+               pr_err("hash value diff\n");
+               rc = -EINVAL;
+               goto out_free;
+       }
+
+       *pntsd = acl.sd_buf;
+       (*pntsd)->osidoffset = cpu_to_le32(le32_to_cpu((*pntsd)->osidoffset) -
+                                          NDR_NTSD_OFFSETOF);
+       (*pntsd)->gsidoffset = cpu_to_le32(le32_to_cpu((*pntsd)->gsidoffset) -
+                                          NDR_NTSD_OFFSETOF);
+       (*pntsd)->dacloffset = cpu_to_le32(le32_to_cpu((*pntsd)->dacloffset) -
+                                          NDR_NTSD_OFFSETOF);
+
+       rc = acl.sd_size;
+out_free:
+       kfree(acl_ndr.data);
+       kfree(smb_acl);
+       kfree(def_smb_acl);
+       if (rc < 0) {
+               kfree(acl.sd_buf);
+               *pntsd = NULL;
+       }
+
+free_n_data:
+       kfree(n.data);
+       return rc;
+}
+
+int ksmbd_vfs_set_dos_attrib_xattr(struct user_namespace *user_ns,
+                                  struct dentry *dentry,
+                                  struct xattr_dos_attrib *da)
+{
+       struct ndr n;
+       int err;
+
+       err = ndr_encode_dos_attr(&n, da);
+       if (err)
+               return err;
+
+       err = ksmbd_vfs_setxattr(user_ns, dentry, XATTR_NAME_DOS_ATTRIBUTE,
+                                (void *)n.data, n.offset, 0);
+       if (err)
+               ksmbd_debug(SMB, "failed to store dos attribute in xattr\n");
+       kfree(n.data);
+
+       return err;
+}
+
+int ksmbd_vfs_get_dos_attrib_xattr(struct user_namespace *user_ns,
+                                  struct dentry *dentry,
+                                  struct xattr_dos_attrib *da)
+{
+       struct ndr n;
+       int err;
+
+       err = ksmbd_vfs_getxattr(user_ns, dentry, XATTR_NAME_DOS_ATTRIBUTE,
+                                (char **)&n.data);
+       if (err > 0) {
+               n.length = err;
+               if (ndr_decode_dos_attr(&n, da))
+                       err = -EINVAL;
+               kfree(n.data);
+       } else {
+               ksmbd_debug(SMB, "failed to load dos attribute in xattr\n");
+       }
+
+       return err;
+}
+
+/**
+ * ksmbd_vfs_init_kstat() - convert unix stat information to smb stat format
+ * @p:          destination buffer
+ * @ksmbd_kstat:      ksmbd kstat wrapper
+ */
+void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat)
+{
+       struct file_directory_info *info = (struct file_directory_info *)(*p);
+       struct kstat *kstat = ksmbd_kstat->kstat;
+       u64 time;
+
+       info->FileIndex = 0;
+       info->CreationTime = cpu_to_le64(ksmbd_kstat->create_time);
+       time = ksmbd_UnixTimeToNT(kstat->atime);
+       info->LastAccessTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(kstat->mtime);
+       info->LastWriteTime = cpu_to_le64(time);
+       time = ksmbd_UnixTimeToNT(kstat->ctime);
+       info->ChangeTime = cpu_to_le64(time);
+
+       if (ksmbd_kstat->file_attributes & ATTR_DIRECTORY_LE) {
+               info->EndOfFile = 0;
+               info->AllocationSize = 0;
+       } else {
+               info->EndOfFile = cpu_to_le64(kstat->size);
+               info->AllocationSize = cpu_to_le64(kstat->blocks << 9);
+       }
+       info->ExtFileAttributes = ksmbd_kstat->file_attributes;
+
+       return info;
+}
+
+int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
+                               struct user_namespace *user_ns,
+                               struct dentry *dentry,
+                               struct ksmbd_kstat *ksmbd_kstat)
+{
+       u64 time;
+       int rc;
+
+       generic_fillattr(user_ns, d_inode(dentry), ksmbd_kstat->kstat);
+
+       time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime);
+       ksmbd_kstat->create_time = time;
+
+       /*
+        * Set a default value for the case where "store dos attributes" is
+        * disabled, or where ACLs are disabled on the server's filesystem
+        * even though the option is enabled.
+        */
+       if (S_ISDIR(ksmbd_kstat->kstat->mode))
+               ksmbd_kstat->file_attributes = ATTR_DIRECTORY_LE;
+       else
+               ksmbd_kstat->file_attributes = ATTR_ARCHIVE_LE;
+
+       if (test_share_config_flag(work->tcon->share_conf,
+                                  KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+               struct xattr_dos_attrib da;
+
+               rc = ksmbd_vfs_get_dos_attrib_xattr(user_ns, dentry, &da);
+               if (rc > 0) {
+                       ksmbd_kstat->file_attributes = cpu_to_le32(da.attr);
+                       ksmbd_kstat->create_time = da.create_time;
+               } else {
+                       ksmbd_debug(VFS, "fail to load dos attribute.\n");
+               }
+       }
+
+       return 0;
+}
+
+ssize_t ksmbd_vfs_casexattr_len(struct user_namespace *user_ns,
+                               struct dentry *dentry, char *attr_name,
+                               int attr_name_len)
+{
+       char *name, *xattr_list = NULL;
+       ssize_t value_len = -ENOENT, xattr_list_len;
+
+       xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
+       if (xattr_list_len <= 0)
+               goto out;
+
+       for (name = xattr_list; name - xattr_list < xattr_list_len;
+                       name += strlen(name) + 1) {
+               ksmbd_debug(VFS, "%s, len %zd\n", name, strlen(name));
+               if (strncasecmp(attr_name, name, attr_name_len))
+                       continue;
+
+               value_len = ksmbd_vfs_xattr_len(user_ns, dentry, name);
+               break;
+       }
+
+out:
+       kvfree(xattr_list);
+       return value_len;
+}
+
+int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
+                               size_t *xattr_stream_name_size, int s_type)
+{
+       char *type, *buf;
+
+       if (s_type == DIR_STREAM)
+               type = ":$INDEX_ALLOCATION";
+       else
+               type = ":$DATA";
+
+       buf = kasprintf(GFP_KERNEL, "%s%s%s",
+                       XATTR_NAME_STREAM, stream_name, type);
+       if (!buf)
+               return -ENOMEM;
+
+       *xattr_stream_name = buf;
+       *xattr_stream_name_size = strlen(buf) + 1;
+
+       return 0;
+}
+
+int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
+                              struct ksmbd_file *src_fp,
+                              struct ksmbd_file *dst_fp,
+                              struct srv_copychunk *chunks,
+                              unsigned int chunk_count,
+                              unsigned int *chunk_count_written,
+                              unsigned int *chunk_size_written,
+                              loff_t *total_size_written)
+{
+       unsigned int i;
+       loff_t src_off, dst_off, src_file_size;
+       size_t len;
+       int ret;
+
+       *chunk_count_written = 0;
+       *chunk_size_written = 0;
+       *total_size_written = 0;
+
+       if (!(src_fp->daccess & (FILE_READ_DATA_LE | FILE_EXECUTE_LE))) {
+               pr_err("no right to read(%pd)\n", src_fp->filp->f_path.dentry);
+               return -EACCES;
+       }
+       if (!(dst_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) {
+               pr_err("no right to write(%pd)\n", dst_fp->filp->f_path.dentry);
+               return -EACCES;
+       }
+
+       if (ksmbd_stream_fd(src_fp) || ksmbd_stream_fd(dst_fp))
+               return -EBADF;
+
+       smb_break_all_levII_oplock(work, dst_fp, 1);
+
+       if (!work->tcon->posix_extensions) {
+               for (i = 0; i < chunk_count; i++) {
+                       src_off = le64_to_cpu(chunks[i].SourceOffset);
+                       dst_off = le64_to_cpu(chunks[i].TargetOffset);
+                       len = le32_to_cpu(chunks[i].Length);
+
+                       if (check_lock_range(src_fp->filp, src_off,
+                                            src_off + len - 1, READ))
+                               return -EAGAIN;
+                       if (check_lock_range(dst_fp->filp, dst_off,
+                                            dst_off + len - 1, WRITE))
+                               return -EAGAIN;
+               }
+       }
+
+       src_file_size = i_size_read(file_inode(src_fp->filp));
+
+       for (i = 0; i < chunk_count; i++) {
+               src_off = le64_to_cpu(chunks[i].SourceOffset);
+               dst_off = le64_to_cpu(chunks[i].TargetOffset);
+               len = le32_to_cpu(chunks[i].Length);
+
+               if (src_off + len > src_file_size)
+                       return -E2BIG;
+
+               ret = vfs_copy_file_range(src_fp->filp, src_off,
+                                         dst_fp->filp, dst_off, len, 0);
+               if (ret < 0)
+                       return ret;
+
+               *chunk_count_written += 1;
+               *total_size_written += ret;
+       }
+       return 0;
+}
+
+void ksmbd_vfs_posix_lock_wait(struct file_lock *flock)
+{
+       wait_event(flock->fl_wait, !flock->fl_blocker);
+}
+
+int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout)
+{
+       return wait_event_interruptible_timeout(flock->fl_wait,
+                                               !flock->fl_blocker,
+                                               timeout);
+}
+
+void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock)
+{
+       locks_delete_block(flock);
+}
+
+int ksmbd_vfs_set_init_posix_acl(struct user_namespace *user_ns,
+                                struct inode *inode)
+{
+       struct posix_acl_state acl_state;
+       struct posix_acl *acls;
+       int rc;
+
+       if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
+               return -EOPNOTSUPP;
+
+       ksmbd_debug(SMB, "Set posix acls\n");
+       rc = init_acl_state(&acl_state, 1);
+       if (rc)
+               return rc;
+
+       /* Seed owner/group/other permissions from the inode mode bits */
+       acl_state.owner.allow = (inode->i_mode & 0700) >> 6;
+       acl_state.group.allow = (inode->i_mode & 0070) >> 3;
+       acl_state.other.allow = inode->i_mode & 0007;
+       acl_state.users->aces[acl_state.users->n].uid = inode->i_uid;
+       acl_state.users->aces[acl_state.users->n++].perms.allow =
+               acl_state.owner.allow;
+       acl_state.groups->aces[acl_state.groups->n].gid = inode->i_gid;
+       acl_state.groups->aces[acl_state.groups->n++].perms.allow =
+               acl_state.group.allow;
+       acl_state.mask.allow = 0x07;
+
+       acls = posix_acl_alloc(6, GFP_KERNEL);
+       if (!acls) {
+               free_acl_state(&acl_state);
+               return -ENOMEM;
+       }
+       posix_state_to_acl(&acl_state, acls->a_entries);
+       rc = set_posix_acl(user_ns, inode, ACL_TYPE_ACCESS, acls);
+       if (rc < 0)
+               ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
+                           rc);
+       else if (S_ISDIR(inode->i_mode)) {
+               posix_state_to_acl(&acl_state, acls->a_entries);
+               rc = set_posix_acl(user_ns, inode, ACL_TYPE_DEFAULT,
+                                  acls);
+               if (rc < 0)
+                       ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+                                   rc);
+       }
+       free_acl_state(&acl_state);
+       posix_acl_release(acls);
+       return rc;
+}
+
+int ksmbd_vfs_inherit_posix_acl(struct user_namespace *user_ns,
+                               struct inode *inode, struct inode *parent_inode)
+{
+       struct posix_acl *acls;
+       struct posix_acl_entry *pace;
+       int rc, i;
+
+       if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
+               return -EOPNOTSUPP;
+
+       acls = get_acl(parent_inode, ACL_TYPE_DEFAULT);
+       if (!acls)
+               return -ENOENT;
+       pace = acls->a_entries;
+
+       for (i = 0; i < acls->a_count; i++, pace++) {
+               if (pace->e_tag == ACL_MASK) {
+                       pace->e_perm = 0x07;
+                       break;
+               }
+       }
+
+       rc = set_posix_acl(user_ns, inode, ACL_TYPE_ACCESS, acls);
+       if (rc < 0)
+               ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
+                           rc);
+       if (S_ISDIR(inode->i_mode)) {
+               rc = set_posix_acl(user_ns, inode, ACL_TYPE_DEFAULT,
+                                  acls);
+               if (rc < 0)
+                       ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+                                   rc);
+       }
+       posix_acl_release(acls);
+       return rc;
+}
diff --git a/fs/ksmbd/vfs.h b/fs/ksmbd/vfs.h
new file mode 100644 (file)
index 0000000..cb0cba0
--- /dev/null
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __KSMBD_VFS_H__
+#define __KSMBD_VFS_H__
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <uapi/linux/xattr.h>
+#include <linux/posix_acl.h>
+
+#include "smbacl.h"
+#include "xattr.h"
+
+/*
+ * Enumeration for stream type.
+ */
+enum {
+       DATA_STREAM     = 1,    /* type $DATA */
+       DIR_STREAM              /* type $INDEX_ALLOCATION */
+};
+
+/* CreateOptions */
+/* If this flag is set, it must not be a file; valid for directories only */
+#define FILE_DIRECTORY_FILE_LE                 cpu_to_le32(0x00000001)
+#define FILE_WRITE_THROUGH_LE                  cpu_to_le32(0x00000002)
+#define FILE_SEQUENTIAL_ONLY_LE                        cpu_to_le32(0x00000004)
+
+/* Should not buffer on server */
+#define FILE_NO_INTERMEDIATE_BUFFERING_LE      cpu_to_le32(0x00000008)
+/* MBZ */
+#define FILE_SYNCHRONOUS_IO_ALERT_LE           cpu_to_le32(0x00000010)
+/* MBZ */
+#define FILE_SYNCHRONOUS_IO_NONALERT_LE                cpu_to_le32(0x00000020)
+
+/* Flag must not be set for a directory */
+#define FILE_NON_DIRECTORY_FILE_LE             cpu_to_le32(0x00000040)
+
+/* Should be zero */
+#define CREATE_TREE_CONNECTION                 cpu_to_le32(0x00000080)
+#define FILE_COMPLETE_IF_OPLOCKED_LE           cpu_to_le32(0x00000100)
+#define FILE_NO_EA_KNOWLEDGE_LE                        cpu_to_le32(0x00000200)
+#define FILE_OPEN_REMOTE_INSTANCE              cpu_to_le32(0x00000400)
+
+/*
+ * Doc says this is an obsolete "open for recovery" flag; it should be zero
+ * in any case.
+ */
+#define CREATE_OPEN_FOR_RECOVERY               cpu_to_le32(0x00000400)
+#define FILE_RANDOM_ACCESS_LE                  cpu_to_le32(0x00000800)
+#define FILE_DELETE_ON_CLOSE_LE                        cpu_to_le32(0x00001000)
+#define FILE_OPEN_BY_FILE_ID_LE                        cpu_to_le32(0x00002000)
+#define FILE_OPEN_FOR_BACKUP_INTENT_LE         cpu_to_le32(0x00004000)
+#define FILE_NO_COMPRESSION_LE                 cpu_to_le32(0x00008000)
+
+/* Should be zero */
+#define FILE_OPEN_REQUIRING_OPLOCK             cpu_to_le32(0x00010000)
+#define FILE_DISALLOW_EXCLUSIVE                        cpu_to_le32(0x00020000)
+#define FILE_RESERVE_OPFILTER_LE               cpu_to_le32(0x00100000)
+#define FILE_OPEN_REPARSE_POINT_LE             cpu_to_le32(0x00200000)
+#define FILE_OPEN_NO_RECALL_LE                 cpu_to_le32(0x00400000)
+
+/* Should be zero */
+#define FILE_OPEN_FOR_FREE_SPACE_QUERY_LE      cpu_to_le32(0x00800000)
+#define CREATE_OPTIONS_MASK                    cpu_to_le32(0x00FFFFFF)
+#define CREATE_OPTION_READONLY                 0x10000000
+/* system. NB not sent over wire */
+#define CREATE_OPTION_SPECIAL                  0x20000000
+
+struct ksmbd_work;
+struct ksmbd_file;
+struct ksmbd_conn;
+
+struct ksmbd_dir_info {
+       const char      *name;
+       char            *wptr;
+       char            *rptr;
+       int             name_len;
+       int             out_buf_len;
+       int             num_entry;
+       int             data_count;
+       int             last_entry_offset;
+       bool            hide_dot_file;
+       int             flags;
+};
+
+struct ksmbd_readdir_data {
+       struct dir_context      ctx;
+       union {
+               void            *private;
+               char            *dirent;
+       };
+
+       unsigned int            used;
+       unsigned int            dirent_count;
+       unsigned int            file_attr;
+};
+
+/* ksmbd kstat wrapper to get valid create time when reading dir entry */
+struct ksmbd_kstat {
+       struct kstat            *kstat;
+       unsigned long long      create_time;
+       __le32                  file_attributes;
+};
+
+int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child);
+int ksmbd_vfs_may_delete(struct user_namespace *user_ns, struct dentry *dentry);
+int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
+                                  struct dentry *dentry, __le32 *daccess);
+int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode);
+int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode);
+int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp,
+                  size_t count, loff_t *pos);
+int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
+                   char *buf, size_t count, loff_t *pos, bool sync,
+                   ssize_t *written);
+int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id);
+int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name);
+int ksmbd_vfs_link(struct ksmbd_work *work,
+                  const char *oldname, const char *newname);
+int ksmbd_vfs_getattr(struct path *path, struct kstat *stat);
+int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
+                       char *newname);
+int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,
+                      struct ksmbd_file *fp, loff_t size);
+struct srv_copychunk;
+int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
+                              struct ksmbd_file *src_fp,
+                              struct ksmbd_file *dst_fp,
+                              struct srv_copychunk *chunks,
+                              unsigned int chunk_count,
+                              unsigned int *chunk_count_written,
+                              unsigned int *chunk_size_written,
+                              loff_t  *total_size_written);
+ssize_t ksmbd_vfs_listxattr(struct dentry *dentry, char **list);
+ssize_t ksmbd_vfs_getxattr(struct user_namespace *user_ns,
+                          struct dentry *dentry,
+                          char *xattr_name,
+                          char **xattr_buf);
+ssize_t ksmbd_vfs_casexattr_len(struct user_namespace *user_ns,
+                               struct dentry *dentry, char *attr_name,
+                               int attr_name_len);
+int ksmbd_vfs_setxattr(struct user_namespace *user_ns,
+                      struct dentry *dentry, const char *attr_name,
+                      const void *attr_value, size_t attr_size, int flags);
+int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
+                               size_t *xattr_stream_name_size, int s_type);
+int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
+                          struct dentry *dentry, char *attr_name);
+int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
+                       bool caseless);
+int ksmbd_vfs_empty_dir(struct ksmbd_file *fp);
+void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option);
+int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
+                       loff_t off, loff_t len);
+struct file_allocated_range_buffer;
+int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
+                        struct file_allocated_range_buffer *ranges,
+                        int in_count, int *out_count);
+int ksmbd_vfs_unlink(struct user_namespace *user_ns,
+                    struct dentry *dir, struct dentry *dentry);
+void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat);
+int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
+                               struct user_namespace *user_ns,
+                               struct dentry *dentry,
+                               struct ksmbd_kstat *ksmbd_kstat);
+void ksmbd_vfs_posix_lock_wait(struct file_lock *flock);
+int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout);
+void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock);
+int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
+                               struct dentry *dentry);
+int ksmbd_vfs_remove_sd_xattrs(struct user_namespace *user_ns,
+                              struct dentry *dentry);
+int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+                          struct user_namespace *user_ns,
+                          struct dentry *dentry,
+                          struct smb_ntsd *pntsd, int len);
+int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
+                          struct user_namespace *user_ns,
+                          struct dentry *dentry,
+                          struct smb_ntsd **pntsd);
+int ksmbd_vfs_set_dos_attrib_xattr(struct user_namespace *user_ns,
+                                  struct dentry *dentry,
+                                  struct xattr_dos_attrib *da);
+int ksmbd_vfs_get_dos_attrib_xattr(struct user_namespace *user_ns,
+                                  struct dentry *dentry,
+                                  struct xattr_dos_attrib *da);
+int ksmbd_vfs_set_init_posix_acl(struct user_namespace *user_ns,
+                                struct inode *inode);
+int ksmbd_vfs_inherit_posix_acl(struct user_namespace *user_ns,
+                               struct inode *inode,
+                               struct inode *parent_inode);
+#endif /* __KSMBD_VFS_H__ */
diff --git a/fs/ksmbd/vfs_cache.c b/fs/ksmbd/vfs_cache.c
new file mode 100644 (file)
index 0000000..92d8c61
--- /dev/null
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "glob.h"
+#include "vfs_cache.h"
+#include "oplock.h"
+#include "vfs.h"
+#include "connection.h"
+#include "mgmt/tree_connect.h"
+#include "mgmt/user_session.h"
+#include "smb_common.h"
+
+#define S_DEL_PENDING                  1
+#define S_DEL_ON_CLS                   2
+#define S_DEL_ON_CLS_STREAM            8
+
+static unsigned int inode_hash_mask __read_mostly;
+static unsigned int inode_hash_shift __read_mostly;
+static struct hlist_head *inode_hashtable __read_mostly;
+static DEFINE_RWLOCK(inode_hash_lock);
+
+static struct ksmbd_file_table global_ft;
+static atomic_long_t fd_limit;
+static struct kmem_cache *filp_cache;
+
+void ksmbd_set_fd_limit(unsigned long limit)
+{
+       limit = min(limit, get_max_files());
+       atomic_long_set(&fd_limit, limit);
+}
+
+static bool fd_limit_depleted(void)
+{
+       long v = atomic_long_dec_return(&fd_limit);
+
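+       /* Speculatively take a slot; undo the decrement if the limit was already hit. */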
+       if (v >= 0)
+               return false;
+       atomic_long_inc(&fd_limit);
+       return true;
+}
+
+static void fd_limit_close(void)
+{
+       atomic_long_inc(&fd_limit);
+}
+
+/*
+ * INODE hash
+ */
+
+static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
+{
+       unsigned long tmp;
+
+       tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+               L1_CACHE_BYTES;
+       tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
+       return tmp & inode_hash_mask;
+}
+
+static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
+{
+       struct hlist_head *head = inode_hashtable +
+               inode_hash(inode->i_sb, inode->i_ino);
+       struct ksmbd_inode *ci = NULL, *ret_ci = NULL;
+
+       hlist_for_each_entry(ci, head, m_hash) {
+               if (ci->m_inode == inode) {
+                       if (atomic_inc_not_zero(&ci->m_count))
+                               ret_ci = ci;
+                       break;
+               }
+       }
+       return ret_ci;
+}
+
+static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
+{
+       return __ksmbd_inode_lookup(file_inode(fp->filp));
+}
+
+static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode)
+{
+       struct ksmbd_inode *ci;
+
+       read_lock(&inode_hash_lock);
+       ci = __ksmbd_inode_lookup(inode);
+       read_unlock(&inode_hash_lock);
+       return ci;
+}
+
+int ksmbd_query_inode_status(struct inode *inode)
+{
+       struct ksmbd_inode *ci;
+       int ret = KSMBD_INODE_STATUS_UNKNOWN;
+
+       read_lock(&inode_hash_lock);
+       ci = __ksmbd_inode_lookup(inode);
+       if (ci) {
+               ret = KSMBD_INODE_STATUS_OK;
+               if (ci->m_flags & S_DEL_PENDING)
+                       ret = KSMBD_INODE_STATUS_PENDING_DELETE;
+               atomic_dec(&ci->m_count);
+       }
+       read_unlock(&inode_hash_lock);
+       return ret;
+}
+
+bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
+{
+       return (fp->f_ci->m_flags & S_DEL_PENDING);
+}
+
+void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
+{
+       fp->f_ci->m_flags |= S_DEL_PENDING;
+}
+
+void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
+{
+       fp->f_ci->m_flags &= ~S_DEL_PENDING;
+}
+
+void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
+                                 int file_info)
+{
+       if (ksmbd_stream_fd(fp)) {
+               fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM;
+               return;
+       }
+
+       fp->f_ci->m_flags |= S_DEL_ON_CLS;
+}
+
+static void ksmbd_inode_hash(struct ksmbd_inode *ci)
+{
+       struct hlist_head *b = inode_hashtable +
+               inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino);
+
+       hlist_add_head(&ci->m_hash, b);
+}
+
+static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
+{
+       write_lock(&inode_hash_lock);
+       hlist_del_init(&ci->m_hash);
+       write_unlock(&inode_hash_lock);
+}
+
+static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
+{
+       ci->m_inode = file_inode(fp->filp);
+       atomic_set(&ci->m_count, 1);
+       atomic_set(&ci->op_count, 0);
+       atomic_set(&ci->sop_count, 0);
+       ci->m_flags = 0;
+       ci->m_fattr = 0;
+       INIT_LIST_HEAD(&ci->m_fp_list);
+       INIT_LIST_HEAD(&ci->m_op_list);
+       rwlock_init(&ci->m_lock);
+       return 0;
+}
+
+static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
+{
+       struct ksmbd_inode *ci, *tmpci;
+       int rc;
+
+       read_lock(&inode_hash_lock);
+       ci = ksmbd_inode_lookup(fp);
+       read_unlock(&inode_hash_lock);
+       if (ci)
+               return ci;
+
+       ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL);
+       if (!ci)
+               return NULL;
+
+       rc = ksmbd_inode_init(ci, fp);
+       if (rc) {
+               pr_err("inode initialization failed\n");
+               kfree(ci);
+               return NULL;
+       }
+
+       write_lock(&inode_hash_lock);
+       tmpci = ksmbd_inode_lookup(fp);
+       if (!tmpci) {
+               ksmbd_inode_hash(ci);
+       } else {
+               kfree(ci);
+               ci = tmpci;
+       }
+       write_unlock(&inode_hash_lock);
+       return ci;
+}
+
+static void ksmbd_inode_free(struct ksmbd_inode *ci)
+{
+       ksmbd_inode_unhash(ci);
+       kfree(ci);
+}
+
+static void ksmbd_inode_put(struct ksmbd_inode *ci)
+{
+       if (atomic_dec_and_test(&ci->m_count))
+               ksmbd_inode_free(ci);
+}
+
+int __init ksmbd_inode_hash_init(void)
+{
+       unsigned int loop;
+       unsigned long numentries = 16384;
+       unsigned long bucketsize = sizeof(struct hlist_head);
+       unsigned long size;
+
+       inode_hash_shift = ilog2(numentries);
+       inode_hash_mask = (1 << inode_hash_shift) - 1;
+
+       size = bucketsize << inode_hash_shift;
+
+       /* initialize the inode hash table */
+       inode_hashtable = vmalloc(size);
+       if (!inode_hashtable)
+               return -ENOMEM;
+
+       for (loop = 0; loop < (1U << inode_hash_shift); loop++)
+               INIT_HLIST_HEAD(&inode_hashtable[loop]);
+       return 0;
+}
+
+void ksmbd_release_inode_hash(void)
+{
+       vfree(inode_hashtable);
+}
+
+static void __ksmbd_inode_close(struct ksmbd_file *fp)
+{
+       struct dentry *dir, *dentry;
+       struct ksmbd_inode *ci = fp->f_ci;
+       int err;
+       struct file *filp;
+
+       filp = fp->filp;
+       if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
+               ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
+               err = ksmbd_vfs_remove_xattr(file_mnt_user_ns(filp),
+                                            filp->f_path.dentry,
+                                            fp->stream.name);
+               if (err)
+                       pr_err("remove xattr failed: %s\n",
+                              fp->stream.name);
+       }
+
+       if (atomic_dec_and_test(&ci->m_count)) {
+               write_lock(&ci->m_lock);
+               if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
+                       dentry = filp->f_path.dentry;
+                       dir = dentry->d_parent;
+                       ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
+                       write_unlock(&ci->m_lock);
+                       ksmbd_vfs_unlink(file_mnt_user_ns(filp), dir, dentry);
+                       write_lock(&ci->m_lock);
+               }
+               write_unlock(&ci->m_lock);
+
+               ksmbd_inode_free(ci);
+       }
+}
+
+static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
+{
+       if (!has_file_id(fp->persistent_id))
+               return;
+
+       write_lock(&global_ft.lock);
+       idr_remove(global_ft.idr, fp->persistent_id);
+       write_unlock(&global_ft.lock);
+}
+
+static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
+{
+       if (!has_file_id(fp->volatile_id))
+               return;
+
+       write_lock(&fp->f_ci->m_lock);
+       list_del_init(&fp->node);
+       write_unlock(&fp->f_ci->m_lock);
+
+       write_lock(&ft->lock);
+       idr_remove(ft->idr, fp->volatile_id);
+       write_unlock(&ft->lock);
+}
+
+static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
+{
+       struct file *filp;
+       struct ksmbd_lock *smb_lock, *tmp_lock;
+
+       fd_limit_close();
+       __ksmbd_remove_durable_fd(fp);
+       __ksmbd_remove_fd(ft, fp);
+
+       close_id_del_oplock(fp);
+       filp = fp->filp;
+
+       __ksmbd_inode_close(fp);
+       if (!IS_ERR_OR_NULL(filp))
+               fput(filp);
+
+       /* Because the reference count of fp is zero, it is guaranteed that
+        * there are no accesses to fp->lock_list.
+        */
+       list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
+               spin_lock(&fp->conn->llist_lock);
+               list_del(&smb_lock->clist);
+               spin_unlock(&fp->conn->llist_lock);
+
+               list_del(&smb_lock->flist);
+               locks_free_lock(smb_lock->fl);
+               kfree(smb_lock);
+       }
+
+       kfree(fp->filename);
+       if (ksmbd_stream_fd(fp))
+               kfree(fp->stream.name);
+       kmem_cache_free(filp_cache, fp);
+}
+
+static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
+{
+       if (!atomic_inc_not_zero(&fp->refcount))
+               return NULL;
+       return fp;
+}
+
+static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
+                                           u64 id)
+{
+       struct ksmbd_file *fp;
+
+       if (!has_file_id(id))
+               return NULL;
+
+       read_lock(&ft->lock);
+       fp = idr_find(ft->idr, id);
+       if (fp)
+               fp = ksmbd_fp_get(fp);
+       read_unlock(&ft->lock);
+       return fp;
+}
+
+static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
+{
+       __ksmbd_close_fd(&work->sess->file_table, fp);
+       atomic_dec(&work->conn->stats.open_files_count);
+}
+
+static void set_close_state_blocked_works(struct ksmbd_file *fp)
+{
+       struct ksmbd_work *cancel_work, *ctmp;
+
+       spin_lock(&fp->f_lock);
+       list_for_each_entry_safe(cancel_work, ctmp, &fp->blocked_works,
+                                fp_entry) {
+               list_del(&cancel_work->fp_entry);
+               cancel_work->state = KSMBD_WORK_CLOSED;
+               cancel_work->cancel_fn(cancel_work->cancel_argv);
+       }
+       spin_unlock(&fp->f_lock);
+}
+
+int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
+{
+       struct ksmbd_file       *fp;
+       struct ksmbd_file_table *ft;
+
+       if (!has_file_id(id))
+               return 0;
+
+       ft = &work->sess->file_table;
+       read_lock(&ft->lock);
+       fp = idr_find(ft->idr, id);
+       if (fp) {
+               set_close_state_blocked_works(fp);
+
+               if (!atomic_dec_and_test(&fp->refcount))
+                       fp = NULL;
+       }
+       read_unlock(&ft->lock);
+
+       if (!fp)
+               return -EINVAL;
+
+       __put_fd_final(work, fp);
+       return 0;
+}
+
+void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
+{
+       if (!fp)
+               return;
+
+       if (!atomic_dec_and_test(&fp->refcount))
+               return;
+       __put_fd_final(work, fp);
+}
+
+static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
+{
+       if (!fp)
+               return false;
+       if (fp->tcon != tcon)
+               return false;
+       return true;
+}
+
+struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
+{
+       return __ksmbd_lookup_fd(&work->sess->file_table, id);
+}
+
+struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
+{
+       struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
+
+       if (__sanity_check(work->tcon, fp))
+               return fp;
+
+       ksmbd_fd_put(work, fp);
+       return NULL;
+}
+
+struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
+                                       u64 pid)
+{
+       struct ksmbd_file *fp;
+
+       if (!has_file_id(id)) {
+               id = work->compound_fid;
+               pid = work->compound_pfid;
+       }
+
+       fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
+       if (!__sanity_check(work->tcon, fp)) {
+               ksmbd_fd_put(work, fp);
+               return NULL;
+       }
+       if (fp->persistent_id != pid) {
+               ksmbd_fd_put(work, fp);
+               return NULL;
+       }
+       return fp;
+}
+
+struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
+{
+       return __ksmbd_lookup_fd(&global_ft, id);
+}
+
+struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
+{
+       struct ksmbd_file       *fp = NULL;
+       unsigned int            id;
+
+       read_lock(&global_ft.lock);
+       idr_for_each_entry(global_ft.idr, fp, id) {
+               if (!memcmp(fp->create_guid,
+                           cguid,
+                           SMB2_CREATE_GUID_SIZE)) {
+                       fp = ksmbd_fp_get(fp);
+                       break;
+               }
+       }
+       read_unlock(&global_ft.lock);
+
+       return fp;
+}
+
+struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
+{
+       struct ksmbd_file       *lfp;
+       struct ksmbd_inode      *ci;
+
+       ci = ksmbd_inode_lookup_by_vfsinode(inode);
+       if (!ci)
+               return NULL;
+
+       read_lock(&ci->m_lock);
+       list_for_each_entry(lfp, &ci->m_fp_list, node) {
+               if (inode == file_inode(lfp->filp)) {
+                       atomic_dec(&ci->m_count);
+                       read_unlock(&ci->m_lock);
+                       return lfp;
+               }
+       }
+       atomic_dec(&ci->m_count);
+       read_unlock(&ci->m_lock);
+       return NULL;
+}
+
+#define OPEN_ID_TYPE_VOLATILE_ID       (0)
+#define OPEN_ID_TYPE_PERSISTENT_ID     (1)
+
+static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
+{
+       if (type == OPEN_ID_TYPE_VOLATILE_ID)
+               fp->volatile_id = id;
+       if (type == OPEN_ID_TYPE_PERSISTENT_ID)
+               fp->persistent_id = id;
+}
+
+static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
+                    int type)
+{
+       u64                     id = 0;
+       int                     ret;
+
+       if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
+               __open_id_set(fp, KSMBD_NO_FID, type);
+               return -EMFILE;
+       }
+
+       idr_preload(GFP_KERNEL);
+       write_lock(&ft->lock);
+       ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
+       if (ret >= 0) {
+               id = ret;
+               ret = 0;
+       } else {
+               id = KSMBD_NO_FID;
+               fd_limit_close();
+       }
+
+       __open_id_set(fp, id, type);
+       write_unlock(&ft->lock);
+       idr_preload_end();
+       return ret;
+}
+
+unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
+{
+       __open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
+       return fp->persistent_id;
+}
+
+struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
+{
+       struct ksmbd_file *fp;
+       int ret;
+
+       fp = kmem_cache_zalloc(filp_cache, GFP_KERNEL);
+       if (!fp) {
+               pr_err("Failed to allocate memory\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       INIT_LIST_HEAD(&fp->blocked_works);
+       INIT_LIST_HEAD(&fp->node);
+       INIT_LIST_HEAD(&fp->lock_list);
+       spin_lock_init(&fp->f_lock);
+       atomic_set(&fp->refcount, 1);
+
+       fp->filp                = filp;
+       fp->conn                = work->sess->conn;
+       fp->tcon                = work->tcon;
+       fp->volatile_id         = KSMBD_NO_FID;
+       fp->persistent_id       = KSMBD_NO_FID;
+       fp->f_ci                = ksmbd_inode_get(fp);
+
+       if (!fp->f_ci) {
+               ret = -ENOMEM;
+               goto err_out;
+       }
+
+       ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
+       if (ret) {
+               ksmbd_inode_put(fp->f_ci);
+               goto err_out;
+       }
+
+       atomic_inc(&work->conn->stats.open_files_count);
+       return fp;
+
+err_out:
+       kmem_cache_free(filp_cache, fp);
+       return ERR_PTR(ret);
+}
+
+static int
+__close_file_table_ids(struct ksmbd_file_table *ft,
+                      struct ksmbd_tree_connect *tcon,
+                      bool (*skip)(struct ksmbd_tree_connect *tcon,
+                                   struct ksmbd_file *fp))
+{
+       unsigned int                    id;
+       struct ksmbd_file               *fp;
+       int                             num = 0;
+
+       idr_for_each_entry(ft->idr, fp, id) {
+               if (skip(tcon, fp))
+                       continue;
+
+               set_close_state_blocked_works(fp);
+
+               if (!atomic_dec_and_test(&fp->refcount))
+                       continue;
+               __ksmbd_close_fd(ft, fp);
+               num++;
+       }
+       return num;
+}
+
+static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
+                              struct ksmbd_file *fp)
+{
+       return fp->tcon != tcon;
+}
+
+static bool session_fd_check(struct ksmbd_tree_connect *tcon,
+                            struct ksmbd_file *fp)
+{
+       return false;
+}
+
+void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
+{
+       int num = __close_file_table_ids(&work->sess->file_table,
+                                        work->tcon,
+                                        tree_conn_fd_check);
+
+       atomic_sub(num, &work->conn->stats.open_files_count);
+}
+
+void ksmbd_close_session_fds(struct ksmbd_work *work)
+{
+       int num = __close_file_table_ids(&work->sess->file_table,
+                                        work->tcon,
+                                        session_fd_check);
+
+       atomic_sub(num, &work->conn->stats.open_files_count);
+}
+
+int ksmbd_init_global_file_table(void)
+{
+       return ksmbd_init_file_table(&global_ft);
+}
+
+void ksmbd_free_global_file_table(void)
+{
+       struct ksmbd_file       *fp = NULL;
+       unsigned int            id;
+
+       idr_for_each_entry(global_ft.idr, fp, id) {
+               __ksmbd_remove_durable_fd(fp);
+               kmem_cache_free(filp_cache, fp);
+       }
+
+       ksmbd_destroy_file_table(&global_ft);
+}
+
+int ksmbd_file_table_flush(struct ksmbd_work *work)
+{
+       struct ksmbd_file       *fp = NULL;
+       unsigned int            id;
+       int                     ret = 0;
+
+       read_lock(&work->sess->file_table.lock);
+       idr_for_each_entry(work->sess->file_table.idr, fp, id) {
+               ret = ksmbd_vfs_fsync(work, fp->volatile_id, KSMBD_NO_FID);
+               if (ret)
+                       break;
+       }
+       read_unlock(&work->sess->file_table.lock);
+       return ret;
+}
+
+int ksmbd_init_file_table(struct ksmbd_file_table *ft)
+{
+       ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
+       if (!ft->idr)
+               return -ENOMEM;
+
+       idr_init(ft->idr);
+       rwlock_init(&ft->lock);
+       return 0;
+}
+
+void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
+{
+       if (!ft->idr)
+               return;
+
+       __close_file_table_ids(ft, NULL, session_fd_check);
+       idr_destroy(ft->idr);
+       kfree(ft->idr);
+       ft->idr = NULL;
+}
+
+int ksmbd_init_file_cache(void)
+{
+       filp_cache = kmem_cache_create("ksmbd_file_cache",
+                                      sizeof(struct ksmbd_file), 0,
+                                      SLAB_HWCACHE_ALIGN, NULL);
+       if (!filp_cache)
+               goto out;
+
+       return 0;
+
+out:
+       pr_err("failed to allocate file cache\n");
+       return -ENOMEM;
+}
+
+void ksmbd_exit_file_cache(void)
+{
+       kmem_cache_destroy(filp_cache);
+}
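
A note on the inode cache above: ksmbd_inode_get() follows the classic lookup-or-insert pattern, i.e. search under the read lock, allocate the new entry outside any lock, then recheck under the write lock and throw the duplicate away if another thread won the race. The following is a minimal user-space analogue of that pattern, assuming only pthreads and C11 atomics; the names and types are hypothetical and none of the ksmbd structures are involved.

/*
 * Minimal user-space analogue of the lookup-or-insert pattern used by
 * ksmbd_inode_get(): look up under a read lock, allocate outside the lock,
 * recheck under the write lock, and free the loser of the race.
 * Hypothetical types; not ksmbd code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct entry {
	unsigned long key;
	atomic_int refcount;
	struct entry *next;
};

static struct entry *bucket;		/* single hash bucket, for brevity */
static pthread_rwlock_t bucket_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Must be called with bucket_lock held (read or write). */
static struct entry *lookup_locked(unsigned long key)
{
	struct entry *e;

	for (e = bucket; e; e = e->next)
		if (e->key == key) {
			atomic_fetch_add(&e->refcount, 1);	/* grab a reference */
			return e;
		}
	return NULL;
}

struct entry *entry_get(unsigned long key)
{
	struct entry *e, *winner;

	pthread_rwlock_rdlock(&bucket_lock);
	e = lookup_locked(key);
	pthread_rwlock_unlock(&bucket_lock);
	if (e)
		return e;

	e = calloc(1, sizeof(*e));		/* allocate outside the lock */
	if (!e)
		return NULL;
	e->key = key;
	atomic_init(&e->refcount, 1);

	pthread_rwlock_wrlock(&bucket_lock);
	winner = lookup_locked(key);		/* did another thread insert it? */
	if (!winner) {
		e->next = bucket;
		bucket = e;
		winner = e;
	} else {
		free(e);			/* lost the race; drop our copy */
	}
	pthread_rwlock_unlock(&bucket_lock);
	return winner;
}

int main(void)
{
	struct entry *a = entry_get(42);
	struct entry *b = entry_get(42);

	return (a && a == b) ? 0 : 1;	/* second lookup finds the same entry */
}

The kernel version additionally uses atomic_inc_not_zero() so a concurrent lookup cannot resurrect an entry whose last reference is already being dropped; the sketch sidesteps that by never freeing entries.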
diff --git a/fs/ksmbd/vfs_cache.h b/fs/ksmbd/vfs_cache.h
new file mode 100644 (file)
index 0000000..70dfe6a
--- /dev/null
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __VFS_CACHE_H__
+#define __VFS_CACHE_H__
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/workqueue.h>
+
+#include "vfs.h"
+
+/* Windows style file permissions for extended response */
+#define        FILE_GENERIC_ALL        0x1F01FF
+#define        FILE_GENERIC_READ       0x120089
+#define        FILE_GENERIC_WRITE      0x120116
+#define        FILE_GENERIC_EXECUTE    0x1200A0
+
+#define KSMBD_START_FID                0
+#define KSMBD_NO_FID           (INT_MAX)
+#define SMB2_NO_FID            (0xFFFFFFFFFFFFFFFFULL)
+
+struct ksmbd_conn;
+struct ksmbd_session;
+
+struct ksmbd_lock {
+       struct file_lock *fl;
+       struct list_head clist;
+       struct list_head flist;
+       struct list_head llist;
+       unsigned int flags;
+       int cmd;
+       int zero_len;
+       unsigned long long start;
+       unsigned long long end;
+};
+
+struct stream {
+       char *name;
+       ssize_t size;
+};
+
+struct ksmbd_inode {
+       rwlock_t                        m_lock;
+       atomic_t                        m_count;
+       atomic_t                        op_count;
+       /* opinfo count for streams */
+       atomic_t                        sop_count;
+       struct inode                    *m_inode;
+       unsigned int                    m_flags;
+       struct hlist_node               m_hash;
+       struct list_head                m_fp_list;
+       struct list_head                m_op_list;
+       struct oplock_info              *m_opinfo;
+       __le32                          m_fattr;
+};
+
+struct ksmbd_file {
+       struct file                     *filp;
+       char                            *filename;
+       u64                             persistent_id;
+       u64                             volatile_id;
+
+       spinlock_t                      f_lock;
+
+       struct ksmbd_inode              *f_ci;
+       struct ksmbd_inode              *f_parent_ci;
+       struct oplock_info __rcu        *f_opinfo;
+       struct ksmbd_conn               *conn;
+       struct ksmbd_tree_connect       *tcon;
+
+       atomic_t                        refcount;
+       __le32                          daccess;
+       __le32                          saccess;
+       __le32                          coption;
+       __le32                          cdoption;
+       __u64                           create_time;
+       __u64                           itime;
+
+       bool                            is_nt_open;
+       bool                            attrib_only;
+
+       char                            client_guid[16];
+       char                            create_guid[16];
+       char                            app_instance_id[16];
+
+       struct stream                   stream;
+       struct list_head                node;
+       struct list_head                blocked_works;
+       struct list_head                lock_list;
+
+       int                             durable_timeout;
+
+       /* for SMB1 */
+       int                             pid;
+
+       /* conflict lock fail count for SMB1 */
+       unsigned int                    cflock_cnt;
+       /* last lock failure start offset for SMB1 */
+       unsigned long long              llock_fstart;
+
+       int                             dirent_offset;
+
+       /* if a directory listing is in progress, the fields below are valid */
+       struct ksmbd_readdir_data       readdir_data;
+       int                             dot_dotdot[2];
+};
+
+static inline void set_ctx_actor(struct dir_context *ctx,
+                                filldir_t actor)
+{
+       ctx->actor = actor;
+}
+
+#define KSMBD_NR_OPEN_DEFAULT BITS_PER_LONG
+
+struct ksmbd_file_table {
+       rwlock_t                lock;
+       struct idr              *idr;
+};
+
+static inline bool has_file_id(u64 id)
+{
+       return id < KSMBD_NO_FID;
+}
+
+static inline bool ksmbd_stream_fd(struct ksmbd_file *fp)
+{
+       return fp->stream.name != NULL;
+}
+
+int ksmbd_init_file_table(struct ksmbd_file_table *ft);
+void ksmbd_destroy_file_table(struct ksmbd_file_table *ft);
+int ksmbd_close_fd(struct ksmbd_work *work, u64 id);
+struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id);
+struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id);
+struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
+                                       u64 pid);
+void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp);
+struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id);
+struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid);
+struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode);
+unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp);
+struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp);
+void ksmbd_close_tree_conn_fds(struct ksmbd_work *work);
+void ksmbd_close_session_fds(struct ksmbd_work *work);
+int ksmbd_close_inode_fds(struct ksmbd_work *work, struct inode *inode);
+int ksmbd_init_global_file_table(void);
+void ksmbd_free_global_file_table(void);
+int ksmbd_file_table_flush(struct ksmbd_work *work);
+void ksmbd_set_fd_limit(unsigned long limit);
+
+/*
+ * INODE hash
+ */
+int __init ksmbd_inode_hash_init(void);
+void ksmbd_release_inode_hash(void);
+
+enum KSMBD_INODE_STATUS {
+       KSMBD_INODE_STATUS_OK,
+       KSMBD_INODE_STATUS_UNKNOWN,
+       KSMBD_INODE_STATUS_PENDING_DELETE,
+};
+
+int ksmbd_query_inode_status(struct inode *inode);
+bool ksmbd_inode_pending_delete(struct ksmbd_file *fp);
+void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp);
+void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp);
+void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
+                                 int file_info);
+int ksmbd_init_file_cache(void);
+void ksmbd_exit_file_cache(void);
+#endif /* __VFS_CACHE_H__ */
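
One detail worth calling out from the header above: volatile and persistent ids are handed out by idr_alloc_cyclic() with an upper bound below INT_MAX (see __open_id() earlier in this file set), which is exactly why KSMBD_NO_FID is defined as INT_MAX and has_file_id() can be a plain less-than test. The stand-alone sketch below shows only that sentinel convention; the helper names are hypothetical and no IDR is involved.

/*
 * Sketch of the "any valid id is below the sentinel" convention assumed by
 * has_file_id(): allocated ids stay below INT_MAX, so INT_MAX means "no id".
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NO_FID ((unsigned long long)INT_MAX)

static unsigned long long next_id;	/* cyclic cursor, wraps before NO_FID */

static bool has_id(unsigned long long id)
{
	return id < NO_FID;
}

static unsigned long long alloc_id(void)
{
	unsigned long long id = next_id;

	next_id = (next_id + 1) % NO_FID;
	return id;
}

int main(void)
{
	unsigned long long id = alloc_id();

	printf("id=%llu valid=%d, NO_FID valid=%d\n",
	       id, has_id(id), has_id(NO_FID));
	return 0;
}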
diff --git a/fs/ksmbd/xattr.h b/fs/ksmbd/xattr.h
new file mode 100644 (file)
index 0000000..8857c01
--- /dev/null
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2021 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __XATTR_H__
+#define __XATTR_H__
+
+/*
+ * These are on-disk structures that store additional metadata in xattrs to
+ * reproduce Windows filesystem semantics. They are NDR-encoded to be
+ * compatible with samba's xattr metadata format. Compatibility with samba
+ * matters because, without it, information about existing files (file
+ * attributes, creation time, ACLs) can be lost when switching between
+ * ksmbd and samba.
+ */
+
+/*
+ * DOS attribute flags indicating which fields of xattr_dos_attrib are valid.
+ */
+enum {
+       XATTR_DOSINFO_ATTRIB            = 0x00000001,
+       XATTR_DOSINFO_EA_SIZE           = 0x00000002,
+       XATTR_DOSINFO_SIZE              = 0x00000004,
+       XATTR_DOSINFO_ALLOC_SIZE        = 0x00000008,
+       XATTR_DOSINFO_CREATE_TIME       = 0x00000010,
+       XATTR_DOSINFO_CHANGE_TIME       = 0x00000020,
+       XATTR_DOSINFO_ITIME             = 0x00000040
+};
+
+/*
+ * DOS attribute structure, compatible with samba's on-disk format.
+ * Storing it in the xattr named "DOSATTRIB", separately from the inode,
+ * allows ksmbd to faithfully reproduce Windows filesystem semantics
+ * on top of a POSIX filesystem.
+ */
+struct xattr_dos_attrib {
+       __u16   version;        /* version 3 or version 4 */
+       __u32   flags;          /* valid flags */
+       __u32   attr;           /* Dos attribute */
+       __u32   ea_size;        /* EA size */
+       __u64   size;
+       __u64   alloc_size;
+       __u64   create_time;    /* File creation time */
+       __u64   change_time;    /* File change time */
+       __u64   itime;          /* Invented/Initial time */
+};
+
+/*
+ * Enumeration of ACL tag types used when computing the POSIX ACL hash.
+ */
+enum {
+       SMB_ACL_TAG_INVALID = 0,
+       SMB_ACL_USER,
+       SMB_ACL_USER_OBJ,
+       SMB_ACL_GROUP,
+       SMB_ACL_GROUP_OBJ,
+       SMB_ACL_OTHER,
+       SMB_ACL_MASK
+};
+
+#define SMB_ACL_READ                   4
+#define SMB_ACL_WRITE                  2
+#define SMB_ACL_EXECUTE                        1
+
+struct xattr_acl_entry {
+       int type;
+       uid_t uid;
+       gid_t gid;
+       mode_t perm;
+};
+
+/*
+ * The xattr_smb_acl structure is used to compute the POSIX ACL hash.
+ */
+struct xattr_smb_acl {
+       int count;
+       int next;
+       struct xattr_acl_entry entries[0];
+};
+
+/* The 64-byte hash in xattr_ntacl is computed with SHA-256 */
+#define XATTR_SD_HASH_TYPE_SHA256      0x1
+#define XATTR_SD_HASH_SIZE             64
+
+/*
+ * xattr_ntacl stores the NT ACL together with its hashes.
+ * The hashes are used to validate the POSIX ACL and NT ACL kept in xattrs.
+ */
+struct xattr_ntacl {
+       __u16   version; /* version 4 */
+       void    *sd_buf;
+       __u32   sd_size;
+       __u16   hash_type; /* hash type */
+       __u8    desc[10]; /* posix_acl description */
+       __u16   desc_len;
+       __u64   current_time;
+       __u8    hash[XATTR_SD_HASH_SIZE]; /* 64-byte hash for the NT ACL */
+       __u8    posix_acl_hash[XATTR_SD_HASH_SIZE]; /* 64-byte hash for the POSIX ACL */
+};
+
+/* DOS ATTRIBUTE XATTR PREFIX */
+#define DOS_ATTRIBUTE_PREFIX           "DOSATTRIB"
+#define DOS_ATTRIBUTE_PREFIX_LEN       (sizeof(DOS_ATTRIBUTE_PREFIX) - 1)
+#define XATTR_NAME_DOS_ATTRIBUTE       (XATTR_USER_PREFIX DOS_ATTRIBUTE_PREFIX)
+#define XATTR_NAME_DOS_ATTRIBUTE_LEN   \
+               (sizeof(XATTR_USER_PREFIX DOS_ATTRIBUTE_PREFIX) - 1)
+
+/* STREAM XATTR PREFIX */
+#define STREAM_PREFIX                  "DosStream."
+#define STREAM_PREFIX_LEN              (sizeof(STREAM_PREFIX) - 1)
+#define XATTR_NAME_STREAM              (XATTR_USER_PREFIX STREAM_PREFIX)
+#define XATTR_NAME_STREAM_LEN          (sizeof(XATTR_NAME_STREAM) - 1)
+
+/* SECURITY DESCRIPTOR(NTACL) XATTR PREFIX */
+#define SD_PREFIX                      "NTACL"
+#define SD_PREFIX_LEN  (sizeof(SD_PREFIX) - 1)
+#define XATTR_NAME_SD  (XATTR_SECURITY_PREFIX SD_PREFIX)
+#define XATTR_NAME_SD_LEN      \
+               (sizeof(XATTR_SECURITY_PREFIX SD_PREFIX) - 1)
+
+#endif /* __XATTR_H__ */
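
The prefix/length macro pairs above all follow the same pattern: a full xattr name is built at compile time from a namespace prefix, and its length is derived with sizeof() minus the trailing NUL. A small user-space sketch of that construction follows; the XATTR_USER_PREFIX value "user." matches the kernel uapi headers, and the rest simply mirrors the DOSATTRIB macros for illustration.

/*
 * Sketch of the compile-time xattr-name construction used above.
 * XATTR_USER_PREFIX is "user." in <linux/xattr.h>.
 */
#include <stdio.h>

#define XATTR_USER_PREFIX		"user."
#define DOS_ATTRIBUTE_PREFIX		"DOSATTRIB"
#define XATTR_NAME_DOS_ATTRIBUTE	(XATTR_USER_PREFIX DOS_ATTRIBUTE_PREFIX)
/* sizeof() on a string literal counts the NUL terminator, hence the "- 1" */
#define XATTR_NAME_DOS_ATTRIBUTE_LEN	(sizeof(XATTR_NAME_DOS_ATTRIBUTE) - 1)

int main(void)
{
	printf("%s (len %zu)\n", XATTR_NAME_DOS_ATTRIBUTE,
	       XATTR_NAME_DOS_ATTRIBUTE_LEN);	/* prints: user.DOSATTRIB (len 14) */
	return 0;
}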
index 2de048f..0ab9756 100644 (file)
@@ -584,7 +584,7 @@ static struct ctl_table nlm_sysctls[] = {
                .data           = &nsm_use_hostnames,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dobool,
        },
        {
                .procname       = "nsm_local_state",
index 4c10fb5..e10ae2c 100644 (file)
@@ -40,12 +40,15 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain file pointer. Not used by FREE_ALL call. */
        if (filp != NULL) {
-               if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
+               int mode = lock_to_openmode(&lock->fl);
+
+               error = nlm_lookup_file(rqstp, &file, lock);
+               if (error)
                        goto no_locks;
                *filp = file;
 
                /* Set up the missing parts of the file_lock structure */
-               lock->fl.fl_file  = file->f_file;
+               lock->fl.fl_file  = file->f_file[mode];
                lock->fl.fl_pid = current->tgid;
                lock->fl.fl_lmops = &nlmsvc_lock_operations;
                nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid);
index 61d3cc2..e9b85d8 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/lockd/nlm.h>
 #include <linux/lockd/lockd.h>
 #include <linux/kthread.h>
+#include <linux/exportfs.h>
 
 #define NLMDBG_FACILITY                NLMDBG_SVCLOCK
 
@@ -395,28 +396,10 @@ nlmsvc_release_lockowner(struct nlm_lock *lock)
                nlmsvc_put_lockowner(lock->fl.fl_owner);
 }
 
-static void nlmsvc_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
-{
-       struct nlm_lockowner *nlm_lo = (struct nlm_lockowner *)fl->fl_owner;
-       new->fl_owner = nlmsvc_get_lockowner(nlm_lo);
-}
-
-static void nlmsvc_locks_release_private(struct file_lock *fl)
-{
-       nlmsvc_put_lockowner((struct nlm_lockowner *)fl->fl_owner);
-}
-
-static const struct file_lock_operations nlmsvc_lock_ops = {
-       .fl_copy_lock = nlmsvc_locks_copy_lock,
-       .fl_release_private = nlmsvc_locks_release_private,
-};
-
 void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
                                                pid_t pid)
 {
        fl->fl_owner = nlmsvc_find_lockowner(host, pid);
-       if (fl->fl_owner != NULL)
-               fl->fl_ops = &nlmsvc_lock_ops;
 }
 
 /*
@@ -488,17 +471,24 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
            struct nlm_cookie *cookie, int reclaim)
 {
        struct nlm_block        *block = NULL;
+       struct inode            *inode = nlmsvc_file_inode(file);
        int                     error;
+       int                     mode;
+       int                     async_block = 0;
        __be32                  ret;
 
        dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
-                               locks_inode(file->f_file)->i_sb->s_id,
-                               locks_inode(file->f_file)->i_ino,
+                               inode->i_sb->s_id, inode->i_ino,
                                lock->fl.fl_type, lock->fl.fl_pid,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end,
                                wait);
 
+       if (inode->i_sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS) {
+               async_block = wait;
+               wait = 0;
+       }
+
        /* Lock file against concurrent access */
        mutex_lock(&file->f_mutex);
        /* Get existing block (in case client is busy-waiting)
@@ -542,7 +532,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
 
        if (!wait)
                lock->fl.fl_flags &= ~FL_SLEEP;
-       error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
+       mode = lock_to_openmode(&lock->fl);
+       error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
        lock->fl.fl_flags &= ~FL_SLEEP;
 
        dprintk("lockd: vfs_lock_file returned %d\n", error);
@@ -558,7 +549,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
                         */
                        if (wait)
                                break;
-                       ret = nlm_lck_denied;
+                       ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
                        goto out;
                case FILE_LOCK_DEFERRED:
                        if (wait)
@@ -595,12 +586,13 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
                struct nlm_lock *conflock, struct nlm_cookie *cookie)
 {
        int                     error;
+       int                     mode;
        __be32                  ret;
        struct nlm_lockowner    *test_owner;
 
        dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
-                               locks_inode(file->f_file)->i_sb->s_id,
-                               locks_inode(file->f_file)->i_ino,
+                               nlmsvc_file_inode(file)->i_sb->s_id,
+                               nlmsvc_file_inode(file)->i_ino,
                                lock->fl.fl_type,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end);
@@ -613,7 +605,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
        /* If there's a conflicting lock, remember to clean up the test lock */
        test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;
 
-       error = vfs_test_lock(file->f_file, &lock->fl);
+       mode = lock_to_openmode(&lock->fl);
+       error = vfs_test_lock(file->f_file[mode], &lock->fl);
        if (error) {
                /* We can't currently deal with deferred test requests */
                if (error == FILE_LOCK_DEFERRED)
@@ -634,7 +627,7 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
        conflock->caller = "somehost";  /* FIXME */
        conflock->len = strlen(conflock->caller);
        conflock->oh.len = 0;           /* don't return OH info */
-       conflock->svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;
+       conflock->svid = lock->fl.fl_pid;
        conflock->fl.fl_type = lock->fl.fl_type;
        conflock->fl.fl_start = lock->fl.fl_start;
        conflock->fl.fl_end = lock->fl.fl_end;
@@ -659,11 +652,11 @@ out:
 __be32
 nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
 {
-       int     error;
+       int     error = 0;
 
        dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
-                               locks_inode(file->f_file)->i_sb->s_id,
-                               locks_inode(file->f_file)->i_ino,
+                               nlmsvc_file_inode(file)->i_sb->s_id,
+                               nlmsvc_file_inode(file)->i_ino,
                                lock->fl.fl_pid,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end);
@@ -672,7 +665,12 @@ nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
        nlmsvc_cancel_blocked(net, file, lock);
 
        lock->fl.fl_type = F_UNLCK;
-       error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
+       if (file->f_file[O_RDONLY])
+               error = vfs_lock_file(file->f_file[O_RDONLY], F_SETLK,
+                                       &lock->fl, NULL);
+       if (file->f_file[O_WRONLY])
+               error = vfs_lock_file(file->f_file[O_WRONLY], F_SETLK,
+                                       &lock->fl, NULL);
 
        return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
 }
@@ -689,10 +687,11 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l
 {
        struct nlm_block        *block;
        int status = 0;
+       int mode;
 
        dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
-                               locks_inode(file->f_file)->i_sb->s_id,
-                               locks_inode(file->f_file)->i_ino,
+                               nlmsvc_file_inode(file)->i_sb->s_id,
+                               nlmsvc_file_inode(file)->i_ino,
                                lock->fl.fl_pid,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end);
@@ -704,7 +703,8 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l
        block = nlmsvc_lookup_block(file, lock);
        mutex_unlock(&file->f_mutex);
        if (block != NULL) {
-               vfs_cancel_lock(block->b_file->f_file,
+               mode = lock_to_openmode(&lock->fl);
+               vfs_cancel_lock(block->b_file->f_file[mode],
                                &block->b_call->a_args.lock.fl);
                status = nlmsvc_unlink_block(block);
                nlmsvc_release_block(block);
@@ -788,9 +788,21 @@ nlmsvc_notify_blocked(struct file_lock *fl)
        printk(KERN_WARNING "lockd: notification for unknown block!\n");
 }
 
+static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
+{
+       return nlmsvc_get_lockowner(owner);
+}
+
+static void nlmsvc_put_owner(fl_owner_t owner)
+{
+       nlmsvc_put_lockowner(owner);
+}
+
 const struct lock_manager_operations nlmsvc_lock_operations = {
        .lm_notify = nlmsvc_notify_blocked,
        .lm_grant = nlmsvc_grant_deferred,
+       .lm_get_owner = nlmsvc_get_owner,
+       .lm_put_owner = nlmsvc_put_owner,
 };
 
 /*
@@ -809,6 +821,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
 {
        struct nlm_file         *file = block->b_file;
        struct nlm_lock         *lock = &block->b_call->a_args.lock;
+       int                     mode;
        int                     error;
        loff_t                  fl_start, fl_end;
 
@@ -834,7 +847,8 @@ nlmsvc_grant_blocked(struct nlm_block *block)
        lock->fl.fl_flags |= FL_SLEEP;
        fl_start = lock->fl.fl_start;
        fl_end = lock->fl.fl_end;
-       error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
+       mode = lock_to_openmode(&lock->fl);
+       error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
        lock->fl.fl_flags &= ~FL_SLEEP;
        lock->fl.fl_start = fl_start;
        lock->fl.fl_end = fl_end;
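
The recurring f_file[mode] indexing in the lockd hunks above works because lock_to_openmode() maps F_WRLCK to O_WRONLY and everything else to O_RDONLY, and on Linux O_RDONLY is 0 and O_WRONLY is 1, so the open mode doubles as an index into the two-element per-file array (the struct nlm_file definition itself is in a header not shown in this section). A stand-alone sketch of just that mapping:

/*
 * Stand-alone illustration of the lock-type to open-mode mapping used as an
 * array index above; it relies only on O_RDONLY == 0 and O_WRONLY == 1.
 */
#include <fcntl.h>
#include <stdio.h>

static int lock_to_openmode(int fl_type)
{
	return fl_type == F_WRLCK ? O_WRONLY : O_RDONLY;
}

int main(void)
{
	printf("F_RDLCK -> f_file[%d]\n", lock_to_openmode(F_RDLCK));
	printf("F_WRLCK -> f_file[%d]\n", lock_to_openmode(F_WRLCK));
	printf("F_UNLCK -> f_file[%d]\n", lock_to_openmode(F_UNLCK));
	return 0;
}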
index 4ae4b63..99696d3 100644 (file)
@@ -55,6 +55,7 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
        struct nlm_host         *host = NULL;
        struct nlm_file         *file = NULL;
        struct nlm_lock         *lock = &argp->lock;
+       int                     mode;
        __be32                  error = 0;
 
        /* nfsd callbacks must have been installed for this procedure */
@@ -69,13 +70,14 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain file pointer. Not used by FREE_ALL call. */
        if (filp != NULL) {
-               error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh));
+               error = cast_status(nlm_lookup_file(rqstp, &file, lock));
                if (error != 0)
                        goto no_locks;
                *filp = file;
 
                /* Set up the missing parts of the file_lock structure */
-               lock->fl.fl_file  = file->f_file;
+               mode = lock_to_openmode(&lock->fl);
+               lock->fl.fl_file  = file->f_file[mode];
                lock->fl.fl_pid = current->tgid;
                lock->fl.fl_lmops = &nlmsvc_lock_operations;
                nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid);
index 028fc15..cb3a751 100644 (file)
@@ -45,7 +45,7 @@ static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
 
 static inline void nlm_debug_print_file(char *msg, struct nlm_file *file)
 {
-       struct inode *inode = locks_inode(file->f_file);
+       struct inode *inode = nlmsvc_file_inode(file);
 
        dprintk("lockd: %s %s/%ld\n",
                msg, inode->i_sb->s_id, inode->i_ino);
@@ -71,56 +71,75 @@ static inline unsigned int file_hash(struct nfs_fh *f)
        return tmp & (FILE_NRHASH - 1);
 }
 
+int lock_to_openmode(struct file_lock *lock)
+{
+       return (lock->fl_type == F_WRLCK) ? O_WRONLY : O_RDONLY;
+}
+
+/*
+ * Open the file. Note that if we're reexporting, for example,
+ * this could block the lockd thread for a while.
+ *
+ * We have to make sure we have the right credential to open
+ * the file.
+ */
+static __be32 nlm_do_fopen(struct svc_rqst *rqstp,
+                          struct nlm_file *file, int mode)
+{
+       struct file **fp = &file->f_file[mode];
+       __be32  nfserr;
+
+       if (*fp)
+               return 0;
+       nfserr = nlmsvc_ops->fopen(rqstp, &file->f_handle, fp, mode);
+       if (nfserr)
+               dprintk("lockd: open failed (error %d)\n", nfserr);
+       return nfserr;
+}
+
 /*
  * Lookup file info. If it doesn't exist, create a file info struct
  * and open a (VFS) file for the given inode.
- *
- * FIXME:
- * Note that we open the file O_RDONLY even when creating write locks.
- * This is not quite right, but for now, we assume the client performs
- * the proper R/W checking.
  */
 __be32
 nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
-                                       struct nfs_fh *f)
+                                       struct nlm_lock *lock)
 {
        struct nlm_file *file;
        unsigned int    hash;
        __be32          nfserr;
+       int             mode;
 
-       nlm_debug_print_fh("nlm_lookup_file", f);
+       nlm_debug_print_fh("nlm_lookup_file", &lock->fh);
 
-       hash = file_hash(f);
+       hash = file_hash(&lock->fh);
+       mode = lock_to_openmode(&lock->fl);
 
        /* Lock file table */
        mutex_lock(&nlm_file_mutex);
 
        hlist_for_each_entry(file, &nlm_files[hash], f_list)
-               if (!nfs_compare_fh(&file->f_handle, f))
+               if (!nfs_compare_fh(&file->f_handle, &lock->fh)) {
+                       mutex_lock(&file->f_mutex);
+                       nfserr = nlm_do_fopen(rqstp, file, mode);
+                       mutex_unlock(&file->f_mutex);
                        goto found;
-
-       nlm_debug_print_fh("creating file for", f);
+               }
+       nlm_debug_print_fh("creating file for", &lock->fh);
 
        nfserr = nlm_lck_denied_nolocks;
        file = kzalloc(sizeof(*file), GFP_KERNEL);
        if (!file)
-               goto out_unlock;
+               goto out_free;
 
-       memcpy(&file->f_handle, f, sizeof(struct nfs_fh));
+       memcpy(&file->f_handle, &lock->fh, sizeof(struct nfs_fh));
        mutex_init(&file->f_mutex);
        INIT_HLIST_NODE(&file->f_list);
        INIT_LIST_HEAD(&file->f_blocks);
 
-       /* Open the file. Note that this must not sleep for too long, else
-        * we would lock up lockd:-) So no NFS re-exports, folks.
-        *
-        * We have to make sure we have the right credential to open
-        * the file.
-        */
-       if ((nfserr = nlmsvc_ops->fopen(rqstp, f, &file->f_file)) != 0) {
-               dprintk("lockd: open failed (error %d)\n", nfserr);
-               goto out_free;
-       }
+       nfserr = nlm_do_fopen(rqstp, file, mode);
+       if (nfserr)
+               goto out_unlock;
 
        hlist_add_head(&file->f_list, &nlm_files[hash]);
 
@@ -128,7 +147,6 @@ found:
        dprintk("lockd: found file %p (count %d)\n", file, file->f_count);
        *result = file;
        file->f_count++;
-       nfserr = 0;
 
 out_unlock:
        mutex_unlock(&nlm_file_mutex);
@@ -148,13 +166,34 @@ nlm_delete_file(struct nlm_file *file)
        nlm_debug_print_file("closing file", file);
        if (!hlist_unhashed(&file->f_list)) {
                hlist_del(&file->f_list);
-               nlmsvc_ops->fclose(file->f_file);
+               if (file->f_file[O_RDONLY])
+                       nlmsvc_ops->fclose(file->f_file[O_RDONLY]);
+               if (file->f_file[O_WRONLY])
+                       nlmsvc_ops->fclose(file->f_file[O_WRONLY]);
                kfree(file);
        } else {
                printk(KERN_WARNING "lockd: attempt to release unknown file!\n");
        }
 }
 
+static int nlm_unlock_files(struct nlm_file *file)
+{
+       struct file_lock lock;
+       unsigned int i;
+
+       lock.fl_type  = F_UNLCK;
+       lock.fl_start = 0;
+       lock.fl_end   = OFFSET_MAX;
+       for (i = 0; i < ARRAY_SIZE(file->f_file); i++) {
+               if (file->f_file[i] &&
+                   vfs_lock_file(file->f_file[i], F_SETLK, &lock, NULL) < 0) {
+                       pr_warn("lockd: unlock failure in %s:%d\n",
+                               __FILE__, __LINE__);
+                       return 1;
+               }
+       }
+       return 0;
+}
+
 /*
  * Loop over all locks on the given file and perform the specified
  * action.
@@ -182,17 +221,10 @@ again:
 
                lockhost = ((struct nlm_lockowner *)fl->fl_owner)->host;
                if (match(lockhost, host)) {
-                       struct file_lock lock = *fl;
 
                        spin_unlock(&flctx->flc_lock);
-                       lock.fl_type  = F_UNLCK;
-                       lock.fl_start = 0;
-                       lock.fl_end   = OFFSET_MAX;
-                       if (vfs_lock_file(file->f_file, F_SETLK, &lock, NULL) < 0) {
-                               printk("lockd: unlock failure in %s:%d\n",
-                                               __FILE__, __LINE__);
+                       if (nlm_unlock_files(file))
                                return 1;
-                       }
                        goto again;
                }
        }
@@ -246,6 +278,15 @@ nlm_file_inuse(struct nlm_file *file)
        return 0;
 }
 
+static void nlm_close_files(struct nlm_file *file)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(file->f_file); i++)
+               if (file->f_file[i])
+                       nlmsvc_ops->fclose(file->f_file[i]);
+}
+
 /*
  * Loop over all files in the file table.
  */
@@ -276,7 +317,7 @@ nlm_traverse_files(void *data, nlm_host_match_fn_t match,
                        if (list_empty(&file->f_blocks) && !file->f_locks
                         && !file->f_shares && !file->f_count) {
                                hlist_del(&file->f_list);
-                               nlmsvc_ops->fclose(file->f_file);
+                               nlm_close_files(file);
                                kfree(file);
                        }
                }
@@ -410,12 +451,13 @@ nlmsvc_invalidate_all(void)
        nlm_traverse_files(NULL, nlmsvc_is_client, NULL);
 }
 
+
 static int
 nlmsvc_match_sb(void *datap, struct nlm_file *file)
 {
        struct super_block *sb = datap;
 
-       return sb == locks_inode(file->f_file)->i_sb;
+       return sb == nlmsvc_file_inode(file)->i_sb;
 }
 
 /**
index 74b2a1d..3d6fb4a 100644 (file)
@@ -1397,103 +1397,6 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
        return error;
 }
 
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-/**
- * locks_mandatory_locked - Check for an active lock
- * @file: the file to check
- *
- * Searches the inode's list of locks to find any POSIX locks which conflict.
- * This function is called from locks_verify_locked() only.
- */
-int locks_mandatory_locked(struct file *file)
-{
-       int ret;
-       struct inode *inode = locks_inode(file);
-       struct file_lock_context *ctx;
-       struct file_lock *fl;
-
-       ctx = smp_load_acquire(&inode->i_flctx);
-       if (!ctx || list_empty_careful(&ctx->flc_posix))
-               return 0;
-
-       /*
-        * Search the lock list for this inode for any POSIX locks.
-        */
-       spin_lock(&ctx->flc_lock);
-       ret = 0;
-       list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
-               if (fl->fl_owner != current->files &&
-                   fl->fl_owner != file) {
-                       ret = -EAGAIN;
-                       break;
-               }
-       }
-       spin_unlock(&ctx->flc_lock);
-       return ret;
-}
-
-/**
- * locks_mandatory_area - Check for a conflicting lock
- * @inode:     the file to check
- * @filp:       how the file was opened (if it was)
- * @start:     first byte in the file to check
- * @end:       lastbyte in the file to check
- * @type:      %F_WRLCK for a write lock, else %F_RDLCK
- *
- * Searches the inode's list of locks to find any POSIX locks which conflict.
- */
-int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
-                        loff_t end, unsigned char type)
-{
-       struct file_lock fl;
-       int error;
-       bool sleep = false;
-
-       locks_init_lock(&fl);
-       fl.fl_pid = current->tgid;
-       fl.fl_file = filp;
-       fl.fl_flags = FL_POSIX | FL_ACCESS;
-       if (filp && !(filp->f_flags & O_NONBLOCK))
-               sleep = true;
-       fl.fl_type = type;
-       fl.fl_start = start;
-       fl.fl_end = end;
-
-       for (;;) {
-               if (filp) {
-                       fl.fl_owner = filp;
-                       fl.fl_flags &= ~FL_SLEEP;
-                       error = posix_lock_inode(inode, &fl, NULL);
-                       if (!error)
-                               break;
-               }
-
-               if (sleep)
-                       fl.fl_flags |= FL_SLEEP;
-               fl.fl_owner = current->files;
-               error = posix_lock_inode(inode, &fl, NULL);
-               if (error != FILE_LOCK_DEFERRED)
-                       break;
-               error = wait_event_interruptible(fl.fl_wait,
-                                       list_empty(&fl.fl_blocked_member));
-               if (!error) {
-                       /*
-                        * If we've been sleeping someone might have
-                        * changed the permissions behind our back.
-                        */
-                       if (__mandatory_lock(inode))
-                               continue;
-               }
-
-               break;
-       }
-       locks_delete_block(&fl);
-
-       return error;
-}
-EXPORT_SYMBOL(locks_mandatory_area);
-#endif /* CONFIG_MANDATORY_FILE_LOCKING */
-
 static void lease_clear_pending(struct file_lock *fl, int arg)
 {
        switch (arg) {
@@ -2486,14 +2389,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
        if (file_lock == NULL)
                return -ENOLCK;
 
-       /* Don't allow mandatory locks on files that may be memory mapped
-        * and shared.
-        */
-       if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
-               error = -EAGAIN;
-               goto out;
-       }
-
        error = flock_to_posix_lock(filp, file_lock, flock);
        if (error)
                goto out;
@@ -2611,21 +2506,12 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
                struct flock64 *flock)
 {
        struct file_lock *file_lock = locks_alloc_lock();
-       struct inode *inode = locks_inode(filp);
        struct file *f;
        int error;
 
        if (file_lock == NULL)
                return -ENOLCK;
 
-       /* Don't allow mandatory locks on files that may be memory mapped
-        * and shared.
-        */
-       if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
-               error = -EAGAIN;
-               goto out;
-       }
-
        error = flock64_to_posix_lock(filp, file_lock, flock);
        if (error)
                goto out;
@@ -2857,8 +2743,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
                        seq_puts(f, "POSIX ");
 
                seq_printf(f, " %s ",
-                            (inode == NULL) ? "*NOINODE*" :
-                            mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
+                            (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
        } else if (IS_FLOCK(fl)) {
                if (fl->fl_type & LOCK_MAND) {
                        seq_puts(f, "FLOCK  MSNFS     ");
index bf6d8a7..d049d39 100644 (file)
@@ -203,6 +203,14 @@ getname_flags(const char __user *filename, int flags, int *empty)
        return result;
 }
 
+struct filename *
+getname_uflags(const char __user *filename, int uflags)
+{
+       int flags = (uflags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
+
+       return getname_flags(filename, flags, NULL);
+}
+
 struct filename *
 getname(const char __user * filename)
 {
@@ -247,6 +255,9 @@ getname_kernel(const char * filename)
 
 void putname(struct filename *name)
 {
+       if (IS_ERR_OR_NULL(name))
+               return;
+
        BUG_ON(name->refcnt <= 0);
 
        if (--name->refcnt > 0)
@@ -2456,7 +2467,7 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
        return err;
 }
 
-int filename_lookup(int dfd, struct filename *name, unsigned flags,
+static int __filename_lookup(int dfd, struct filename *name, unsigned flags,
                    struct path *path, struct path *root)
 {
        int retval;
@@ -2474,6 +2485,14 @@ int filename_lookup(int dfd, struct filename *name, unsigned flags,
                audit_inode(name, path->dentry,
                            flags & LOOKUP_MOUNTPOINT ? AUDIT_INODE_NOEVAL : 0);
        restore_nameidata();
+       return retval;
+}
+
+int filename_lookup(int dfd, struct filename *name, unsigned flags,
+                   struct path *path, struct path *root)
+{
+       int retval = __filename_lookup(dfd, name, flags, path, root);
+
        putname(name);
        return retval;
 }
@@ -2495,7 +2514,7 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
        return err;
 }
 
-static struct filename *filename_parentat(int dfd, struct filename *name,
+static int __filename_parentat(int dfd, struct filename *name,
                                unsigned int flags, struct path *parent,
                                struct qstr *last, int *type)
 {
@@ -2503,7 +2522,7 @@ static struct filename *filename_parentat(int dfd, struct filename *name,
        struct nameidata nd;
 
        if (IS_ERR(name))
-               return name;
+               return PTR_ERR(name);
        set_nameidata(&nd, dfd, name, NULL);
        retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
        if (unlikely(retval == -ECHILD))
@@ -2514,29 +2533,34 @@ static struct filename *filename_parentat(int dfd, struct filename *name,
                *last = nd.last;
                *type = nd.last_type;
                audit_inode(name, parent->dentry, AUDIT_INODE_PARENT);
-       } else {
-               putname(name);
-               name = ERR_PTR(retval);
        }
        restore_nameidata();
-       return name;
+       return retval;
+}
+
+static int filename_parentat(int dfd, struct filename *name,
+                               unsigned int flags, struct path *parent,
+                               struct qstr *last, int *type)
+{
+       int retval = __filename_parentat(dfd, name, flags, parent, last, type);
+
+       putname(name);
+       return retval;
 }
 
 /* does lookup, returns the object with parent locked */
 struct dentry *kern_path_locked(const char *name, struct path *path)
 {
-       struct filename *filename;
        struct dentry *d;
        struct qstr last;
-       int type;
+       int type, error;
 
-       filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path,
+       error = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path,
                                    &last, &type);
-       if (IS_ERR(filename))
-               return ERR_CAST(filename);
+       if (error)
+               return ERR_PTR(error);
        if (unlikely(type != LAST_NORM)) {
                path_put(path);
-               putname(filename);
                return ERR_PTR(-EINVAL);
        }
        inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
@@ -2545,7 +2569,6 @@ struct dentry *kern_path_locked(const char *name, struct path *path)
                inode_unlock(path->dentry->d_inode);
                path_put(path);
        }
-       putname(filename);
        return d;
 }
 
@@ -2575,8 +2598,9 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
 }
 EXPORT_SYMBOL(vfs_path_lookup);
 
-static int lookup_one_len_common(const char *name, struct dentry *base,
-                                int len, struct qstr *this)
+static int lookup_one_common(struct user_namespace *mnt_userns,
+                            const char *name, struct dentry *base, int len,
+                            struct qstr *this)
 {
        this->name = name;
        this->len = len;
@@ -2604,7 +2628,7 @@ static int lookup_one_len_common(const char *name, struct dentry *base,
                        return err;
        }
 
-       return inode_permission(&init_user_ns, base->d_inode, MAY_EXEC);
+       return inode_permission(mnt_userns, base->d_inode, MAY_EXEC);
 }
 
 /**
@@ -2628,7 +2652,7 @@ struct dentry *try_lookup_one_len(const char *name, struct dentry *base, int len
 
        WARN_ON_ONCE(!inode_is_locked(base->d_inode));
 
-       err = lookup_one_len_common(name, base, len, &this);
+       err = lookup_one_common(&init_user_ns, name, base, len, &this);
        if (err)
                return ERR_PTR(err);
 
@@ -2655,7 +2679,7 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
 
        WARN_ON_ONCE(!inode_is_locked(base->d_inode));
 
-       err = lookup_one_len_common(name, base, len, &this);
+       err = lookup_one_common(&init_user_ns, name, base, len, &this);
        if (err)
                return ERR_PTR(err);
 
@@ -2664,6 +2688,36 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
 }
 EXPORT_SYMBOL(lookup_one_len);
 
+/**
+ * lookup_one - filesystem helper to lookup single pathname component
+ * @mnt_userns:        user namespace of the mount the lookup is performed from
+ * @name:      pathname component to lookup
+ * @base:      base directory to lookup from
+ * @len:       maximum length @len should be interpreted to
+ *
+ * Note that this routine is purely a helper for filesystem usage and should
+ * not be called by generic code.
+ *
+ * The caller must hold base->i_mutex.
+ */
+struct dentry *lookup_one(struct user_namespace *mnt_userns, const char *name,
+                         struct dentry *base, int len)
+{
+       struct dentry *dentry;
+       struct qstr this;
+       int err;
+
+       WARN_ON_ONCE(!inode_is_locked(base->d_inode));
+
+       err = lookup_one_common(mnt_userns, name, base, len, &this);
+       if (err)
+               return ERR_PTR(err);
+
+       dentry = lookup_dcache(&this, base, 0);
+       return dentry ? dentry : __lookup_slow(&this, base, 0);
+}
+EXPORT_SYMBOL(lookup_one);
+
 /**
  * lookup_one_len_unlocked - filesystem helper to lookup single pathname component
  * @name:      pathname component to lookup
@@ -2683,7 +2737,7 @@ struct dentry *lookup_one_len_unlocked(const char *name,
        int err;
        struct dentry *ret;
 
-       err = lookup_one_len_common(name, base, len, &this);
+       err = lookup_one_common(&init_user_ns, name, base, len, &this);
        if (err)
                return ERR_PTR(err);
 
@@ -3023,9 +3077,7 @@ static int handle_truncate(struct user_namespace *mnt_userns, struct file *filp)
        /*
         * Refuse to truncate files with mandatory locks held on them.
         */
-       error = locks_verify_locked(filp);
-       if (!error)
-               error = security_path_truncate(path);
+       error = security_path_truncate(path);
        if (!error) {
                error = do_truncate(mnt_userns, path->dentry, 0,
                                    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
@@ -3566,7 +3618,7 @@ struct file *do_file_open_root(const struct path *root,
        return file;
 }
 
-static struct dentry *filename_create(int dfd, struct filename *name,
+static struct dentry *__filename_create(int dfd, struct filename *name,
                                struct path *path, unsigned int lookup_flags)
 {
        struct dentry *dentry = ERR_PTR(-EEXIST);
@@ -3582,9 +3634,9 @@ static struct dentry *filename_create(int dfd, struct filename *name,
         */
        lookup_flags &= LOOKUP_REVAL;
 
-       name = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
-       if (IS_ERR(name))
-               return ERR_CAST(name);
+       error = __filename_parentat(dfd, name, lookup_flags, path, &last, &type);
+       if (error)
+               return ERR_PTR(error);
 
        /*
         * Yucky last component or no last component at all?
@@ -3622,7 +3674,6 @@ static struct dentry *filename_create(int dfd, struct filename *name,
                error = err2;
                goto fail;
        }
-       putname(name);
        return dentry;
 fail:
        dput(dentry);
@@ -3633,10 +3684,18 @@ unlock:
                mnt_drop_write(path->mnt);
 out:
        path_put(path);
-       putname(name);
        return dentry;
 }
 
+static inline struct dentry *filename_create(int dfd, struct filename *name,
+                               struct path *path, unsigned int lookup_flags)
+{
+       struct dentry *res = __filename_create(dfd, name, path, lookup_flags);
+
+       putname(name);
+       return res;
+}
+
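The __filename_create()/filename_create() split settles the struct filename ownership rule the rest of this series leans on: the double-underscore helper borrows the name, and whoever obtained it from getname() releases it exactly once with putname() on every exit path (putname() also tolerates ERR_PTR values here, which the do_symlinkat() error paths further down rely on). A hedged sketch of the resulting shape, with a made-up caller mirroring do_mknodat()/do_mkdirat():

	/* Hypothetical caller, mirroring the do_mknodat()/do_mkdirat() shape. */
	static int example_create(int dfd, struct filename *name, umode_t mode)
	{
		struct path path;
		struct dentry *dentry;
		int error;

		dentry = __filename_create(dfd, name, &path, 0);  /* name is borrowed */
		error = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out;

		/* ... create something at dentry ... */
		error = 0;
		done_path_create(&path, dentry);
	out:
		putname(name);		/* single release point, even on error paths */
		return error;
	}

A syscall wrapper then hands ownership straight over, as the mknodat()/mkdirat() hunks below do, e.g. example_create(AT_FDCWD, getname(pathname), mode).
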
 struct dentry *kern_path_create(int dfd, const char *pathname,
                                struct path *path, unsigned int lookup_flags)
 {
@@ -3725,7 +3784,7 @@ static int may_mknod(umode_t mode)
        }
 }
 
-static long do_mknodat(int dfd, const char __user *filename, umode_t mode,
+static int do_mknodat(int dfd, struct filename *name, umode_t mode,
                unsigned int dev)
 {
        struct user_namespace *mnt_userns;
@@ -3736,17 +3795,18 @@ static long do_mknodat(int dfd, const char __user *filename, umode_t mode,
 
        error = may_mknod(mode);
        if (error)
-               return error;
+               goto out1;
 retry:
-       dentry = user_path_create(dfd, filename, &path, lookup_flags);
+       dentry = __filename_create(dfd, name, &path, lookup_flags);
+       error = PTR_ERR(dentry);
        if (IS_ERR(dentry))
-               return PTR_ERR(dentry);
+               goto out1;
 
        if (!IS_POSIXACL(path.dentry->d_inode))
                mode &= ~current_umask();
        error = security_path_mknod(&path, dentry, mode, dev);
        if (error)
-               goto out;
+               goto out2;
 
        mnt_userns = mnt_user_ns(path.mnt);
        switch (mode & S_IFMT) {
@@ -3765,24 +3825,26 @@ retry:
                                          dentry, mode, 0);
                        break;
        }
-out:
+out2:
        done_path_create(&path, dentry);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
                goto retry;
        }
+out1:
+       putname(name);
        return error;
 }
 
 SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
                unsigned int, dev)
 {
-       return do_mknodat(dfd, filename, mode, dev);
+       return do_mknodat(dfd, getname(filename), mode, dev);
 }
 
 SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
 {
-       return do_mknodat(AT_FDCWD, filename, mode, dev);
+       return do_mknodat(AT_FDCWD, getname(filename), mode, dev);
 }
 
 /**
@@ -3827,7 +3889,7 @@ int vfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
 }
 EXPORT_SYMBOL(vfs_mkdir);
 
-static long do_mkdirat(int dfd, const char __user *pathname, umode_t mode)
+int do_mkdirat(int dfd, struct filename *name, umode_t mode)
 {
        struct dentry *dentry;
        struct path path;
@@ -3835,9 +3897,10 @@ static long do_mkdirat(int dfd, const char __user *pathname, umode_t mode)
        unsigned int lookup_flags = LOOKUP_DIRECTORY;
 
 retry:
-       dentry = user_path_create(dfd, pathname, &path, lookup_flags);
+       dentry = __filename_create(dfd, name, &path, lookup_flags);
+       error = PTR_ERR(dentry);
        if (IS_ERR(dentry))
-               return PTR_ERR(dentry);
+               goto out_putname;
 
        if (!IS_POSIXACL(path.dentry->d_inode))
                mode &= ~current_umask();
@@ -3853,17 +3916,19 @@ retry:
                lookup_flags |= LOOKUP_REVAL;
                goto retry;
        }
+out_putname:
+       putname(name);
        return error;
 }
 
 SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
 {
-       return do_mkdirat(dfd, pathname, mode);
+       return do_mkdirat(dfd, getname(pathname), mode);
 }
 
 SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
 {
-       return do_mkdirat(AT_FDCWD, pathname, mode);
+       return do_mkdirat(AT_FDCWD, getname(pathname), mode);
 }
 
 /**
@@ -3921,62 +3986,62 @@ out:
 }
 EXPORT_SYMBOL(vfs_rmdir);
 
-long do_rmdir(int dfd, struct filename *name)
+int do_rmdir(int dfd, struct filename *name)
 {
        struct user_namespace *mnt_userns;
-       int error = 0;
+       int error;
        struct dentry *dentry;
        struct path path;
        struct qstr last;
        int type;
        unsigned int lookup_flags = 0;
 retry:
-       name = filename_parentat(dfd, name, lookup_flags,
-                               &path, &last, &type);
-       if (IS_ERR(name))
-               return PTR_ERR(name);
+       error = __filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
+       if (error)
+               goto exit1;
 
        switch (type) {
        case LAST_DOTDOT:
                error = -ENOTEMPTY;
-               goto exit1;
+               goto exit2;
        case LAST_DOT:
                error = -EINVAL;
-               goto exit1;
+               goto exit2;
        case LAST_ROOT:
                error = -EBUSY;
-               goto exit1;
+               goto exit2;
        }
 
        error = mnt_want_write(path.mnt);
        if (error)
-               goto exit1;
+               goto exit2;
 
        inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
        dentry = __lookup_hash(&last, path.dentry, lookup_flags);
        error = PTR_ERR(dentry);
        if (IS_ERR(dentry))
-               goto exit2;
+               goto exit3;
        if (!dentry->d_inode) {
                error = -ENOENT;
-               goto exit3;
+               goto exit4;
        }
        error = security_path_rmdir(&path, dentry);
        if (error)
-               goto exit3;
+               goto exit4;
        mnt_userns = mnt_user_ns(path.mnt);
        error = vfs_rmdir(mnt_userns, path.dentry->d_inode, dentry);
-exit3:
+exit4:
        dput(dentry);
-exit2:
+exit3:
        inode_unlock(path.dentry->d_inode);
        mnt_drop_write(path.mnt);
-exit1:
+exit2:
        path_put(&path);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
                goto retry;
        }
+exit1:
        putname(name);
        return error;
 }
@@ -4059,7 +4124,7 @@ EXPORT_SYMBOL(vfs_unlink);
  * writeout happening, and we don't want to prevent access to the directory
  * while waiting on the I/O.
  */
-long do_unlinkat(int dfd, struct filename *name)
+int do_unlinkat(int dfd, struct filename *name)
 {
        int error;
        struct dentry *dentry;
@@ -4070,17 +4135,17 @@ long do_unlinkat(int dfd, struct filename *name)
        struct inode *delegated_inode = NULL;
        unsigned int lookup_flags = 0;
 retry:
-       name = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
-       if (IS_ERR(name))
-               return PTR_ERR(name);
+       error = __filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
+       if (error)
+               goto exit1;
 
        error = -EISDIR;
        if (type != LAST_NORM)
-               goto exit1;
+               goto exit2;
 
        error = mnt_want_write(path.mnt);
        if (error)
-               goto exit1;
+               goto exit2;
 retry_deleg:
        inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
        dentry = __lookup_hash(&last, path.dentry, lookup_flags);
@@ -4097,11 +4162,11 @@ retry_deleg:
                ihold(inode);
                error = security_path_unlink(&path, dentry);
                if (error)
-                       goto exit2;
+                       goto exit3;
                mnt_userns = mnt_user_ns(path.mnt);
                error = vfs_unlink(mnt_userns, path.dentry->d_inode, dentry,
                                   &delegated_inode);
-exit2:
+exit3:
                dput(dentry);
        }
        inode_unlock(path.dentry->d_inode);
@@ -4114,13 +4179,14 @@ exit2:
                        goto retry_deleg;
        }
        mnt_drop_write(path.mnt);
-exit1:
+exit2:
        path_put(&path);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
                inode = NULL;
                goto retry;
        }
+exit1:
        putname(name);
        return error;
 
@@ -4131,7 +4197,7 @@ slashes:
                error = -EISDIR;
        else
                error = -ENOTDIR;
-       goto exit2;
+       goto exit3;
 }
 
 SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
@@ -4186,23 +4252,22 @@ int vfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
 }
 EXPORT_SYMBOL(vfs_symlink);
 
-static long do_symlinkat(const char __user *oldname, int newdfd,
-                 const char __user *newname)
+int do_symlinkat(struct filename *from, int newdfd, struct filename *to)
 {
        int error;
-       struct filename *from;
        struct dentry *dentry;
        struct path path;
        unsigned int lookup_flags = 0;
 
-       from = getname(oldname);
-       if (IS_ERR(from))
-               return PTR_ERR(from);
+       if (IS_ERR(from)) {
+               error = PTR_ERR(from);
+               goto out_putnames;
+       }
 retry:
-       dentry = user_path_create(newdfd, newname, &path, lookup_flags);
+       dentry = __filename_create(newdfd, to, &path, lookup_flags);
        error = PTR_ERR(dentry);
        if (IS_ERR(dentry))
-               goto out_putname;
+               goto out_putnames;
 
        error = security_path_symlink(&path, dentry, from->name);
        if (!error) {
@@ -4217,7 +4282,8 @@ retry:
                lookup_flags |= LOOKUP_REVAL;
                goto retry;
        }
-out_putname:
+out_putnames:
+       putname(to);
        putname(from);
        return error;
 }
@@ -4225,12 +4291,12 @@ out_putname:
 SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
                int, newdfd, const char __user *, newname)
 {
-       return do_symlinkat(oldname, newdfd, newname);
+       return do_symlinkat(getname(oldname), newdfd, getname(newname));
 }
 
 SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
 {
-       return do_symlinkat(oldname, AT_FDCWD, newname);
+       return do_symlinkat(getname(oldname), AT_FDCWD, getname(newname));
 }
 
 /**
@@ -4331,8 +4397,8 @@ EXPORT_SYMBOL(vfs_link);
  * with linux 2.0, and to avoid hard-linking to directories
  * and other special files.  --ADM
  */
-static int do_linkat(int olddfd, const char __user *oldname, int newdfd,
-             const char __user *newname, int flags)
+int do_linkat(int olddfd, struct filename *old, int newdfd,
+             struct filename *new, int flags)
 {
        struct user_namespace *mnt_userns;
        struct dentry *new_dentry;
@@ -4341,31 +4407,32 @@ static int do_linkat(int olddfd, const char __user *oldname, int newdfd,
        int how = 0;
        int error;
 
-       if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
-               return -EINVAL;
+       if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) {
+               error = -EINVAL;
+               goto out_putnames;
+       }
        /*
         * To use null names we require CAP_DAC_READ_SEARCH
         * This ensures that not everyone will be able to create a
         * hard link using the passed file descriptor.
         */
-       if (flags & AT_EMPTY_PATH) {
-               if (!capable(CAP_DAC_READ_SEARCH))
-                       return -ENOENT;
-               how = LOOKUP_EMPTY;
+       if (flags & AT_EMPTY_PATH && !capable(CAP_DAC_READ_SEARCH)) {
+               error = -ENOENT;
+               goto out_putnames;
        }
 
        if (flags & AT_SYMLINK_FOLLOW)
                how |= LOOKUP_FOLLOW;
 retry:
-       error = user_path_at(olddfd, oldname, how, &old_path);
+       error = __filename_lookup(olddfd, old, how, &old_path, NULL);
        if (error)
-               return error;
+               goto out_putnames;
 
-       new_dentry = user_path_create(newdfd, newname, &new_path,
+       new_dentry = __filename_create(newdfd, new, &new_path,
                                        (how & LOOKUP_REVAL));
        error = PTR_ERR(new_dentry);
        if (IS_ERR(new_dentry))
-               goto out;
+               goto out_putpath;
 
        error = -EXDEV;
        if (old_path.mnt != new_path.mnt)
@@ -4393,8 +4460,11 @@ out_dput:
                how |= LOOKUP_REVAL;
                goto retry;
        }
-out:
+out_putpath:
        path_put(&old_path);
+out_putnames:
+       putname(old);
+       putname(new);
 
        return error;
 }
@@ -4402,12 +4472,13 @@ out:
 SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
                int, newdfd, const char __user *, newname, int, flags)
 {
-       return do_linkat(olddfd, oldname, newdfd, newname, flags);
+       return do_linkat(olddfd, getname_uflags(oldname, flags),
+               newdfd, getname(newname), flags);
 }
 
 SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
 {
-       return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
+       return do_linkat(AT_FDCWD, getname(oldname), AT_FDCWD, getname(newname), 0);
 }
 
 /**
@@ -4602,29 +4673,25 @@ int do_renameat2(int olddfd, struct filename *from, int newdfd,
        int error = -EINVAL;
 
        if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
-               goto put_both;
+               goto put_names;
 
        if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) &&
            (flags & RENAME_EXCHANGE))
-               goto put_both;
+               goto put_names;
 
        if (flags & RENAME_EXCHANGE)
                target_flags = 0;
 
 retry:
-       from = filename_parentat(olddfd, from, lookup_flags, &old_path,
+       error = __filename_parentat(olddfd, from, lookup_flags, &old_path,
                                        &old_last, &old_type);
-       if (IS_ERR(from)) {
-               error = PTR_ERR(from);
-               goto put_new;
-       }
+       if (error)
+               goto put_names;
 
-       to = filename_parentat(newdfd, to, lookup_flags, &new_path, &new_last,
+       error = __filename_parentat(newdfd, to, lookup_flags, &new_path, &new_last,
                                &new_type);
-       if (IS_ERR(to)) {
-               error = PTR_ERR(to);
+       if (error)
                goto exit1;
-       }
 
        error = -EXDEV;
        if (old_path.mnt != new_path.mnt)
@@ -4727,12 +4794,9 @@ exit1:
                lookup_flags |= LOOKUP_REVAL;
                goto retry;
        }
-put_both:
-       if (!IS_ERR(from))
-               putname(from);
-put_new:
-       if (!IS_ERR(to))
-               putname(to);
+put_names:
+       putname(from);
+       putname(to);
        return error;
 }
 
index ab4174a..1285236 100644 (file)
@@ -1715,18 +1715,14 @@ static inline bool may_mount(void)
        return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
 }
 
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-static inline bool may_mandlock(void)
+static void warn_mandlock(void)
 {
-       return capable(CAP_SYS_ADMIN);
+       pr_warn_once("=======================================================\n"
+                    "WARNING: The mand mount option has been deprecated and\n"
+                    "         is ignored by this kernel. Remove the mand\n"
+                    "         option from the mount to silence this warning.\n"
+                    "=======================================================\n");
 }
-#else
-static inline bool may_mandlock(void)
-{
-       pr_warn("VFS: \"mand\" mount option not supported");
-       return false;
-}
-#endif
 
 static int can_umount(const struct path *path, int flags)
 {
@@ -1938,6 +1934,20 @@ void drop_collected_mounts(struct vfsmount *mnt)
        namespace_unlock();
 }
 
+static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+{
+       struct mount *child;
+
+       list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+               if (!is_subdir(child->mnt_mountpoint, dentry))
+                       continue;
+
+               if (child->mnt.mnt_flags & MNT_LOCKED)
+                       return true;
+       }
+       return false;
+}
+
 /**
  * clone_private_mount - create a private clone of a path
  * @path: path to clone
@@ -1953,10 +1963,19 @@ struct vfsmount *clone_private_mount(const struct path *path)
        struct mount *old_mnt = real_mount(path->mnt);
        struct mount *new_mnt;
 
+       down_read(&namespace_sem);
        if (IS_MNT_UNBINDABLE(old_mnt))
-               return ERR_PTR(-EINVAL);
+               goto invalid;
+
+       if (!check_mnt(old_mnt))
+               goto invalid;
+
+       if (has_locked_children(old_mnt, path->dentry))
+               goto invalid;
 
        new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
+       up_read(&namespace_sem);
+
        if (IS_ERR(new_mnt))
                return ERR_CAST(new_mnt);
 
@@ -1964,6 +1983,10 @@ struct vfsmount *clone_private_mount(const struct path *path)
        new_mnt->mnt_ns = MNT_NS_INTERNAL;
 
        return &new_mnt->mnt;
+
+invalid:
+       up_read(&namespace_sem);
+       return ERR_PTR(-EINVAL);
 }
 EXPORT_SYMBOL_GPL(clone_private_mount);
 
@@ -2315,19 +2338,6 @@ static int do_change_type(struct path *path, int ms_flags)
        return err;
 }
 
-static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
-{
-       struct mount *child;
-       list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
-               if (!is_subdir(child->mnt_mountpoint, dentry))
-                       continue;
-
-               if (child->mnt.mnt_flags & MNT_LOCKED)
-                       return true;
-       }
-       return false;
-}
-
 static struct mount *__do_loopback(struct path *old_path, int recurse)
 {
        struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
@@ -2684,6 +2694,78 @@ out:
        return ret;
 }
 
+static int do_set_group(struct path *from_path, struct path *to_path)
+{
+       struct mount *from, *to;
+       int err;
+
+       from = real_mount(from_path->mnt);
+       to = real_mount(to_path->mnt);
+
+       namespace_lock();
+
+       err = -EINVAL;
+       /* To and From must be mounted */
+       if (!is_mounted(&from->mnt))
+               goto out;
+       if (!is_mounted(&to->mnt))
+               goto out;
+
+       err = -EPERM;
+       /* We should be allowed to modify mount namespaces of both mounts */
+       if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
+               goto out;
+       if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
+               goto out;
+
+       err = -EINVAL;
+       /* To and From paths should be mount roots */
+       if (from_path->dentry != from_path->mnt->mnt_root)
+               goto out;
+       if (to_path->dentry != to_path->mnt->mnt_root)
+               goto out;
+
+       /* Setting sharing groups is only allowed across same superblock */
+       if (from->mnt.mnt_sb != to->mnt.mnt_sb)
+               goto out;
+
+       /* From mount root should be wider than To mount root */
+       if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
+               goto out;
+
+       /* From mount should not have locked children in place of To's root */
+       if (has_locked_children(from, to->mnt.mnt_root))
+               goto out;
+
+       /* Setting sharing groups is only allowed on private mounts */
+       if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
+               goto out;
+
+       /* From should not be private */
+       if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
+               goto out;
+
+       if (IS_MNT_SLAVE(from)) {
+               struct mount *m = from->mnt_master;
+
+               list_add(&to->mnt_slave, &m->mnt_slave_list);
+               to->mnt_master = m;
+       }
+
+       if (IS_MNT_SHARED(from)) {
+               to->mnt_group_id = from->mnt_group_id;
+               list_add(&to->mnt_share, &from->mnt_share);
+               lock_mount_hash();
+               set_mnt_shared(to);
+               unlock_mount_hash();
+       }
+
+       err = 0;
+out:
+       namespace_unlock();
+       return err;
+}
+
 static int do_move_mount(struct path *old_path, struct path *new_path)
 {
        struct mnt_namespace *ns;
@@ -3179,8 +3261,8 @@ int path_mount(const char *dev_name, struct path *path,
                return ret;
        if (!may_mount())
                return -EPERM;
-       if ((flags & SB_MANDLOCK) && !may_mandlock())
-               return -EPERM;
+       if (flags & SB_MANDLOCK)
+               warn_mandlock();
 
         /* Default to relatime unless overridden */
        if (!(flags & MS_NOATIME))
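From userspace, the visible effect is that MS_MANDLOCK no longer makes mount(2) fail (previously it returned -EPERM when CONFIG_MANDATORY_FILE_LOCKING was not built in, or when the caller lacked CAP_SYS_ADMIN); the flag is now ignored and the one-time banner above is printed. A small illustrative program, with a made-up mount point:

	#include <sys/mount.h>
	#include <stdio.h>

	int main(void)
	{
		/* "/mnt/test" is just an example mount point. */
		if (mount("none", "/mnt/test", "tmpfs", MS_MANDLOCK, NULL) == -1) {
			perror("mount");
			return 1;
		}
		/* On this kernel the call succeeds; dmesg carries the deprecation banner. */
		return 0;
	}
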
@@ -3563,9 +3645,8 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
        if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
                goto err_unlock;
 
-       ret = -EPERM;
-       if ((fc->sb_flags & SB_MANDLOCK) && !may_mandlock())
-               goto err_unlock;
+       if (fc->sb_flags & SB_MANDLOCK)
+               warn_mandlock();
 
        newmount.mnt = vfs_create_mount(fc);
        if (IS_ERR(newmount.mnt)) {
@@ -3669,7 +3750,10 @@ SYSCALL_DEFINE5(move_mount,
        if (ret < 0)
                goto out_to;
 
-       ret = do_move_mount(&from_path, &to_path);
+       if (flags & MOVE_MOUNT_SET_GROUP)
+               ret = do_set_group(&from_path, &to_path);
+       else
+               ret = do_move_mount(&from_path, &to_path);
 
 out_to:
        path_put(&to_path);
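MOVE_MOUNT_SET_GROUP is the userspace-visible side of do_set_group() above: rather than moving the mount, move_mount(2) copies the source mount's propagation group (shared peer group or slave relationship) onto the destination, subject to the checks in do_set_group(). A rough userspace sketch follows; the paths are made up, the program assumes headers that define SYS_move_mount, and the fallback #define mirrors the value in this release's uapi headers in case the installed ones predate it:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/mount.h>

	#ifndef MOVE_MOUNT_SET_GROUP
	#define MOVE_MOUNT_SET_GROUP 0x00000100	/* value from this release's uapi headers */
	#endif

	int main(void)
	{
		/* /mnt/src is a shared (or slave) mount, /mnt/dst a private mount
		 * of the same superblock; both paths are illustrative. */
		if (syscall(SYS_move_mount, AT_FDCWD, "/mnt/src",
			    AT_FDCWD, "/mnt/dst", MOVE_MOUNT_SET_GROUP) == -1) {
			perror("move_mount");
			return 1;
		}
		return 0;
	}
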
index 37a1a88..d772c20 100644 (file)
@@ -180,5 +180,5 @@ const struct export_operations nfs_export_ops = {
        .fetch_iversion = nfs_fetch_iversion,
        .flags = EXPORT_OP_NOWCC|EXPORT_OP_NOSUBTREECHK|
                EXPORT_OP_CLOSE_BEFORE_UNLINK|EXPORT_OP_REMOTE_FS|
-               EXPORT_OP_NOATOMIC_ATTR,
+               EXPORT_OP_NOATOMIC_ATTR|EXPORT_OP_SYNC_LOCKS,
 };
index 1fef107..aa353fd 100644 (file)
@@ -806,9 +806,8 @@ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 
        nfs_inc_stats(inode, NFSIOS_VFSLOCK);
 
-       /* No mandatory locks over NFS */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               goto out_err;
+       if (fl->fl_flags & FL_RECLAIM)
+               return -ENOGRACE;
 
        if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
                is_local = 1;
index 3f5b3d7..606fa15 100644 (file)
  * Note: we hold the dentry use count while the file is open.
  */
 static __be32
-nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
+nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp,
+               int mode)
 {
        __be32          nfserr;
+       int             access;
        struct svc_fh   fh;
 
        /* must initialize before using! but maxsize doesn't matter */
@@ -36,7 +38,9 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
        memcpy((char*)&fh.fh_handle.fh_base, f->data, f->size);
        fh.fh_export = NULL;
 
-       nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp);
+       access = (mode == O_WRONLY) ? NFSD_MAY_WRITE : NFSD_MAY_READ;
+       access |= NFSD_MAY_LOCK;
+       nfserr = nfsd_open(rqstp, &fh, S_IFREG, access, filp);
        fh_put(&fh);
        /* We return nlm error codes as nlm doesn't know
         * about nfsd, but nfsd does know about nlm..
index fa67ecd..4235641 100644 (file)
@@ -2687,9 +2687,9 @@ static void force_expire_client(struct nfs4_client *clp)
 
        trace_nfsd_clid_admin_expired(&clp->cl_clientid);
 
-       spin_lock(&clp->cl_lock);
+       spin_lock(&nn->client_lock);
        clp->cl_time = 0;
-       spin_unlock(&clp->cl_lock);
+       spin_unlock(&nn->client_lock);
 
        wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
        spin_lock(&nn->client_lock);
@@ -5735,16 +5735,6 @@ check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid,
                                NFS4_SHARE_DENY_READ);
 }
 
-/*
- * Allow READ/WRITE during grace period on recovered state only for files
- * that are not able to provide mandatory locking.
- */
-static inline int
-grace_disallows_io(struct net *net, struct inode *inode)
-{
-       return opens_in_grace(net) && mandatory_lock(inode);
-}
-
 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
 {
        /*
@@ -6026,7 +6016,6 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
                stateid_t *stateid, int flags, struct nfsd_file **nfp,
                struct nfs4_stid **cstid)
 {
-       struct inode *ino = d_inode(fhp->fh_dentry);
        struct net *net = SVC_NET(rqstp);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        struct nfs4_stid *s = NULL;
@@ -6035,9 +6024,6 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
        if (nfp)
                *nfp = NULL;
 
-       if (grace_disallows_io(net, ino))
-               return nfserr_grace;
-
        if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
                status = check_special_stateids(net, fhp, stateid, flags);
                goto done;
@@ -6835,6 +6821,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        struct nfsd4_blocked_lock *nbl = NULL;
        struct file_lock *file_lock = NULL;
        struct file_lock *conflock = NULL;
+       struct super_block *sb;
        __be32 status = 0;
        int lkflg;
        int err;
@@ -6856,6 +6843,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                dprintk("NFSD: nfsd4_lock: permission denied!\n");
                return status;
        }
+       sb = cstate->current_fh.fh_dentry->d_sb;
 
        if (lock->lk_is_new) {
                if (nfsd4_has_session(cstate))
@@ -6901,10 +6889,14 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        if (!locks_in_grace(net) && lock->lk_reclaim)
                goto out;
 
+       if (lock->lk_reclaim)
+               fl_flags |= FL_RECLAIM;
+
        fp = lock_stp->st_stid.sc_file;
        switch (lock->lk_type) {
                case NFS4_READW_LT:
-                       if (nfsd4_has_session(cstate))
+                       if (nfsd4_has_session(cstate) &&
+                           !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
                                fl_flags |= FL_SLEEP;
                        fallthrough;
                case NFS4_READ_LT:
@@ -6916,7 +6908,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                        fl_type = F_RDLCK;
                        break;
                case NFS4_WRITEW_LT:
-                       if (nfsd4_has_session(cstate))
+                       if (nfsd4_has_session(cstate) &&
+                           !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
                                fl_flags |= FL_SLEEP;
                        fallthrough;
                case NFS4_WRITE_LT:
@@ -7036,8 +7029,7 @@ out:
 /*
  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
  * so we do a temporary open here just to get an open file to pass to
- * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
- * inode operation.)
+ * vfs_test_lock.
  */
 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
 {
@@ -7052,7 +7044,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
                                                        NFSD_MAY_READ));
        if (err)
                goto out;
+       lock->fl_file = nf->nf_file;
        err = nfserrno(vfs_test_lock(nf->nf_file, lock));
+       lock->fl_file = NULL;
 out:
        fh_unlock(fhp);
        nfsd_file_put(nf);
index 60d7c59..90fcd61 100644 (file)
@@ -881,6 +881,7 @@ nfserrno (int errno)
                { nfserr_serverfault, -ENFILE },
                { nfserr_io, -EUCLEAN },
                { nfserr_perm, -ENOKEY },
+               { nfserr_no_grace, -ENOGRACE},
        };
        int     i;
 
index adaec43..5385209 100644 (file)
@@ -400,18 +400,16 @@ TRACE_EVENT(nfsd_dirent,
        TP_STRUCT__entry(
                __field(u32, fh_hash)
                __field(u64, ino)
-               __field(int, len)
-               __dynamic_array(unsigned char, name, namlen)
+               __string_len(name, name, namlen)
        ),
        TP_fast_assign(
                __entry->fh_hash = fhp ? knfsd_fh_hash(&fhp->fh_handle) : 0;
                __entry->ino = ino;
-               __entry->len = namlen;
-               memcpy(__get_str(name), name, namlen);
+               __assign_str_len(name, name, namlen)
        ),
-       TP_printk("fh_hash=0x%08x ino=%llu name=%.*s",
-               __entry->fh_hash, __entry->ino,
-               __entry->len, __get_str(name))
+       TP_printk("fh_hash=0x%08x ino=%llu name=%s",
+               __entry->fh_hash, __entry->ino, __get_str(name)
+       )
 )
 
 #include "state.h"
@@ -608,7 +606,7 @@ DECLARE_EVENT_CLASS(nfsd_clid_class,
                __array(unsigned char, addr, sizeof(struct sockaddr_in6))
                __field(unsigned long, flavor)
                __array(unsigned char, verifier, NFS4_VERIFIER_SIZE)
-               __dynamic_array(char, name, clp->cl_name.len + 1)
+               __string_len(name, name, clp->cl_name.len)
        ),
        TP_fast_assign(
                __entry->cl_boot = clp->cl_clientid.cl_boot;
@@ -618,8 +616,7 @@ DECLARE_EVENT_CLASS(nfsd_clid_class,
                __entry->flavor = clp->cl_cred.cr_flavor;
                memcpy(__entry->verifier, (void *)&clp->cl_verifier,
                       NFS4_VERIFIER_SIZE);
-               memcpy(__get_str(name), clp->cl_name.data, clp->cl_name.len);
-               __get_str(name)[clp->cl_name.len] = '\0';
+               __assign_str_len(name, clp->cl_name.data, clp->cl_name.len);
        ),
        TP_printk("addr=%pISpc name='%s' verifier=0x%s flavor=%s client=%08x:%08x",
                __entry->addr, __get_str(name),
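The two tracepoint hunks above replace the open-coded __dynamic_array() plus memcpy() pattern with the __string_len()/__assign_str_len() helpers, which also take care of NUL-termination. A minimal hedged sketch of that pattern outside of nfsd (the event name and arguments are made up, and the usual trace header boilerplate around TRACE_EVENT() is omitted):

	TRACE_EVENT(example_name_event,
		TP_PROTO(const char *name, unsigned int namlen),
		TP_ARGS(name, namlen),
		TP_STRUCT__entry(
			__string_len(name, name, namlen)	/* reserves namlen + 1 bytes */
		),
		TP_fast_assign(
			__assign_str_len(name, name, namlen);	/* copies and NUL-terminates */
		),
		TP_printk("name=%s", __get_str(name))
	);
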
index a224a5e..738d564 100644 (file)
@@ -244,7 +244,6 @@ out_nfserr:
  * returned. Otherwise the covered directory is returned.
  * NOTE: this mountpoint crossing is not supported properly by all
  *   clients and is explicitly disallowed for NFSv3
- *      NeilBrown <neilb@cse.unsw.edu.au>
  */
 __be32
 nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
@@ -333,7 +332,6 @@ nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
                struct iattr *iap)
 {
        struct inode *inode = d_inode(fhp->fh_dentry);
-       int host_err;
 
        if (iap->ia_size < inode->i_size) {
                __be32 err;
@@ -343,20 +341,7 @@ nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
                if (err)
                        return err;
        }
-
-       host_err = get_write_access(inode);
-       if (host_err)
-               goto out_nfserrno;
-
-       host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
-       if (host_err)
-               goto out_put_write_access;
-       return 0;
-
-out_put_write_access:
-       put_write_access(inode);
-out_nfserrno:
-       return nfserrno(host_err);
+       return nfserrno(get_write_access(inode));
 }
 
 /*
@@ -750,13 +735,6 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
        err = nfserr_perm;
        if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
                goto out;
-       /*
-        * We must ignore files (but only files) which might have mandatory
-        * locks on them because there is no way to know if the accesser has
-        * the lock.
-        */
-       if (S_ISREG((inode)->i_mode) && mandatory_lock(inode))
-               goto out;
 
        if (!inode->i_fop)
                goto out;
@@ -847,26 +825,16 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
        struct svc_rqst *rqstp = sd->u.data;
        struct page **pp = rqstp->rq_next_page;
        struct page *page = buf->page;
-       size_t size;
-
-       size = sd->len;
 
        if (rqstp->rq_res.page_len == 0) {
-               get_page(page);
-               put_page(*rqstp->rq_next_page);
-               *(rqstp->rq_next_page++) = page;
+               svc_rqst_replace_page(rqstp, page);
                rqstp->rq_res.page_base = buf->offset;
-               rqstp->rq_res.page_len = size;
        } else if (page != pp[-1]) {
-               get_page(page);
-               if (*rqstp->rq_next_page)
-                       put_page(*rqstp->rq_next_page);
-               *(rqstp->rq_next_page++) = page;
-               rqstp->rq_res.page_len += size;
-       } else
-               rqstp->rq_res.page_len += size;
+               svc_rqst_replace_page(rqstp, page);
+       }
+       rqstp->rq_res.page_len += sd->len;
 
-       return size;
+       return sd->len;
 }
 
 static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
index 4abd928..f6b2d28 100644 (file)
@@ -1053,7 +1053,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_time_gran = 1;
        sb->s_max_links = NILFS_LINK_MAX;
 
-       sb->s_bdi = bdi_get(sb->s_bdev->bd_bdi);
+       sb->s_bdi = bdi_get(sb->s_bdev->bd_disk->bdi);
 
        err = load_nilfs(nilfs, sb);
        if (err)
index 64864fb..6facdf4 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/fanotify.h>
 #include <linux/fcntl.h>
+#include <linux/fdtable.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/anon_inodes.h>
@@ -54,22 +55,27 @@ static int fanotify_max_queued_events __read_mostly;
 
 #include <linux/sysctl.h>
 
+static long ft_zero = 0;
+static long ft_int_max = INT_MAX;
+
 struct ctl_table fanotify_table[] = {
        {
                .procname       = "max_user_groups",
                .data   = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &ft_zero,
+               .extra2         = &ft_int_max,
        },
        {
                .procname       = "max_user_marks",
                .data   = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &ft_zero,
+               .extra2         = &ft_int_max,
        },
        {
                .procname       = "max_queued_events",
@@ -104,8 +110,10 @@ struct kmem_cache *fanotify_path_event_cachep __read_mostly;
 struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
 
 #define FANOTIFY_EVENT_ALIGN 4
-#define FANOTIFY_INFO_HDR_LEN \
+#define FANOTIFY_FID_INFO_HDR_LEN \
        (sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle))
+#define FANOTIFY_PIDFD_INFO_HDR_LEN \
+       sizeof(struct fanotify_event_info_pidfd)
 
 static int fanotify_fid_info_len(int fh_len, int name_len)
 {
@@ -114,10 +122,11 @@ static int fanotify_fid_info_len(int fh_len, int name_len)
        if (name_len)
                info_len += name_len + 1;
 
-       return roundup(FANOTIFY_INFO_HDR_LEN + info_len, FANOTIFY_EVENT_ALIGN);
+       return roundup(FANOTIFY_FID_INFO_HDR_LEN + info_len,
+                      FANOTIFY_EVENT_ALIGN);
 }
 
-static int fanotify_event_info_len(unsigned int fid_mode,
+static int fanotify_event_info_len(unsigned int info_mode,
                                   struct fanotify_event *event)
 {
        struct fanotify_info *info = fanotify_event_info(event);
@@ -128,7 +137,8 @@ static int fanotify_event_info_len(unsigned int fid_mode,
 
        if (dir_fh_len) {
                info_len += fanotify_fid_info_len(dir_fh_len, info->name_len);
-       } else if ((fid_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) {
+       } else if ((info_mode & FAN_REPORT_NAME) &&
+                  (event->mask & FAN_ONDIR)) {
                /*
                 * With group flag FAN_REPORT_NAME, if name was not recorded in
                 * event on a directory, we will report the name ".".
@@ -136,6 +146,9 @@ static int fanotify_event_info_len(unsigned int fid_mode,
                dot_len = 1;
        }
 
+       if (info_mode & FAN_REPORT_PIDFD)
+               info_len += FANOTIFY_PIDFD_INFO_HDR_LEN;
+
        if (fh_len)
                info_len += fanotify_fid_info_len(fh_len, dot_len);
 
@@ -171,7 +184,7 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group,
        size_t event_size = FAN_EVENT_METADATA_LEN;
        struct fanotify_event *event = NULL;
        struct fsnotify_event *fsn_event;
-       unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
+       unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES);
 
        pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
 
@@ -181,8 +194,8 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group,
                goto out;
 
        event = FANOTIFY_E(fsn_event);
-       if (fid_mode)
-               event_size += fanotify_event_info_len(fid_mode, event);
+       if (info_mode)
+               event_size += fanotify_event_info_len(info_mode, event);
 
        if (event_size > count) {
                event = ERR_PTR(-EINVAL);
@@ -303,9 +316,10 @@ static int process_access_response(struct fsnotify_group *group,
        return -ENOENT;
 }
 
-static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
-                            int info_type, const char *name, size_t name_len,
-                            char __user *buf, size_t count)
+static int copy_fid_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
+                                int info_type, const char *name,
+                                size_t name_len,
+                                char __user *buf, size_t count)
 {
        struct fanotify_event_info_fid info = { };
        struct file_handle handle = { };
@@ -398,6 +412,117 @@ static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
        return info_len;
 }
 
+static int copy_pidfd_info_to_user(int pidfd,
+                                  char __user *buf,
+                                  size_t count)
+{
+       struct fanotify_event_info_pidfd info = { };
+       size_t info_len = FANOTIFY_PIDFD_INFO_HDR_LEN;
+
+       if (WARN_ON_ONCE(info_len > count))
+               return -EFAULT;
+
+       info.hdr.info_type = FAN_EVENT_INFO_TYPE_PIDFD;
+       info.hdr.len = info_len;
+       info.pidfd = pidfd;
+
+       if (copy_to_user(buf, &info, info_len))
+               return -EFAULT;
+
+       return info_len;
+}
+
+static int copy_info_records_to_user(struct fanotify_event *event,
+                                    struct fanotify_info *info,
+                                    unsigned int info_mode, int pidfd,
+                                    char __user *buf, size_t count)
+{
+       int ret, total_bytes = 0, info_type = 0;
+       unsigned int fid_mode = info_mode & FANOTIFY_FID_BITS;
+       unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD;
+
+       /*
+        * Event info records order is as follows: dir fid + name, child fid.
+        */
+       if (fanotify_event_dir_fh_len(event)) {
+               info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME :
+                                            FAN_EVENT_INFO_TYPE_DFID;
+               ret = copy_fid_info_to_user(fanotify_event_fsid(event),
+                                           fanotify_info_dir_fh(info),
+                                           info_type,
+                                           fanotify_info_name(info),
+                                           info->name_len, buf, count);
+               if (ret < 0)
+                       return ret;
+
+               buf += ret;
+               count -= ret;
+               total_bytes += ret;
+       }
+
+       if (fanotify_event_object_fh_len(event)) {
+               const char *dot = NULL;
+               int dot_len = 0;
+
+               if (fid_mode == FAN_REPORT_FID || info_type) {
+                       /*
+                        * With only group flag FAN_REPORT_FID only type FID is
+                        * reported. Second info record type is always FID.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_FID;
+               } else if ((fid_mode & FAN_REPORT_NAME) &&
+                          (event->mask & FAN_ONDIR)) {
+                       /*
+                        * With group flag FAN_REPORT_NAME, if name was not
+                        * recorded in an event on a directory, report the name
+                        * "." with info type DFID_NAME.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_DFID_NAME;
+                       dot = ".";
+                       dot_len = 1;
+               } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) ||
+                          (event->mask & FAN_ONDIR)) {
+                       /*
+                        * With group flag FAN_REPORT_DIR_FID, a single info
+                        * record has type DFID for directory entry modification
+                        * event and for event on a directory.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_DFID;
+               } else {
+                       /*
+                        * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID,
+                        * a single info record has type FID for event on a
+                        * non-directory, when there is no directory to report.
+                        * For example, on FAN_DELETE_SELF event.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_FID;
+               }
+
+               ret = copy_fid_info_to_user(fanotify_event_fsid(event),
+                                           fanotify_event_object_fh(event),
+                                           info_type, dot, dot_len,
+                                           buf, count);
+               if (ret < 0)
+                       return ret;
+
+               buf += ret;
+               count -= ret;
+               total_bytes += ret;
+       }
+
+       if (pidfd_mode) {
+               ret = copy_pidfd_info_to_user(pidfd, buf, count);
+               if (ret < 0)
+                       return ret;
+
+               buf += ret;
+               count -= ret;
+               total_bytes += ret;
+       }
+
+       return total_bytes;
+}
+
 static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                  struct fanotify_event *event,
                                  char __user *buf, size_t count)
@@ -405,15 +530,15 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
        struct fanotify_event_metadata metadata;
        struct path *path = fanotify_event_path(event);
        struct fanotify_info *info = fanotify_event_info(event);
-       unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
+       unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES);
+       unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD;
        struct file *f = NULL;
-       int ret, fd = FAN_NOFD;
-       int info_type = 0;
+       int ret, pidfd = FAN_NOPIDFD, fd = FAN_NOFD;
 
        pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
        metadata.event_len = FAN_EVENT_METADATA_LEN +
-                               fanotify_event_info_len(fid_mode, event);
+                               fanotify_event_info_len(info_mode, event);
        metadata.metadata_len = FAN_EVENT_METADATA_LEN;
        metadata.vers = FANOTIFY_METADATA_VERSION;
        metadata.reserved = 0;
@@ -442,6 +567,33 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
        }
        metadata.fd = fd;
 
+       if (pidfd_mode) {
+               /*
+                * Complain if the FAN_REPORT_PIDFD and FAN_REPORT_TID mutual
+                * exclusion is ever lifted. At the time of incorporating pidfd
+                * support within fanotify, the pidfd API only supported the
+                * creation of pidfds for thread-group leaders.
+                */
+               WARN_ON_ONCE(FAN_GROUP_FLAG(group, FAN_REPORT_TID));
+
+               /*
+                * The PIDTYPE_TGID check for an event->pid is performed
+                * preemptively in an attempt to catch out cases where the event
+                * listener reads events after the event generating process has
+                * already terminated. Report FAN_NOPIDFD to the event listener
+                * in those cases, with all other pidfd creation errors being
+                * reported as FAN_EPIDFD.
+                */
+               if (metadata.pid == 0 ||
+                   !pid_has_task(event->pid, PIDTYPE_TGID)) {
+                       pidfd = FAN_NOPIDFD;
+               } else {
+                       pidfd = pidfd_create(event->pid, 0);
+                       if (pidfd < 0)
+                               pidfd = FAN_EPIDFD;
+               }
+       }
+
        ret = -EFAULT;
        /*
         * Sanity check copy size in case get_one_event() and
@@ -462,67 +614,11 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
        if (f)
                fd_install(fd, f);
 
-       /* Event info records order is: dir fid + name, child fid */
-       if (fanotify_event_dir_fh_len(event)) {
-               info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME :
-                                            FAN_EVENT_INFO_TYPE_DFID;
-               ret = copy_info_to_user(fanotify_event_fsid(event),
-                                       fanotify_info_dir_fh(info),
-                                       info_type, fanotify_info_name(info),
-                                       info->name_len, buf, count);
+       if (info_mode) {
+               ret = copy_info_records_to_user(event, info, info_mode, pidfd,
+                                               buf, count);
                if (ret < 0)
                        goto out_close_fd;
-
-               buf += ret;
-               count -= ret;
-       }
-
-       if (fanotify_event_object_fh_len(event)) {
-               const char *dot = NULL;
-               int dot_len = 0;
-
-               if (fid_mode == FAN_REPORT_FID || info_type) {
-                       /*
-                        * With only group flag FAN_REPORT_FID only type FID is
-                        * reported. Second info record type is always FID.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_FID;
-               } else if ((fid_mode & FAN_REPORT_NAME) &&
-                          (event->mask & FAN_ONDIR)) {
-                       /*
-                        * With group flag FAN_REPORT_NAME, if name was not
-                        * recorded in an event on a directory, report the
-                        * name "." with info type DFID_NAME.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_DFID_NAME;
-                       dot = ".";
-                       dot_len = 1;
-               } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) ||
-                          (event->mask & FAN_ONDIR)) {
-                       /*
-                        * With group flag FAN_REPORT_DIR_FID, a single info
-                        * record has type DFID for directory entry modification
-                        * event and for event on a directory.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_DFID;
-               } else {
-                       /*
-                        * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID,
-                        * a single info record has type FID for event on a
-                        * non-directory, when there is no directory to report.
-                        * For example, on FAN_DELETE_SELF event.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_FID;
-               }
-
-               ret = copy_info_to_user(fanotify_event_fsid(event),
-                                       fanotify_event_object_fh(event),
-                                       info_type, dot, dot_len, buf, count);
-               if (ret < 0)
-                       goto out_close_fd;
-
-               buf += ret;
-               count -= ret;
        }
 
        return metadata.event_len;
@@ -532,6 +628,10 @@ out_close_fd:
                put_unused_fd(fd);
                fput(f);
        }
+
+       if (pidfd >= 0)
+               close_fd(pidfd);
+
        return ret;
 }
 
@@ -1077,6 +1177,14 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 #endif
                return -EINVAL;
 
+       /*
+        * A pidfd can only be returned for a thread-group leader; thus
+        * FAN_REPORT_PIDFD and FAN_REPORT_TID need to remain mutually
+        * exclusive.
+        */
+       if ((flags & FAN_REPORT_PIDFD) && (flags & FAN_REPORT_TID))
+               return -EINVAL;
+
        if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
                return -EINVAL;
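On the listener side, a group created with something like fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_PIDFD, O_RDONLY) now gets an extra FAN_EVENT_INFO_TYPE_PIDFD record appended to each event. A rough sketch of parsing it, assuming uapi headers from this release (the struct and field names match the copy_pidfd_info_to_user() hunk above; buffer size and error handling are simplified):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/fanotify.h>

	static void drain_events(int fanotify_fd)
	{
		char buf[4096];
		ssize_t len = read(fanotify_fd, buf, sizeof(buf));
		struct fanotify_event_metadata *md = (struct fanotify_event_metadata *)buf;

		for (; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
			/* Variable-length info records follow the fixed metadata. */
			struct fanotify_event_info_header *hdr =
				(void *)((char *)md + md->metadata_len);
			char *end = (char *)md + md->event_len;

			while ((char *)hdr < end) {
				if (hdr->info_type == FAN_EVENT_INFO_TYPE_PIDFD) {
					struct fanotify_event_info_pidfd *p = (void *)hdr;

					/* Negative values mean FAN_NOPIDFD/FAN_EPIDFD. */
					if (p->pidfd >= 0) {
						printf("pid %d -> pidfd %d\n",
						       md->pid, p->pidfd);
						close(p->pidfd);
					}
				}
				hdr = (void *)((char *)hdr + hdr->len);
			}
			if (md->fd >= 0)
				close(md->fd);
		}
	}
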
 
@@ -1478,7 +1586,7 @@ static int __init fanotify_user_setup(void)
                                     FANOTIFY_DEFAULT_MAX_USER_MARKS);
 
        BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
-       BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10);
+       BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 11);
        BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
 
        fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
index 30d422b..963e6ce 100644 (file)
@@ -87,15 +87,15 @@ static void fsnotify_unmount_inodes(struct super_block *sb)
 
        if (iput_inode)
                iput(iput_inode);
-       /* Wait for outstanding inode references from connectors */
-       wait_var_event(&sb->s_fsnotify_inode_refs,
-                      !atomic_long_read(&sb->s_fsnotify_inode_refs));
 }
 
 void fsnotify_sb_delete(struct super_block *sb)
 {
        fsnotify_unmount_inodes(sb);
        fsnotify_clear_marks_by_sb(sb);
+       /* Wait for outstanding object references from connectors */
+       wait_var_event(&sb->s_fsnotify_connectors,
+                      !atomic_long_read(&sb->s_fsnotify_connectors));
 }
 
 /*
index ff2063e..87d8a50 100644 (file)
@@ -27,6 +27,21 @@ static inline struct super_block *fsnotify_conn_sb(
        return container_of(conn->obj, struct super_block, s_fsnotify_marks);
 }
 
+static inline struct super_block *fsnotify_connector_sb(
+                               struct fsnotify_mark_connector *conn)
+{
+       switch (conn->type) {
+       case FSNOTIFY_OBJ_TYPE_INODE:
+               return fsnotify_conn_inode(conn)->i_sb;
+       case FSNOTIFY_OBJ_TYPE_VFSMOUNT:
+               return fsnotify_conn_mount(conn)->mnt.mnt_sb;
+       case FSNOTIFY_OBJ_TYPE_SB:
+               return fsnotify_conn_sb(conn);
+       default:
+               return NULL;
+       }
+}
+
 /* destroy all events sitting in this groups notification queue */
 extern void fsnotify_flush_notify(struct fsnotify_group *group);
 
index 98f61b3..6205124 100644 (file)
@@ -55,22 +55,27 @@ struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
 
 #include <linux/sysctl.h>
 
+static long it_zero = 0;
+static long it_int_max = INT_MAX;
+
 struct ctl_table inotify_table[] = {
        {
                .procname       = "max_user_instances",
                .data           = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &it_zero,
+               .extra2         = &it_int_max,
        },
        {
                .procname       = "max_user_watches",
                .data           = &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &it_zero,
+               .extra2         = &it_int_max,
        },
        {
                .procname       = "max_queued_events",
index d32ab34..95006d1 100644 (file)
@@ -169,6 +169,37 @@ static void fsnotify_connector_destroy_workfn(struct work_struct *work)
        }
 }
 
+static void fsnotify_get_inode_ref(struct inode *inode)
+{
+       ihold(inode);
+       atomic_long_inc(&inode->i_sb->s_fsnotify_connectors);
+}
+
+static void fsnotify_put_inode_ref(struct inode *inode)
+{
+       struct super_block *sb = inode->i_sb;
+
+       iput(inode);
+       if (atomic_long_dec_and_test(&sb->s_fsnotify_connectors))
+               wake_up_var(&sb->s_fsnotify_connectors);
+}
+
+static void fsnotify_get_sb_connectors(struct fsnotify_mark_connector *conn)
+{
+       struct super_block *sb = fsnotify_connector_sb(conn);
+
+       if (sb)
+               atomic_long_inc(&sb->s_fsnotify_connectors);
+}
+
+static void fsnotify_put_sb_connectors(struct fsnotify_mark_connector *conn)
+{
+       struct super_block *sb = fsnotify_connector_sb(conn);
+
+       if (sb && atomic_long_dec_and_test(&sb->s_fsnotify_connectors))
+               wake_up_var(&sb->s_fsnotify_connectors);
+}
+
 static void *fsnotify_detach_connector_from_object(
                                        struct fsnotify_mark_connector *conn,
                                        unsigned int *type)
@@ -182,13 +213,13 @@ static void *fsnotify_detach_connector_from_object(
        if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
                inode = fsnotify_conn_inode(conn);
                inode->i_fsnotify_mask = 0;
-               atomic_long_inc(&inode->i_sb->s_fsnotify_inode_refs);
        } else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
                fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0;
        } else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) {
                fsnotify_conn_sb(conn)->s_fsnotify_mask = 0;
        }
 
+       fsnotify_put_sb_connectors(conn);
        rcu_assign_pointer(*(conn->obj), NULL);
        conn->obj = NULL;
        conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;
@@ -209,19 +240,12 @@ static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark)
 /* Drop object reference originally held by a connector */
 static void fsnotify_drop_object(unsigned int type, void *objp)
 {
-       struct inode *inode;
-       struct super_block *sb;
-
        if (!objp)
                return;
        /* Currently only inode references are passed to be dropped */
        if (WARN_ON_ONCE(type != FSNOTIFY_OBJ_TYPE_INODE))
                return;
-       inode = objp;
-       sb = inode->i_sb;
-       iput(inode);
-       if (atomic_long_dec_and_test(&sb->s_fsnotify_inode_refs))
-               wake_up_var(&sb->s_fsnotify_inode_refs);
+       fsnotify_put_inode_ref(objp);
 }
 
 void fsnotify_put_mark(struct fsnotify_mark *mark)
@@ -493,8 +517,12 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
                conn->fsid.val[0] = conn->fsid.val[1] = 0;
                conn->flags = 0;
        }
-       if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
-               inode = igrab(fsnotify_conn_inode(conn));
+       if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
+               inode = fsnotify_conn_inode(conn);
+               fsnotify_get_inode_ref(inode);
+       }
+       fsnotify_get_sb_connectors(conn);
+
        /*
         * cmpxchg() provides the barrier so that readers of *connp can see
         * only initialized structure
@@ -502,7 +530,7 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
        if (cmpxchg(connp, NULL, conn)) {
                /* Someone else created list structure for us */
                if (inode)
-                       iput(inode);
+                       fsnotify_put_inode_ref(inode);
                kmem_cache_free(fsnotify_mark_connector_cachep, conn);
        }
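
The new helpers lean on the generic var-waitqueue machinery: the last put does atomic_long_dec_and_test() plus wake_up_var(), and a teardown path can sleep until the counter drains. A minimal, self-contained sketch of that pattern follows; the names are illustrative, not the actual fsnotify call sites.

	#include <linux/atomic.h>
	#include <linux/wait_bit.h>

	static atomic_long_t nr_connectors = ATOMIC_LONG_INIT(0);

	static void get_connector_ref(void)
	{
		atomic_long_inc(&nr_connectors);
	}

	static void put_connector_ref(void)
	{
		/* The last reference wakes anyone sleeping in wait_var_event(). */
		if (atomic_long_dec_and_test(&nr_connectors))
			wake_up_var(&nr_connectors);
	}

	static void wait_for_connectors(void)
	{
		/* Sleeps until the counter has drained to zero. */
		wait_var_event(&nr_connectors, atomic_long_read(&nr_connectors) == 0);
	}
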
 
index fab7c6a..73a3854 100644 (file)
@@ -101,8 +101,6 @@ int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
-       if (__mandatory_lock(inode))
-               return -ENOLCK;
 
        if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) ||
            ocfs2_mount_local(osb))
@@ -121,8 +119,6 @@ int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
 
        return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl);
 }
index 94bef26..daa3246 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -105,9 +105,7 @@ long vfs_truncate(const struct path *path, loff_t length)
        if (error)
                goto put_write_and_out;
 
-       error = locks_verify_truncate(inode, NULL, length);
-       if (!error)
-               error = security_path_truncate(path);
+       error = security_path_truncate(path);
        if (!error)
                error = do_truncate(mnt_userns, path->dentry, length, 0, NULL);
 
@@ -189,9 +187,7 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
        if (IS_APPEND(file_inode(f.file)))
                goto out_putf;
        sb_start_write(inode->i_sb);
-       error = locks_verify_truncate(inode, f.file, length);
-       if (!error)
-               error = security_path_truncate(&f.file->f_path);
+       error = security_path_truncate(&f.file->f_path);
        if (!error)
                error = do_truncate(file_mnt_user_ns(f.file), dentry, length,
                                    ATTR_MTIME | ATTR_CTIME, f.file);
index 41ebf52..ebde05c 100644 (file)
@@ -392,6 +392,7 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
         */
        take_dentry_name_snapshot(&name, real);
        this = lookup_one_len(name.name.name, connected, name.name.len);
+       release_dentry_name_snapshot(&name);
        err = PTR_ERR(this);
        if (IS_ERR(this)) {
                goto fail;
@@ -406,7 +407,6 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
        }
 
 out:
-       release_dentry_name_snapshot(&name);
        dput(parent);
        inode_unlock(dir);
        return this;
index 4d53d3b..d081faa 100644 (file)
@@ -392,6 +392,51 @@ out_unlock:
        return ret;
 }
 
+/*
+ * Calling iter_file_splice_write() directly from overlay's f_op may deadlock
+ * due to lock order inversion between pipe->mutex in iter_file_splice_write()
+ * and file_start_write(real.file) in ovl_write_iter().
+ *
+ * So do everything ovl_write_iter() does and call iter_file_splice_write() on
+ * the real file.
+ */
+static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
+                               loff_t *ppos, size_t len, unsigned int flags)
+{
+       struct fd real;
+       const struct cred *old_cred;
+       struct inode *inode = file_inode(out);
+       struct inode *realinode = ovl_inode_real(inode);
+       ssize_t ret;
+
+       inode_lock(inode);
+       /* Update mode */
+       ovl_copyattr(realinode, inode);
+       ret = file_remove_privs(out);
+       if (ret)
+               goto out_unlock;
+
+       ret = ovl_real_fdget(out, &real);
+       if (ret)
+               goto out_unlock;
+
+       old_cred = ovl_override_creds(inode->i_sb);
+       file_start_write(real.file);
+
+       ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
+
+       file_end_write(real.file);
+       /* Update size */
+       ovl_copyattr(realinode, inode);
+       revert_creds(old_cred);
+       fdput(real);
+
+out_unlock:
+       inode_unlock(inode);
+
+       return ret;
+}
+
 static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        struct fd real;
@@ -603,7 +648,7 @@ const struct file_operations ovl_file_operations = {
        .fadvise        = ovl_fadvise,
        .flush          = ovl_flush,
        .splice_read    = generic_file_splice_read,
-       .splice_write   = iter_file_splice_write,
+       .splice_write   = ovl_splice_write,
 
        .copy_file_range        = ovl_copy_file_range,
        .remap_file_range       = ovl_remap_file_range,
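
The path being changed is what a plain splice(2) from a pipe into an overlay file ends up in. A small userspace sketch that exercises it; /mnt/ovl/out is a hypothetical path and return values are mostly unchecked for brevity.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int pfd[2];
		int out = open("/mnt/ovl/out", O_WRONLY | O_CREAT | O_TRUNC, 0644);

		if (out < 0 || pipe(pfd) < 0)
			return 1;
		write(pfd[1], "hello\n", 6);
		/* ovl_splice_write() forwards this to the real underlying file,
		 * avoiding the pipe->mutex vs file_start_write() inversion. */
		splice(pfd[0], NULL, out, NULL, 6, 0);
		close(out);
		return 0;
	}
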
index e8ad2c2..150fdf3 100644 (file)
@@ -481,6 +481,8 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
        }
        this = lookup_one_len(p->name, dir, p->len);
        if (IS_ERR_OR_NULL(this) || !this->d_inode) {
+               /* Mark a stale entry */
+               p->is_whiteout = true;
                if (IS_ERR(this)) {
                        err = PTR_ERR(this);
                        this = NULL;
@@ -776,6 +778,9 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
                                if (err)
                                        goto out;
                        }
+               }
+               /* ovl_cache_update_ino() sets is_whiteout on stale entry */
+               if (!p->is_whiteout) {
                        if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
                                break;
                }
index 9ef4231..6d4342b 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
 
 #include "internal.h"
 
+/*
+ * New pipe buffers will be restricted to this size while the user is exceeding
+ * their pipe buffer quota. The general pipe use case needs at least two
+ * buffers: one for data yet to be read, and one for new data. If this is less
+ * than two, then a write to a non-empty pipe may block even if the pipe is not
+ * full. This can occur with GNU make jobserver or similar uses of pipes as
+ * semaphores: multiple processes may be waiting to write tokens back to the
+ * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
+ *
+ * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
+ * own risk, namely: pipe writes to non-full pipes may block until the pipe is
+ * emptied.
+ */
+#define PIPE_MIN_DEF_BUFFERS 2
+
 /*
  * The max size that a non-root user is allowed to grow the pipe. Can
  * be set by root in /proc/sys/fs/pipe-max-size
@@ -348,10 +363,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
                 * _very_ unlikely case that the pipe was full, but we got
                 * no data.
                 */
-               if (unlikely(was_full)) {
+               if (unlikely(was_full))
                        wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
-                       kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
-               }
+               kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 
                /*
                 * But because we didn't read anything, at this point we can
@@ -370,12 +384,11 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
                wake_next_reader = false;
        __pipe_unlock(pipe);
 
-       if (was_full) {
+       if (was_full)
                wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
-               kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
-       }
        if (wake_next_reader)
                wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
+       kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        if (ret > 0)
                file_accessed(filp);
        return ret;
@@ -429,9 +442,6 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
 #endif
 
        /*
-        * Epoll nonsensically wants a wakeup whether the pipe
-        * was already empty or not.
-        *
         * If it wasn't empty we try to merge new data into
         * the last buffer.
         *
@@ -440,9 +450,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
         * spanning multiple pages.
         */
        head = pipe->head;
-       was_empty = true;
+       was_empty = pipe_empty(head, pipe->tail);
        chars = total_len & (PAGE_SIZE-1);
-       if (chars && !pipe_empty(head, pipe->tail)) {
+       if (chars && !was_empty) {
                unsigned int mask = pipe->ring_size - 1;
                struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
                int offset = buf->offset + buf->len;
@@ -553,10 +563,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
                 * become empty while we dropped the lock.
                 */
                __pipe_unlock(pipe);
-               if (was_empty) {
+               if (was_empty)
                        wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
-                       kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
-               }
+               kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
                __pipe_lock(pipe);
                was_empty = pipe_empty(pipe->head, pipe->tail);
@@ -575,11 +584,13 @@ out:
         * This is particularly important for small writes, because of
         * how (for example) the GNU make jobserver uses small writes to
         * wake up pending jobs
+        *
+        * Epoll nonsensically wants a wakeup whether the pipe
+        * was already empty or not.
         */
-       if (was_empty) {
+       if (was_empty || pipe->poll_usage)
                wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
-               kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
-       }
+       kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        if (wake_next_writer)
                wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
        if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
@@ -639,6 +650,9 @@ pipe_poll(struct file *filp, poll_table *wait)
        struct pipe_inode_info *pipe = filp->private_data;
        unsigned int head, tail;
 
+       /* Epoll has some historical nasty semantics, this enables them */
+       pipe->poll_usage = 1;
+
        /*
         * Reading pipe state only -- no need for acquiring the semaphore.
         *
@@ -781,8 +795,8 @@ struct pipe_inode_info *alloc_pipe_info(void)
        user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
 
        if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
-               user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
-               pipe_bufs = 1;
+               user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
+               pipe_bufs = PIPE_MIN_DEF_BUFFERS;
        }
 
        if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
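
PIPE_MIN_DEF_BUFFERS only governs the default sizing when the owner is over the soft quota; an individual pipe can still be shrunk below it explicitly, at the cost described in the comment above. A small sketch with the existing fcntl interface:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int pfd[2];

		if (pipe(pfd) < 0)
			return 1;
		/* Capacity is rounded up to whole pages, so this typically yields
		 * a single page, below the two-buffer default floor added above. */
		fcntl(pfd[1], F_SETPIPE_SZ, 1);
		printf("capacity: %d bytes\n", fcntl(pfd[1], F_GETPIPE_SZ));
		return 0;
	}
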
index 9db7adf..af057c5 100644 (file)
@@ -365,12 +365,8 @@ out_putf:
 
 int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
 {
-       struct inode *inode;
-       int retval = -EINVAL;
-
-       inode = file_inode(file);
        if (unlikely((ssize_t) count < 0))
-               return retval;
+               return -EINVAL;
 
        /*
         * ranged mandatory locking does not apply to streams - it makes sense
@@ -381,19 +377,12 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
 
                if (unlikely(pos < 0)) {
                        if (!unsigned_offsets(file))
-                               return retval;
+                               return -EINVAL;
                        if (count >= -pos) /* both values are in 0..LLONG_MAX */
                                return -EOVERFLOW;
                } else if (unlikely((loff_t) (pos + count) < 0)) {
                        if (!unsigned_offsets(file))
-                               return retval;
-               }
-
-               if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
-                       retval = locks_mandatory_area(inode, file, pos, pos + count - 1,
-                                       read_write == READ ? F_RDLCK : F_WRLCK);
-                       if (retval < 0)
-                               return retval;
+                               return -EINVAL;
                }
        }
 
index e4a5fdd..6d4a9be 100644 (file)
@@ -99,24 +99,12 @@ static int generic_remap_checks(struct file *file_in, loff_t pos_in,
 static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
                             bool write)
 {
-       struct inode *inode = file_inode(file);
-
        if (unlikely(pos < 0 || len < 0))
                return -EINVAL;
 
        if (unlikely((loff_t) (pos + len) < 0))
                return -EINVAL;
 
-       if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
-               loff_t end = len ? pos + len - 1 : OFFSET_MAX;
-               int retval;
-
-               retval = locks_mandatory_area(inode, file, pos, end,
-                               write ? F_WRLCK : F_RDLCK);
-               if (retval < 0)
-                       return retval;
-       }
-
        return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
 }
 
index 855f0e8..2db8bcf 100644 (file)
@@ -49,8 +49,7 @@ static int copy_bio_to_actor(struct bio *bio,
 
                bytes_to_copy = min_t(int, bytes_to_copy,
                                      req_length - copied_bytes);
-               memcpy(actor_addr + actor_offset,
-                      page_address(bvec->bv_page) + bvec->bv_offset + offset,
+               memcpy(actor_addr + actor_offset, bvec_virt(bvec) + offset,
                       bytes_to_copy);
 
                actor_offset += bytes_to_copy;
@@ -177,7 +176,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                        goto out_free_bio;
                }
                /* Extract the length of the metadata block */
-               data = page_address(bvec->bv_page) + bvec->bv_offset;
+               data = bvec_virt(bvec);
                length = data[offset];
                if (offset < bvec->bv_len - 1) {
                        length |= data[offset + 1] << 8;
@@ -186,7 +185,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                                res = -EIO;
                                goto out_free_bio;
                        }
-                       data = page_address(bvec->bv_page) + bvec->bv_offset;
+                       data = bvec_virt(bvec);
                        length |= data[0] << 8;
                }
                bio_free_pages(bio);
index 233d558..b685b62 100644 (file)
@@ -101,7 +101,7 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
        while (bio_next_segment(bio, &iter_all)) {
                int avail = min(bytes, ((int)bvec->bv_len) - offset);
 
-               data = page_address(bvec->bv_page) + bvec->bv_offset;
+               data = bvec_virt(bvec);
                memcpy(buff, data + offset, avail);
                buff += avail;
                bytes -= avail;
index 97bb7d9..cb510a6 100644 (file)
@@ -76,7 +76,7 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
        while (bio_next_segment(bio, &iter_all)) {
                int avail = min(bytes, ((int)bvec->bv_len) - offset);
 
-               data = page_address(bvec->bv_page) + bvec->bv_offset;
+               data = bvec_virt(bvec);
                memcpy(buff, data + offset, avail);
                buff += avail;
                bytes -= avail;
index e80419a..68f6d09 100644 (file)
@@ -146,7 +146,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
                        }
 
                        avail = min(length, ((int)bvec->bv_len) - offset);
-                       data = page_address(bvec->bv_page) + bvec->bv_offset;
+                       data = bvec_virt(bvec);
                        length -= avail;
                        stream->buf.in = data + offset;
                        stream->buf.in_size = avail;
index bcb881e..a20e904 100644 (file)
@@ -76,7 +76,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
                        }
 
                        avail = min(length, ((int)bvec->bv_len) - offset);
-                       data = page_address(bvec->bv_page) + bvec->bv_offset;
+                       data = bvec_virt(bvec);
                        length -= avail;
                        stream->next_in = data + offset;
                        stream->avail_in = avail;
index b7cb1fa..0015cf8 100644 (file)
@@ -94,7 +94,7 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
                        }
 
                        avail = min(length, ((int)bvec->bv_len) - offset);
-                       data = page_address(bvec->bv_page) + bvec->bv_offset;
+                       data = bvec_virt(bvec);
                        length -= avail;
                        in_buf.src = data + offset;
                        in_buf.size = avail;
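
All of these squashfs hunks replace the same open-coded pointer arithmetic with the bvec_virt() helper; as far as I can tell it is a thin equivalent of the expression it replaces, roughly:

	/* Hedged sketch of bvec_virt(); see include/linux/bvec.h for the real one. */
	static inline void *bvec_virt(struct bio_vec *bvec)
	{
		return page_address(bvec->bv_page) + bvec->bv_offset;
	}
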
index 91b7f15..bcef3a6 100644 (file)
@@ -1203,7 +1203,7 @@ static int set_bdev_super(struct super_block *s, void *data)
 {
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;
-       s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
+       s->s_bdi = bdi_get(s->s_bdev->bd_disk->bdi);
 
        if (blk_queue_stable_writes(s->s_bdev->bd_disk->queue))
                s->s_iflags |= SB_I_STABLE_WRITES;
index c5509d2..e9c96a0 100644 (file)
@@ -115,6 +115,22 @@ void timerfd_clock_was_set(void)
        rcu_read_unlock();
 }
 
+static void timerfd_resume_work(struct work_struct *work)
+{
+       timerfd_clock_was_set();
+}
+
+static DECLARE_WORK(timerfd_work, timerfd_resume_work);
+
+/*
+ * Invoked from timekeeping_resume(). Defer the actual update to work so
+ * timerfd_clock_was_set() runs in task context.
+ */
+void timerfd_resume(void)
+{
+       schedule_work(&timerfd_work);
+}
+
 static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
 {
        if (ctx->might_cancel) {
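
timerfd_resume() is the usual static-work deferral: queue from the resume path, do the real update later in task context. A standalone sketch of the pattern, with illustrative names rather than the timerfd symbols:

	#include <linux/workqueue.h>

	static void resume_work_fn(struct work_struct *work)
	{
		/* Runs later in process context, where sleeping locks are allowed. */
	}

	static DECLARE_WORK(resume_work, resume_work_fn);

	static void on_resume(void)
	{
		/* Callable from the resume path: just queue and return. */
		schedule_work(&resume_work);
	}
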
index 2e4e1d1..5cfa28c 100644 (file)
@@ -1630,6 +1630,17 @@ static const char *ubifs_get_link(struct dentry *dentry,
        return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
 }
 
+static int ubifs_symlink_getattr(struct user_namespace *mnt_userns,
+                                const struct path *path, struct kstat *stat,
+                                u32 request_mask, unsigned int query_flags)
+{
+       ubifs_getattr(mnt_userns, path, stat, request_mask, query_flags);
+
+       if (IS_ENCRYPTED(d_inode(path->dentry)))
+               return fscrypt_symlink_getattr(path, stat);
+       return 0;
+}
+
 const struct address_space_operations ubifs_file_address_operations = {
        .readpage       = ubifs_readpage,
        .writepage      = ubifs_writepage,
@@ -1655,7 +1666,7 @@ const struct inode_operations ubifs_file_inode_operations = {
 const struct inode_operations ubifs_symlink_inode_operations = {
        .get_link    = ubifs_get_link,
        .setattr     = ubifs_setattr,
-       .getattr     = ubifs_getattr,
+       .getattr     = ubifs_symlink_getattr,
        .listxattr   = ubifs_listxattr,
        .update_time = ubifs_update_time,
 };
index c19dba4..70abdfa 100644 (file)
@@ -35,7 +35,6 @@
 #include "udf_i.h"
 #include "udf_sb.h"
 
-
 static int udf_readdir(struct file *file, struct dir_context *ctx)
 {
        struct inode *dir = file_inode(file);
@@ -135,7 +134,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                lfi = cfi.lengthFileIdent;
 
                if (fibh.sbh == fibh.ebh) {
-                       nameptr = fi->fileIdent + liu;
+                       nameptr = udf_get_fi_ident(fi);
                } else {
                        int poffset;    /* Unpadded ending offset */
 
@@ -153,7 +152,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                                        }
                                }
                                nameptr = copy_name;
-                               memcpy(nameptr, fi->fileIdent + liu,
+                               memcpy(nameptr, udf_get_fi_ident(fi),
                                       lfi - poffset);
                                memcpy(nameptr + lfi - poffset,
                                       fibh.ebh->b_data, poffset);
index 185c3e2..de17a97 100644 (file)
@@ -307,14 +307,14 @@ struct logicalVolDesc {
        struct regid            impIdent;
        uint8_t                 impUse[128];
        struct extent_ad        integritySeqExt;
-       uint8_t                 partitionMaps[0];
+       uint8_t                 partitionMaps[];
 } __packed;
 
 /* Generic Partition Map (ECMA 167r3 3/10.7.1) */
 struct genericPartitionMap {
        uint8_t         partitionMapType;
        uint8_t         partitionMapLength;
-       uint8_t         partitionMapping[0];
+       uint8_t         partitionMapping[];
 } __packed;
 
 /* Partition Map Type (ECMA 167r3 3/10.7.1.1) */
@@ -342,7 +342,7 @@ struct unallocSpaceDesc {
        struct tag              descTag;
        __le32                  volDescSeqNum;
        __le32                  numAllocDescs;
-       struct extent_ad        allocDescs[0];
+       struct extent_ad        allocDescs[];
 } __packed;
 
 /* Terminating Descriptor (ECMA 167r3 3/10.9) */
@@ -360,9 +360,9 @@ struct logicalVolIntegrityDesc {
        uint8_t                 logicalVolContentsUse[32];
        __le32                  numOfPartitions;
        __le32                  lengthOfImpUse;
-       __le32                  freeSpaceTable[0];
-       __le32                  sizeTable[0];
-       uint8_t                 impUse[0];
+       __le32                  freeSpaceTable[];
+       /* __le32               sizeTable[]; */
+       /* uint8_t              impUse[]; */
 } __packed;
 
 /* Integrity Type (ECMA 167r3 3/10.10.3) */
@@ -471,9 +471,9 @@ struct fileIdentDesc {
        uint8_t         lengthFileIdent;
        struct long_ad  icb;
        __le16          lengthOfImpUse;
-       uint8_t         impUse[0];
-       uint8_t         fileIdent[0];
-       uint8_t         padding[0];
+       uint8_t         impUse[];
+       /* uint8_t      fileIdent[]; */
+       /* uint8_t      padding[]; */
 } __packed;
 
 /* File Characteristics (ECMA 167r3 4/14.4.3) */
@@ -578,8 +578,8 @@ struct fileEntry {
        __le64                  uniqueID;
        __le32                  lengthExtendedAttr;
        __le32                  lengthAllocDescs;
-       uint8_t                 extendedAttr[0];
-       uint8_t                 allocDescs[0];
+       uint8_t                 extendedAttr[];
+       /* uint8_t              allocDescs[]; */
 } __packed;
 
 /* Permissions (ECMA 167r3 4/14.9.5) */
@@ -632,7 +632,7 @@ struct genericFormat {
        uint8_t         attrSubtype;
        uint8_t         reserved[3];
        __le32          attrLength;
-       uint8_t         attrData[0];
+       uint8_t         attrData[];
 } __packed;
 
 /* Character Set Information (ECMA 167r3 4/14.10.3) */
@@ -643,7 +643,7 @@ struct charSetInfo {
        __le32          attrLength;
        __le32          escapeSeqLength;
        uint8_t         charSetType;
-       uint8_t         escapeSeq[0];
+       uint8_t         escapeSeq[];
 } __packed;
 
 /* Alternate Permissions (ECMA 167r3 4/14.10.4) */
@@ -682,7 +682,7 @@ struct infoTimesExtAttr {
        __le32          attrLength;
        __le32          dataLength;
        __le32          infoTimeExistence;
-       uint8_t         infoTimes[0];
+       uint8_t         infoTimes[];
 } __packed;
 
 /* Device Specification (ECMA 167r3 4/14.10.7) */
@@ -694,7 +694,7 @@ struct deviceSpec {
        __le32          impUseLength;
        __le32          majorDeviceIdent;
        __le32          minorDeviceIdent;
-       uint8_t         impUse[0];
+       uint8_t         impUse[];
 } __packed;
 
 /* Implementation Use Extended Attr (ECMA 167r3 4/14.10.8) */
@@ -705,7 +705,7 @@ struct impUseExtAttr {
        __le32          attrLength;
        __le32          impUseLength;
        struct regid    impIdent;
-       uint8_t         impUse[0];
+       uint8_t         impUse[];
 } __packed;
 
 /* Application Use Extended Attribute (ECMA 167r3 4/14.10.9) */
@@ -716,7 +716,7 @@ struct appUseExtAttr {
        __le32          attrLength;
        __le32          appUseLength;
        struct regid    appIdent;
-       uint8_t         appUse[0];
+       uint8_t         appUse[];
 } __packed;
 
 #define EXTATTR_CHAR_SET               1
@@ -733,7 +733,7 @@ struct unallocSpaceEntry {
        struct tag      descTag;
        struct icbtag   icbTag;
        __le32          lengthAllocDescs;
-       uint8_t         allocDescs[0];
+       uint8_t         allocDescs[];
 } __packed;
 
 /* Space Bitmap Descriptor (ECMA 167r3 4/14.12) */
@@ -741,7 +741,7 @@ struct spaceBitmapDesc {
        struct tag      descTag;
        __le32          numOfBits;
        __le32          numOfBytes;
-       uint8_t         bitmap[0];
+       uint8_t         bitmap[];
 } __packed;
 
 /* Partition Integrity Entry (ECMA 167r3 4/14.13) */
@@ -780,7 +780,7 @@ struct pathComponent {
        uint8_t         componentType;
        uint8_t         lengthComponentIdent;
        __le16          componentFileVersionNum;
-       dchars          componentIdent[0];
+       dchars          componentIdent[];
 } __packed;
 
 /* File Entry (ECMA 167r3 4/14.17) */
@@ -809,8 +809,8 @@ struct extendedFileEntry {
        __le64                  uniqueID;
        __le32                  lengthExtendedAttr;
        __le32                  lengthAllocDescs;
-       uint8_t                 extendedAttr[0];
-       uint8_t                 allocDescs[0];
+       uint8_t                 extendedAttr[];
+       /* uint8_t              allocDescs[]; */
 } __packed;
 
 #endif /* _ECMA_167_H */
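
The [0] zero-length arrays above become C99 flexible array members; since a struct may carry only one flexible member, the overlapping trailing arrays are kept as comments. A minimal illustration of the idiom, using a hypothetical record type:

	#include <stdlib.h>
	#include <string.h>

	struct record {
		unsigned int len;
		unsigned char data[];	/* flexible array member, formerly data[0] */
	};

	static struct record *record_new(const void *buf, unsigned int len)
	{
		/* The flexible member contributes nothing to sizeof(*r). */
		struct record *r = malloc(sizeof(*r) + len);

		if (r) {
			r->len = len;
			memcpy(r->data, buf, len);
		}
		return r;
	}
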
index 4917670..1d6b7a5 100644 (file)
@@ -390,8 +390,7 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode,
                dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
                dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
                if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
-                                sfi->fileIdent +
-                                       le16_to_cpu(sfi->lengthOfImpUse))) {
+                                udf_get_fi_ident(sfi))) {
                        iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
                        brelse(dbh);
                        return NULL;
index eab9452..1614d30 100644 (file)
@@ -173,13 +173,22 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
                else
                        offset = le32_to_cpu(eahd->appAttrLocation);
 
-               while (offset < iinfo->i_lenEAttr) {
+               while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) {
+                       uint32_t attrLength;
+
                        gaf = (struct genericFormat *)&ea[offset];
+                       attrLength = le32_to_cpu(gaf->attrLength);
+
+                       /* Detect undersized elements and buffer overflows */
+                       if ((attrLength < sizeof(*gaf)) ||
+                           (attrLength > (iinfo->i_lenEAttr - offset)))
+                               break;
+
                        if (le32_to_cpu(gaf->attrType) == type &&
                                        gaf->attrSubtype == subtype)
                                return gaf;
                        else
-                               offset += le32_to_cpu(gaf->attrLength);
+                               offset += attrLength;
                }
        }
 
index 7c7c9bb..caeef08 100644 (file)
@@ -74,12 +74,11 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
 
        if (fileident) {
                if (adinicb || (offset + lfi < 0)) {
-                       memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi);
+                       memcpy(udf_get_fi_ident(sfi), fileident, lfi);
                } else if (offset >= 0) {
                        memcpy(fibh->ebh->b_data + offset, fileident, lfi);
                } else {
-                       memcpy((uint8_t *)sfi->fileIdent + liu, fileident,
-                               -offset);
+                       memcpy(udf_get_fi_ident(sfi), fileident, -offset);
                        memcpy(fibh->ebh->b_data, fileident - offset,
                                lfi + offset);
                }
@@ -88,11 +87,11 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
        offset += lfi;
 
        if (adinicb || (offset + padlen < 0)) {
-               memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen);
+               memset(udf_get_fi_ident(sfi) + lfi, 0x00, padlen);
        } else if (offset >= 0) {
                memset(fibh->ebh->b_data + offset, 0x00, padlen);
        } else {
-               memset((uint8_t *)sfi->padding + liu + lfi, 0x00, -offset);
+               memset(udf_get_fi_ident(sfi) + lfi, 0x00, -offset);
                memset(fibh->ebh->b_data, 0x00, padlen + offset);
        }
 
@@ -226,7 +225,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                lfi = cfi->lengthFileIdent;
 
                if (fibh->sbh == fibh->ebh) {
-                       nameptr = fi->fileIdent + liu;
+                       nameptr = udf_get_fi_ident(fi);
                } else {
                        int poffset;    /* Unpadded ending offset */
 
@@ -246,7 +245,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                                        }
                                }
                                nameptr = copy_name;
-                               memcpy(nameptr, fi->fileIdent + liu,
+                               memcpy(nameptr, udf_get_fi_ident(fi),
                                        lfi - poffset);
                                memcpy(nameptr + lfi - poffset,
                                        fibh->ebh->b_data, poffset);
index 22bc4fb..157de0e 100644 (file)
@@ -111,7 +111,7 @@ struct logicalVolIntegrityDescImpUse {
        __le16          minUDFReadRev;
        __le16          minUDFWriteRev;
        __le16          maxUDFWriteRev;
-       uint8_t         impUse[0];
+       uint8_t         impUse[];
 } __packed;
 
 /* Implementation Use Volume Descriptor (UDF 2.60 2.2.7) */
@@ -178,15 +178,6 @@ struct metadataPartitionMap {
        uint8_t         reserved2[5];
 } __packed;
 
-/* Virtual Allocation Table (UDF 1.5 2.2.10) */
-struct virtualAllocationTable15 {
-       __le32          vatEntry[0];
-       struct regid    vatIdent;
-       __le32          previousVATICBLoc;
-} __packed;
-
-#define ICBTAG_FILE_TYPE_VAT15         0x00U
-
 /* Virtual Allocation Table (UDF 2.60 2.2.11) */
 struct virtualAllocationTable20 {
        __le16          lengthHeader;
@@ -199,8 +190,8 @@ struct virtualAllocationTable20 {
        __le16          minUDFWriteRev;
        __le16          maxUDFWriteRev;
        __le16          reserved;
-       uint8_t         impUse[0];
-       __le32          vatEntry[0];
+       uint8_t         impUse[];
+       /* __le32       vatEntry[]; */
 } __packed;
 
 #define ICBTAG_FILE_TYPE_VAT20         0xF8U
@@ -217,8 +208,7 @@ struct sparingTable {
        __le16          reallocationTableLen;
        __le16          reserved;
        __le32          sequenceNum;
-       struct sparingEntry
-                       mapEntry[0];
+       struct sparingEntry mapEntry[];
 } __packed;
 
 /* Metadata File (and Metadata Mirror File) (UDF 2.60 2.2.13.1) */
@@ -241,7 +231,7 @@ struct allocDescImpUse {
 /* FreeEASpace (UDF 2.60 3.3.4.5.1.1) */
 struct freeEaSpace {
        __le16          headerChecksum;
-       uint8_t         freeEASpace[0];
+       uint8_t         freeEASpace[];
 } __packed;
 
 /* DVD Copyright Management Information (UDF 2.60 3.3.4.5.1.2) */
@@ -265,7 +255,7 @@ struct LVExtensionEA {
 /* FreeAppEASpace (UDF 2.60 3.3.4.6.1) */
 struct freeAppEASpace {
        __le16          headerChecksum;
-       uint8_t         freeEASpace[0];
+       uint8_t         freeEASpace[];
 } __packed;
 
 /* UDF Defined System Stream (UDF 2.60 3.3.7) */
index 2f83c12..b2d7c57 100644 (file)
@@ -108,16 +108,10 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
                return NULL;
        lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
        partnum = le32_to_cpu(lvid->numOfPartitions);
-       if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
-            offsetof(struct logicalVolIntegrityDesc, impUse)) /
-            (2 * sizeof(uint32_t)) < partnum) {
-               udf_err(sb, "Logical volume integrity descriptor corrupted "
-                       "(numOfPartitions = %u)!\n", partnum);
-               return NULL;
-       }
        /* The offset is to skip freeSpaceTable and sizeTable arrays */
        offset = partnum * 2 * sizeof(uint32_t);
-       return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
+       return (struct logicalVolIntegrityDescImpUse *)
+                                       (((uint8_t *)(lvid + 1)) + offset);
 }
 
 /* UDF filesystem type */
@@ -349,10 +343,10 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
                seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
        if (sbi->s_anchor != 0)
                seq_printf(seq, ",anchor=%u", sbi->s_anchor);
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
-               seq_puts(seq, ",utf8");
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
+       if (sbi->s_nls_map)
                seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
+       else
+               seq_puts(seq, ",iocharset=utf8");
 
        return 0;
 }
@@ -558,19 +552,24 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
                        /* Ignored (never implemented properly) */
                        break;
                case Opt_utf8:
-                       uopt->flags |= (1 << UDF_FLAG_UTF8);
+                       if (!remount) {
+                               unload_nls(uopt->nls_map);
+                               uopt->nls_map = NULL;
+                       }
                        break;
                case Opt_iocharset:
                        if (!remount) {
-                               if (uopt->nls_map)
-                                       unload_nls(uopt->nls_map);
-                               /*
-                                * load_nls() failure is handled later in
-                                * udf_fill_super() after all options are
-                                * parsed.
-                                */
+                               unload_nls(uopt->nls_map);
+                               uopt->nls_map = NULL;
+                       }
+                       /* When nls_map is not loaded then UTF-8 is used */
+                       if (!remount && strcmp(args[0].from, "utf8") != 0) {
                                uopt->nls_map = load_nls(args[0].from);
-                               uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
+                               if (!uopt->nls_map) {
+                                       pr_err("iocharset %s not found\n",
+                                               args[0].from);
+                                       return 0;
+                               }
                        }
                        break;
                case Opt_uforget:
@@ -1542,6 +1541,7 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct logicalVolIntegrityDesc *lvid;
        int indirections = 0;
+       u32 parts, impuselen;
 
        while (++indirections <= UDF_MAX_LVID_NESTING) {
                final_bh = NULL;
@@ -1568,15 +1568,27 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
 
                lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
                if (lvid->nextIntegrityExt.extLength == 0)
-                       return;
+                       goto check;
 
                loc = leea_to_cpu(lvid->nextIntegrityExt);
        }
 
        udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
                UDF_MAX_LVID_NESTING);
+out_err:
        brelse(sbi->s_lvid_bh);
        sbi->s_lvid_bh = NULL;
+       return;
+check:
+       parts = le32_to_cpu(lvid->numOfPartitions);
+       impuselen = le32_to_cpu(lvid->lengthOfImpUse);
+       if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
+           sizeof(struct logicalVolIntegrityDesc) + impuselen +
+           2 * parts * sizeof(u32) > sb->s_blocksize) {
+               udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
+                        "ignoring.\n", parts, impuselen);
+               goto out_err;
+       }
 }
 
 /*
@@ -2139,21 +2151,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
        if (!udf_parse_options((char *)options, &uopt, false))
                goto parse_options_failure;
 
-       if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
-           uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
-               udf_err(sb, "utf8 cannot be combined with iocharset\n");
-               goto parse_options_failure;
-       }
-       if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
-               uopt.nls_map = load_nls_default();
-               if (!uopt.nls_map)
-                       uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
-               else
-                       udf_debug("Using default NLS map\n");
-       }
-       if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
-               uopt.flags |= (1 << UDF_FLAG_UTF8);
-
        fileset.logicalBlockNum = 0xFFFFFFFF;
        fileset.partitionReferenceNum = 0xFFFF;
 
@@ -2308,8 +2305,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 error_out:
        iput(sbi->s_vat_inode);
 parse_options_failure:
-       if (uopt.nls_map)
-               unload_nls(uopt.nls_map);
+       unload_nls(uopt.nls_map);
        if (lvid_open)
                udf_close_lvid(sb);
        brelse(sbi->s_lvid_bh);
@@ -2359,8 +2355,7 @@ static void udf_put_super(struct super_block *sb)
        sbi = UDF_SB(sb);
 
        iput(sbi->s_vat_inode);
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
-               unload_nls(sbi->s_nls_map);
+       unload_nls(sbi->s_nls_map);
        if (!sb_rdonly(sb))
                udf_close_lvid(sb);
        brelse(sbi->s_lvid_bh);
index 758efe5..4fa6205 100644 (file)
@@ -20,8 +20,6 @@
 #define UDF_FLAG_UNDELETE              6
 #define UDF_FLAG_UNHIDE                        7
 #define UDF_FLAG_VARCONV               8
-#define UDF_FLAG_NLS_MAP               9
-#define UDF_FLAG_UTF8                  10
 #define UDF_FLAG_UID_FORGET     11    /* save -1 for uid to disk */
 #define UDF_FLAG_GID_FORGET     12
 #define UDF_FLAG_UID_SET       13
index 9dd0814..7e258f1 100644 (file)
@@ -130,6 +130,10 @@ static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi)
                le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent,
                UDF_NAME_PAD);
 }
+static inline uint8_t *udf_get_fi_ident(struct fileIdentDesc *fi)
+{
+       return ((uint8_t *)(fi + 1)) + le16_to_cpu(fi->lengthOfImpUse);
+}
 
 /* file.c */
 extern long udf_ioctl(struct file *, unsigned int, unsigned long);
index 5fcfa96..6225690 100644 (file)
@@ -177,7 +177,7 @@ static int udf_name_from_CS0(struct super_block *sb,
                return 0;
        }
 
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
+       if (UDF_SB(sb)->s_nls_map)
                conv_f = UDF_SB(sb)->s_nls_map->uni2char;
        else
                conv_f = NULL;
@@ -285,7 +285,7 @@ static int udf_name_to_CS0(struct super_block *sb,
        if (ocu_max_len <= 0)
                return 0;
 
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
+       if (UDF_SB(sb)->s_nls_map)
                conv_f = UDF_SB(sb)->s_nls_map->char2uni;
        else
                conv_f = NULL;
index 213a97a..1cd3f94 100644 (file)
@@ -1626,7 +1626,6 @@ xfs_swap_extents(
        struct xfs_bstat        *sbp = &sxp->sx_stat;
        int                     src_log_flags, target_log_flags;
        int                     error = 0;
-       int                     lock_flags;
        uint64_t                f;
        int                     resblks = 0;
        unsigned int            flags = 0;
@@ -1638,8 +1637,8 @@ xfs_swap_extents(
         * do the rest of the checks.
         */
        lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
-       lock_flags = XFS_MMAPLOCK_EXCL;
-       xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
+       filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
+                                   VFS_I(tip)->i_mapping);
 
        /* Verify that both files have the same format */
        if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
@@ -1711,7 +1710,6 @@ xfs_swap_extents(
         * or cancel will unlock the inodes from this point onwards.
         */
        xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
-       lock_flags |= XFS_ILOCK_EXCL;
        xfs_trans_ijoin(tp, ip, 0);
        xfs_trans_ijoin(tp, tip, 0);
 
@@ -1830,13 +1828,16 @@ xfs_swap_extents(
        trace_xfs_swap_extent_after(ip, 0);
        trace_xfs_swap_extent_after(tip, 1);
 
+out_unlock_ilock:
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       xfs_iunlock(tip, XFS_ILOCK_EXCL);
 out_unlock:
-       xfs_iunlock(ip, lock_flags);
-       xfs_iunlock(tip, lock_flags);
+       filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
+                                     VFS_I(tip)->i_mapping);
        unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
        return error;
 
 out_trans_cancel:
        xfs_trans_cancel(tp);
-       goto out_unlock;
+       goto out_unlock_ilock;
 }
index 8ff42b3..3ab7356 100644 (file)
@@ -844,7 +844,7 @@ xfs_buf_readahead_map(
 {
        struct xfs_buf          *bp;
 
-       if (bdi_read_congested(target->bt_bdev->bd_bdi))
+       if (bdi_read_congested(target->bt_bdev->bd_disk->bdi))
                return;
 
        xfs_buf_read_map(target, map, nmaps,
index cc3cfb1..3dfbdcd 100644 (file)
@@ -1302,7 +1302,7 @@ xfs_file_llseek(
  *
  * mmap_lock (MM)
  *   sb_start_pagefault(vfs, freeze)
- *     i_mmaplock (XFS - truncate serialisation)
+ *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
  *       page_lock (MM)
  *         i_lock (XFS - extent map serialisation)
  */
@@ -1323,24 +1323,27 @@ __xfs_filemap_fault(
                file_update_time(vmf->vma->vm_file);
        }
 
-       xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
        if (IS_DAX(inode)) {
                pfn_t pfn;
 
+               xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
                ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
                                (write_fault && !vmf->cow_page) ?
                                 &xfs_direct_write_iomap_ops :
                                 &xfs_read_iomap_ops);
                if (ret & VM_FAULT_NEEDDSYNC)
                        ret = dax_finish_sync_fault(vmf, pe_size, pfn);
+               xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
        } else {
-               if (write_fault)
+               if (write_fault) {
+                       xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
                        ret = iomap_page_mkwrite(vmf,
                                        &xfs_buffered_write_iomap_ops);
-               else
+                       xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+               } else {
                        ret = filemap_fault(vmf);
+               }
        }
-       xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
        if (write_fault)
                sb_end_pagefault(inode->i_sb);
index 990b72a..f00145e 100644 (file)
@@ -132,7 +132,7 @@ xfs_ilock_attr_map_shared(
 
 /*
  * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
- * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
+ * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
  * various combinations of the locks to be obtained.
  *
  * The 3 locks should always be ordered so that the IO lock is obtained first,
@@ -140,23 +140,23 @@ xfs_ilock_attr_map_shared(
  *
  * Basic locking order:
  *
- * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
+ * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
  *
  * mmap_lock locking order:
  *
  * i_rwsem -> page lock -> mmap_lock
- * mmap_lock -> i_mmap_lock -> page_lock
+ * mmap_lock -> invalidate_lock -> page_lock
  *
  * The difference in mmap_lock locking order mean that we cannot hold the
- * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
- * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
- * in get_user_pages() to map the user pages into the kernel address space for
- * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
- * page faults already hold the mmap_lock.
+ * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
+ * can fault in pages during copy in/out (for buffered IO) or require the
+ * mmap_lock in get_user_pages() to map the user pages into the kernel address
+ * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
+ * fault because page faults already hold the mmap_lock.
  *
  * Hence to serialise fully against both syscall and mmap based IO, we need to
- * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
- * taken in places where we need to invalidate the page cache in a race
+ * take both the i_rwsem and the invalidate_lock. These locks should *only* be
+ * both taken in places where we need to invalidate the page cache in a race
  * free manner (e.g. truncate, hole punch and other extent manipulation
  * functions).
  */
@@ -188,10 +188,13 @@ xfs_ilock(
                                 XFS_IOLOCK_DEP(lock_flags));
        }
 
-       if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
-       else if (lock_flags & XFS_MMAPLOCK_SHARED)
-               mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
+       if (lock_flags & XFS_MMAPLOCK_EXCL) {
+               down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
+                                 XFS_MMAPLOCK_DEP(lock_flags));
+       } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
+               down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
+                                XFS_MMAPLOCK_DEP(lock_flags));
+       }
 
        if (lock_flags & XFS_ILOCK_EXCL)
                mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
@@ -240,10 +243,10 @@ xfs_ilock_nowait(
        }
 
        if (lock_flags & XFS_MMAPLOCK_EXCL) {
-               if (!mrtryupdate(&ip->i_mmaplock))
+               if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
                        goto out_undo_iolock;
        } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
-               if (!mrtryaccess(&ip->i_mmaplock))
+               if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
                        goto out_undo_iolock;
        }
 
@@ -258,9 +261,9 @@ xfs_ilock_nowait(
 
 out_undo_mmaplock:
        if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrunlock_excl(&ip->i_mmaplock);
+               up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
-               mrunlock_shared(&ip->i_mmaplock);
+               up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 out_undo_iolock:
        if (lock_flags & XFS_IOLOCK_EXCL)
                up_write(&VFS_I(ip)->i_rwsem);
@@ -307,9 +310,9 @@ xfs_iunlock(
                up_read(&VFS_I(ip)->i_rwsem);
 
        if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrunlock_excl(&ip->i_mmaplock);
+               up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
-               mrunlock_shared(&ip->i_mmaplock);
+               up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 
        if (lock_flags & XFS_ILOCK_EXCL)
                mrunlock_excl(&ip->i_lock);
@@ -335,7 +338,7 @@ xfs_ilock_demote(
        if (lock_flags & XFS_ILOCK_EXCL)
                mrdemote(&ip->i_lock);
        if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrdemote(&ip->i_mmaplock);
+               downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
        if (lock_flags & XFS_IOLOCK_EXCL)
                downgrade_write(&VFS_I(ip)->i_rwsem);
 
@@ -343,9 +346,29 @@ xfs_ilock_demote(
 }
 
 #if defined(DEBUG) || defined(XFS_WARN)
-int
+static inline bool
+__xfs_rwsem_islocked(
+       struct rw_semaphore     *rwsem,
+       bool                    shared)
+{
+       if (!debug_locks)
+               return rwsem_is_locked(rwsem);
+
+       if (!shared)
+               return lockdep_is_held_type(rwsem, 0);
+
+       /*
+        * We are checking that the lock is held at least in shared
+        * mode but don't care that it might be held exclusively
+        * (i.e. shared | excl). Hence we check if the lock is held
+        * in any mode rather than an explicit shared mode.
+        */
+       return lockdep_is_held_type(rwsem, -1);
+}
+
+bool
 xfs_isilocked(
-       xfs_inode_t             *ip,
+       struct xfs_inode        *ip,
        uint                    lock_flags)
 {
        if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
@@ -355,20 +378,17 @@ xfs_isilocked(
        }
 
        if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
-               if (!(lock_flags & XFS_MMAPLOCK_SHARED))
-                       return !!ip->i_mmaplock.mr_writer;
-               return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
+               return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
+                               (lock_flags & XFS_IOLOCK_SHARED));
        }
 
-       if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
-               if (!(lock_flags & XFS_IOLOCK_SHARED))
-                       return !debug_locks ||
-                               lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
-               return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
+       if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
+               return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
+                               (lock_flags & XFS_IOLOCK_SHARED));
        }
 
        ASSERT(0);
-       return 0;
+       return false;
 }
 #endif
 
@@ -532,12 +552,10 @@ again:
 }
 
 /*
- * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
- * the mmaplock or the ilock, but not more than one type at a time. If we lock
- * more than one at a time, lockdep will report false positives saying we have
- * violated locking orders.  The iolock must be double-locked separately since
- * we use i_rwsem for that.  We now support taking one lock EXCL and the other
- * SHARED.
+ * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
+ * mmaplock must be double-locked separately since we use i_rwsem and
+ * invalidate_lock for that. We now support taking one lock EXCL and the
+ * other SHARED.
  */
 void
 xfs_lock_two_inodes(
@@ -555,15 +573,8 @@ xfs_lock_two_inodes(
        ASSERT(hweight32(ip1_mode) == 1);
        ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
        ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
-       ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-       ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-       ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-       ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-
+       ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
+       ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
        ASSERT(ip0->i_ino != ip1->i_ino);
 
        if (ip0->i_ino > ip1->i_ino) {
@@ -3741,11 +3752,8 @@ xfs_ilock2_io_mmap(
        ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
        if (ret)
                return ret;
-       if (ip1 == ip2)
-               xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
-       else
-               xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
-                                   ip2, XFS_MMAPLOCK_EXCL);
+       filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
+                                   VFS_I(ip2)->i_mapping);
        return 0;
 }
 
@@ -3755,12 +3763,9 @@ xfs_iunlock2_io_mmap(
        struct xfs_inode        *ip1,
        struct xfs_inode        *ip2)
 {
-       bool                    same_inode = (ip1 == ip2);
-
-       xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
-       if (!same_inode)
-               xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
+       filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
+                                     VFS_I(ip2)->i_mapping);
        inode_unlock(VFS_I(ip2));
-       if (!same_inode)
+       if (ip1 != ip2)
                inode_unlock(VFS_I(ip1));
 }
index 4b6703d..e0ae905 100644 (file)
@@ -40,7 +40,6 @@ typedef struct xfs_inode {
        /* Transaction and locking information. */
        struct xfs_inode_log_item *i_itemp;     /* logging information */
        mrlock_t                i_lock;         /* inode lock */
-       mrlock_t                i_mmaplock;     /* inode mmap IO lock */
        atomic_t                i_pincount;     /* inode pin count */
 
        /*
@@ -410,7 +409,7 @@ void                xfs_ilock(xfs_inode_t *, uint);
 int            xfs_ilock_nowait(xfs_inode_t *, uint);
 void           xfs_iunlock(xfs_inode_t *, uint);
 void           xfs_ilock_demote(xfs_inode_t *, uint);
-int            xfs_isilocked(xfs_inode_t *, uint);
+bool           xfs_isilocked(struct xfs_inode *, uint);
 uint           xfs_ilock_data_map_shared(struct xfs_inode *);
 uint           xfs_ilock_attr_map_shared(struct xfs_inode *);
 
index 2c9e26a..102cbd6 100644 (file)
@@ -709,8 +709,6 @@ xfs_fs_inode_init_once(
        atomic_set(&ip->i_pincount, 0);
        spin_lock_init(&ip->i_flags_lock);
 
-       mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
-                    "xfsino", ip->i_ino);
        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                     "xfsino", ip->i_ino);
 }
index 70055d4..ddc346a 100644 (file)
@@ -462,7 +462,7 @@ static int zonefs_file_truncate(struct inode *inode, loff_t isize)
        inode_dio_wait(inode);
 
        /* Serialize against page faults */
-       down_write(&zi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        /* Serialize against zonefs_iomap_begin() */
        mutex_lock(&zi->i_truncate_mutex);
@@ -500,7 +500,7 @@ static int zonefs_file_truncate(struct inode *inode, loff_t isize)
 
 unlock:
        mutex_unlock(&zi->i_truncate_mutex);
-       up_write(&zi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 
        return ret;
 }
@@ -575,18 +575,6 @@ static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
        return ret;
 }
 
-static vm_fault_t zonefs_filemap_fault(struct vm_fault *vmf)
-{
-       struct zonefs_inode_info *zi = ZONEFS_I(file_inode(vmf->vma->vm_file));
-       vm_fault_t ret;
-
-       down_read(&zi->i_mmap_sem);
-       ret = filemap_fault(vmf);
-       up_read(&zi->i_mmap_sem);
-
-       return ret;
-}
-
 static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
 {
        struct inode *inode = file_inode(vmf->vma->vm_file);
@@ -607,16 +595,16 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
        file_update_time(vmf->vma->vm_file);
 
        /* Serialize against truncates */
-       down_read(&zi->i_mmap_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
        ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
-       up_read(&zi->i_mmap_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
 
        sb_end_pagefault(inode->i_sb);
        return ret;
 }
 
 static const struct vm_operations_struct zonefs_file_vm_ops = {
-       .fault          = zonefs_filemap_fault,
+       .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = zonefs_filemap_page_mkwrite,
 };
@@ -1155,7 +1143,6 @@ static struct inode *zonefs_alloc_inode(struct super_block *sb)
 
        inode_init_once(&zi->i_vnode);
        mutex_init(&zi->i_truncate_mutex);
-       init_rwsem(&zi->i_mmap_sem);
        zi->i_wr_refcnt = 0;
 
        return &zi->i_vnode;
index 5114190..7b14790 100644 (file)
@@ -70,12 +70,11 @@ struct zonefs_inode_info {
         * and changes to the inode private data, and in particular changes to
         * a sequential file size on completion of direct IO writes.
         * Serialization of mmap read IOs with truncate and syscall IO
-        * operations is done with i_mmap_sem in addition to i_truncate_mutex.
-        * Only zonefs_seq_file_truncate() takes both lock (i_mmap_sem first,
-        * i_truncate_mutex second).
+        * operations is done with invalidate_lock in addition to
+        * i_truncate_mutex.  Only zonefs_seq_file_truncate() takes both lock
+        * (invalidate_lock first, i_truncate_mutex second).
         */
        struct mutex            i_truncate_mutex;
-       struct rw_semaphore     i_mmap_sem;
 
        /* guarded by i_truncate_mutex */
        unsigned int            i_wr_refcnt;
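
The zonefs hunks make the same conversion: the per-inode i_mmap_sem is gone, truncate takes the mapping's invalidate_lock exclusively, page_mkwrite takes it shared, and .fault can fall back to plain filemap_fault because the generic fault path takes the same lock where it needs to. A hedged sketch of the fault-side pattern follows; it uses the generic filemap_page_mkwrite() rather than zonefs's iomap variant purely to stay self-contained, and sketch_page_mkwrite() itself is illustrative, not part of the patch.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t sketch_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);

	/* Shared invalidate_lock serializes against truncate, as in the hunk above. */
	filemap_invalidate_lock_shared(inode->i_mapping);
	ret = filemap_page_mkwrite(vmf);
	filemap_invalidate_unlock_shared(inode->i_mapping);

	sb_end_pagefault(inode->i_sb);
	return ret;
}
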
index a2bc381..30869ab 100644 (file)
@@ -20,6 +20,7 @@
 #define METHOD_NAME__CLS        "_CLS"
 #define METHOD_NAME__CRS        "_CRS"
 #define METHOD_NAME__DDN        "_DDN"
+#define METHOD_NAME__DIS        "_DIS"
 #define METHOD_NAME__DMA        "_DMA"
 #define METHOD_NAME__HID        "_HID"
 #define METHOD_NAME__INI        "_INI"
index 1b4c458..5a38757 100644 (file)
 /* Conditional execution */
 
 #define ACPI_DEBUG_EXEC(a)              a
-#define ACPI_DEBUG_ONLY_MEMBERS(a)      a;
+#define ACPI_DEBUG_ONLY_MEMBERS(a)      a
 #define _VERBOSE_STRUCTURES
 
 /* Various object display routines for debug */
index 027faa8..a0212e6 100644 (file)
@@ -14,6 +14,14 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
 }
 #endif
 
+#ifndef acpi_os_memmap
+static inline void __iomem *acpi_os_memmap(acpi_physical_address phys,
+                                           acpi_size size)
+{
+       return ioremap_cache(phys, size);
+}
+#endif
+
 extern bool acpi_permanent_mmap;
 
 void __iomem __ref
index a433359..fa02e3f 100644 (file)
@@ -12,7 +12,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20210604
+#define ACPI_CA_VERSION                 0x20210730
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
index ef2872d..159070e 100644 (file)
@@ -24,6 +24,7 @@
  * file. Useful because they make it more difficult to inadvertently type in
  * the wrong signature.
  */
+#define ACPI_SIG_AEST           "AEST" /* Arm Error Source Table */
 #define ACPI_SIG_ASF            "ASF!" /* Alert Standard Format table */
 #define ACPI_SIG_BERT           "BERT" /* Boot Error Record Table */
 #define ACPI_SIG_BGRT           "BGRT" /* Boot Graphics Resource Table */
@@ -482,7 +483,7 @@ struct acpi_csrt_descriptor {
  * DBG2 - Debug Port Table 2
  *        Version 0 (Both main table and subtables)
  *
- * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015
+ * Conforms to "Microsoft Debug Port Table 2 (DBG2)", September 21, 2020
  *
  ******************************************************************************/
 
@@ -532,11 +533,24 @@ struct acpi_dbg2_device {
 
 #define ACPI_DBG2_16550_COMPATIBLE  0x0000
 #define ACPI_DBG2_16550_SUBSET      0x0001
+#define ACPI_DBG2_MAX311XE_SPI      0x0002
 #define ACPI_DBG2_ARM_PL011         0x0003
+#define ACPI_DBG2_MSM8X60           0x0004
+#define ACPI_DBG2_16550_NVIDIA      0x0005
+#define ACPI_DBG2_TI_OMAP           0x0006
+#define ACPI_DBG2_APM88XXXX         0x0008
+#define ACPI_DBG2_MSM8974           0x0009
+#define ACPI_DBG2_SAM5250           0x000A
+#define ACPI_DBG2_INTEL_USIF        0x000B
+#define ACPI_DBG2_IMX6              0x000C
 #define ACPI_DBG2_ARM_SBSA_32BIT    0x000D
 #define ACPI_DBG2_ARM_SBSA_GENERIC  0x000E
 #define ACPI_DBG2_ARM_DCC           0x000F
 #define ACPI_DBG2_BCM2835           0x0010
+#define ACPI_DBG2_SDM845_1_8432MHZ  0x0011
+#define ACPI_DBG2_16550_WITH_GAS    0x0012
+#define ACPI_DBG2_SDM845_7_372MHZ   0x0013
+#define ACPI_DBG2_INTEL_LPSS        0x0014
 
 #define ACPI_DBG2_1394_STANDARD     0x0000
 
index 2069ac3..a47b32a 100644 (file)
  * See http://stackoverflow.com/a/1053662/41661
  */
 
+/*******************************************************************************
+ *
+ * AEST - Arm Error Source Table
+ *
+ * Conforms to: ACPI for the Armv8 RAS Extensions 1.1 Platform Design Document
+ * September 2020.
+ *
+ ******************************************************************************/
+
+struct acpi_table_aest {
+       struct acpi_table_header header;
+       void *node_array[];
+};
+
+/* Common Subtable header - one per Node Structure (Subtable) */
+
+struct acpi_aest_hdr {
+       u8 type;
+       u16 length;
+       u8 reserved;
+       u32 node_specific_offset;
+       u32 node_interface_offset;
+       u32 node_interrupt_offset;
+       u32 node_interrupt_count;
+       u64 timestamp_rate;
+       u64 reserved1;
+       u64 error_injection_rate;
+};
+
+/* Values for Type above */
+
+#define ACPI_AEST_PROCESSOR_ERROR_NODE      0
+#define ACPI_AEST_MEMORY_ERROR_NODE         1
+#define ACPI_AEST_SMMU_ERROR_NODE           2
+#define ACPI_AEST_VENDOR_ERROR_NODE         3
+#define ACPI_AEST_GIC_ERROR_NODE            4
+#define ACPI_AEST_NODE_TYPE_RESERVED        5  /* 5 and above are reserved */
+
+/*
+ * AEST subtables (Error nodes)
+ */
+
+/* 0: Processor Error */
+
+typedef struct acpi_aest_processor {
+       u32 processor_id;
+       u8 resource_type;
+       u8 reserved;
+       u8 flags;
+       u8 revision;
+       u64 processor_affinity;
+
+} acpi_aest_processor;
+
+/* Values for resource_type above, related structs below */
+
+#define ACPI_AEST_CACHE_RESOURCE            0
+#define ACPI_AEST_TLB_RESOURCE              1
+#define ACPI_AEST_GENERIC_RESOURCE          2
+#define ACPI_AEST_RESOURCE_RESERVED         3  /* 3 and above are reserved */
+
+/* 0R: Processor Cache Resource Substructure */
+
+typedef struct acpi_aest_processor_cache {
+       u32 cache_reference;
+       u32 reserved;
+
+} acpi_aest_processor_cache;
+
+/* Values for cache_type above */
+
+#define ACPI_AEST_CACHE_DATA                0
+#define ACPI_AEST_CACHE_INSTRUCTION         1
+#define ACPI_AEST_CACHE_UNIFIED             2
+#define ACPI_AEST_CACHE_RESERVED            3  /* 3 and above are reserved */
+
+/* 1R: Processor TLB Resource Substructure */
+
+typedef struct acpi_aest_processor_tlb {
+       u32 tlb_level;
+       u32 reserved;
+
+} acpi_aest_processor_tlb;
+
+/* 2R: Processor Generic Resource Substructure */
+
+typedef struct acpi_aest_processor_generic {
+       u8 *resource;
+
+} acpi_aest_processor_generic;
+
+/* 1: Memory Error */
+
+typedef struct acpi_aest_memory {
+       u32 srat_proximity_domain;
+
+} acpi_aest_memory;
+
+/* 2: Smmu Error */
+
+typedef struct acpi_aest_smmu {
+       u32 iort_node_reference;
+       u32 subcomponent_reference;
+
+} acpi_aest_smmu;
+
+/* 3: Vendor Defined */
+
+typedef struct acpi_aest_vendor {
+       u32 acpi_hid;
+       u32 acpi_uid;
+       u8 vendor_specific_data[16];
+
+} acpi_aest_vendor;
+
+/* 4: Gic Error */
+
+typedef struct acpi_aest_gic {
+       u32 interface_type;
+       u32 instance_id;
+
+} acpi_aest_gic;
+
+/* Values for interface_type above */
+
+#define ACPI_AEST_GIC_CPU                   0
+#define ACPI_AEST_GIC_DISTRIBUTOR           1
+#define ACPI_AEST_GIC_REDISTRIBUTOR         2
+#define ACPI_AEST_GIC_ITS                   3
+#define ACPI_AEST_GIC_RESERVED              4  /* 4 and above are reserved */
+
+/* Node Interface Structure */
+
+typedef struct acpi_aest_node_interface {
+       u8 type;
+       u8 reserved[3];
+       u32 flags;
+       u64 address;
+       u32 error_record_index;
+       u32 error_record_count;
+       u64 error_record_implemented;
+       u64 error_status_reporting;
+       u64 addressing_mode;
+
+} acpi_aest_node_interface;
+
+/* Values for Type field above */
+
+#define ACPI_AEST_NODE_SYSTEM_REGISTER      0
+#define ACPI_AEST_NODE_MEMORY_MAPPED        1
+#define ACPI_AEST_XFACE_RESERVED            2  /* 2 and above are reserved */
+
+/* Node Interrupt Structure */
+
+typedef struct acpi_aest_node_interrupt {
+       u8 type;
+       u8 reserved[2];
+       u8 flags;
+       u32 gsiv;
+       u8 iort_id;
+       u8 reserved1[3];
+
+} acpi_aest_node_interrupt;
+
+/* Values for Type field above */
+
+#define ACPI_AEST_NODE_FAULT_HANDLING       0
+#define ACPI_AEST_NODE_ERROR_RECOVERY       1
+#define ACPI_AEST_XRUPT_RESERVED            2  /* 2 and above are reserved */
+
 /*******************************************************************************
  *
  * BDAT - BIOS Data ACPI Table
index 86903ac..9125e2f 100644 (file)
@@ -723,6 +723,10 @@ struct acpi_table_wpbt {
        u16 arguments_length;
 };
 
+struct acpi_wpbt_unicode {
+       u16 *unicode_string;
+};
+
 /*******************************************************************************
  *
  * WSMT - Windows SMM Security Mitigations Table
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
deleted file mode 100644 (file)
index bc45af5..0000000
+++ /dev/null
@@ -1,1337 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-instrumented.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-/*
- * This file provides wrappers with KASAN instrumentation for atomic operations.
- * To use this functionality an arch's atomic.h file needs to define all
- * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
- * this file at the end. This file provides atomic_read() that forwards to
- * arch_atomic_read() for actual atomic operation.
- * Note: if an arch atomic operation is implemented by means of other atomic
- * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
- * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
- * double instrumentation.
- */
-#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-
-#include <linux/build_bug.h>
-#include <linux/compiler.h>
-#include <linux/instrumented.h>
-
-static __always_inline int
-atomic_read(const atomic_t *v)
-{
-       instrument_atomic_read(v, sizeof(*v));
-       return arch_atomic_read(v);
-}
-
-static __always_inline int
-atomic_read_acquire(const atomic_t *v)
-{
-       instrument_atomic_read(v, sizeof(*v));
-       return arch_atomic_read_acquire(v);
-}
-
-static __always_inline void
-atomic_set(atomic_t *v, int i)
-{
-       instrument_atomic_write(v, sizeof(*v));
-       arch_atomic_set(v, i);
-}
-
-static __always_inline void
-atomic_set_release(atomic_t *v, int i)
-{
-       instrument_atomic_write(v, sizeof(*v));
-       arch_atomic_set_release(v, i);
-}
-
-static __always_inline void
-atomic_add(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_add(i, v);
-}
-
-static __always_inline int
-atomic_add_return(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_return(i, v);
-}
-
-static __always_inline int
-atomic_add_return_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_return_acquire(i, v);
-}
-
-static __always_inline int
-atomic_add_return_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_return_release(i, v);
-}
-
-static __always_inline int
-atomic_add_return_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_return_relaxed(i, v);
-}
-
-static __always_inline int
-atomic_fetch_add(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_add(i, v);
-}
-
-static __always_inline int
-atomic_fetch_add_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_add_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_add_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_add_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_add_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_sub(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_sub(i, v);
-}
-
-static __always_inline int
-atomic_sub_return(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_sub_return(i, v);
-}
-
-static __always_inline int
-atomic_sub_return_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_sub_return_acquire(i, v);
-}
-
-static __always_inline int
-atomic_sub_return_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_sub_return_release(i, v);
-}
-
-static __always_inline int
-atomic_sub_return_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_sub_return_relaxed(i, v);
-}
-
-static __always_inline int
-atomic_fetch_sub(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_sub(i, v);
-}
-
-static __always_inline int
-atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_sub_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_sub_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_sub_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_sub_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_inc(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_inc(v);
-}
-
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_return(v);
-}
-
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_return_acquire(v);
-}
-
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_return_release(v);
-}
-
-static __always_inline int
-atomic_inc_return_relaxed(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_return_relaxed(v);
-}
-
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_inc(v);
-}
-
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_inc_acquire(v);
-}
-
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_inc_release(v);
-}
-
-static __always_inline int
-atomic_fetch_inc_relaxed(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic_dec(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_dec(v);
-}
-
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_return(v);
-}
-
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_return_acquire(v);
-}
-
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_return_release(v);
-}
-
-static __always_inline int
-atomic_dec_return_relaxed(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_return_relaxed(v);
-}
-
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_dec(v);
-}
-
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_dec_acquire(v);
-}
-
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_dec_release(v);
-}
-
-static __always_inline int
-atomic_fetch_dec_relaxed(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic_and(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_and(i, v);
-}
-
-static __always_inline int
-atomic_fetch_and(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_and(i, v);
-}
-
-static __always_inline int
-atomic_fetch_and_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_and_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_and_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_and_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_and_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_andnot(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_andnot(i, v);
-}
-
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_andnot(i, v);
-}
-
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_andnot_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_or(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_or(i, v);
-}
-
-static __always_inline int
-atomic_fetch_or(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_or(i, v);
-}
-
-static __always_inline int
-atomic_fetch_or_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_or_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_or_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_or_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_or_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_xor(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_xor(i, v);
-}
-
-static __always_inline int
-atomic_fetch_xor(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_xor(i, v);
-}
-
-static __always_inline int
-atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_xor_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_xor_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_xor_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_xor_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline int
-atomic_xchg(atomic_t *v, int i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_xchg(v, i);
-}
-
-static __always_inline int
-atomic_xchg_acquire(atomic_t *v, int i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_xchg_acquire(v, i);
-}
-
-static __always_inline int
-atomic_xchg_release(atomic_t *v, int i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_xchg_release(v, i);
-}
-
-static __always_inline int
-atomic_xchg_relaxed(atomic_t *v, int i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_xchg_relaxed(v, i);
-}
-
-static __always_inline int
-atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_cmpxchg(v, old, new);
-}
-
-static __always_inline int
-atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline int
-atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_cmpxchg_release(v, old, new);
-}
-
-static __always_inline int
-atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic_try_cmpxchg(v, old, new);
-}
-
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic_try_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic_try_cmpxchg_release(v, old, new);
-}
-
-static __always_inline bool
-atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_sub_and_test(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic_dec_and_test(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic_inc_and_test(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic_add_negative(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_negative(i, v);
-}
-
-static __always_inline int
-atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_add_unless(atomic_t *v, int a, int u)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_inc_not_zero(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic_inc_unless_negative(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic_dec_unless_positive(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_unless_positive(v);
-}
-
-static __always_inline int
-atomic_dec_if_positive(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_if_positive(v);
-}
-
-static __always_inline s64
-atomic64_read(const atomic64_t *v)
-{
-       instrument_atomic_read(v, sizeof(*v));
-       return arch_atomic64_read(v);
-}
-
-static __always_inline s64
-atomic64_read_acquire(const atomic64_t *v)
-{
-       instrument_atomic_read(v, sizeof(*v));
-       return arch_atomic64_read_acquire(v);
-}
-
-static __always_inline void
-atomic64_set(atomic64_t *v, s64 i)
-{
-       instrument_atomic_write(v, sizeof(*v));
-       arch_atomic64_set(v, i);
-}
-
-static __always_inline void
-atomic64_set_release(atomic64_t *v, s64 i)
-{
-       instrument_atomic_write(v, sizeof(*v));
-       arch_atomic64_set_release(v, i);
-}
-
-static __always_inline void
-atomic64_add(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_add(i, v);
-}
-
-static __always_inline s64
-atomic64_add_return(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_return(i, v);
-}
-
-static __always_inline s64
-atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_return_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_add_return_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_return_release(i, v);
-}
-
-static __always_inline s64
-atomic64_add_return_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_return_relaxed(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_add(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic64_sub(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_sub(i, v);
-}
-
-static __always_inline s64
-atomic64_sub_return(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_sub_return(i, v);
-}
-
-static __always_inline s64
-atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_sub_return_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_sub_return_release(i, v);
-}
-
-static __always_inline s64
-atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_sub_return_relaxed(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_sub(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_sub_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_sub_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic64_inc(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_inc(v);
-}
-
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_return(v);
-}
-
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_return_acquire(v);
-}
-
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_return_release(v);
-}
-
-static __always_inline s64
-atomic64_inc_return_relaxed(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_return_relaxed(v);
-}
-
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_inc(v);
-}
-
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_inc_acquire(v);
-}
-
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_inc_release(v);
-}
-
-static __always_inline s64
-atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic64_dec(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_dec(v);
-}
-
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_return(v);
-}
-
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_return_acquire(v);
-}
-
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_return_release(v);
-}
-
-static __always_inline s64
-atomic64_dec_return_relaxed(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_return_relaxed(v);
-}
-
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_dec(v);
-}
-
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_dec_acquire(v);
-}
-
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_dec_release(v);
-}
-
-static __always_inline s64
-atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic64_and(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_and(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_and(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_and(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_and_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_and_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic64_andnot(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_andnot(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_andnot(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_andnot_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic64_or(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_or(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_or(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_or(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_or_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_or_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic64_xor(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_xor(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_xor(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_xor_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_xor_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_xchg(v, i);
-}
-
-static __always_inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_xchg_acquire(v, i);
-}
-
-static __always_inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_xchg_release(v, i);
-}
-
-static __always_inline s64
-atomic64_xchg_relaxed(atomic64_t *v, s64 i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_xchg_relaxed(v, i);
-}
-
-static __always_inline s64
-atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_cmpxchg(v, old, new);
-}
-
-static __always_inline s64
-atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline s64
-atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_cmpxchg_release(v, old, new);
-}
-
-static __always_inline s64
-atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic64_try_cmpxchg(v, old, new);
-}
-
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic64_try_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic64_try_cmpxchg_release(v, old, new);
-}
-
-static __always_inline bool
-atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic64_dec_and_test(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic64_inc_and_test(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic64_add_negative(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_negative(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic64_inc_not_zero(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic64_inc_unless_negative(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic64_dec_unless_positive(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_unless_positive(v);
-}
-
-static __always_inline s64
-atomic64_dec_if_positive(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_if_positive(v);
-}
-
-#define xchg(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_xchg(__ai_ptr, __VA_ARGS__); \
-})
-
-#define xchg_acquire(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-
-#define xchg_release(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_xchg_release(__ai_ptr, __VA_ARGS__); \
-})
-
-#define xchg_relaxed(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_acquire(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_release(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_relaxed(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_acquire(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_release(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_relaxed(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-
-#define try_cmpxchg(ptr, oldp, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       typeof(oldp) __ai_oldp = (oldp); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
-       arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-
-#define try_cmpxchg_acquire(ptr, oldp, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       typeof(oldp) __ai_oldp = (oldp); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
-       arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-
-#define try_cmpxchg_release(ptr, oldp, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       typeof(oldp) __ai_oldp = (oldp); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
-       arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-
-#define try_cmpxchg_relaxed(ptr, oldp, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       typeof(oldp) __ai_oldp = (oldp); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
-       arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-
-#define cmpxchg_local(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_local(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define sync_cmpxchg(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_double(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
-       arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
-})
-
-
-#define cmpxchg_double_local(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
-       arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// 1d7c3a25aca5c7fb031c307be4c3d24c7b48fcd5
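
The header deleted above was the script-generated layer that wrapped every atomic_*() call with sanitizer annotations before forwarding to the arch_-prefixed primitive; its removal from asm-generic presumably pairs with regenerated copies elsewhere in the tree, which this section does not show. For reference, the shape of one such wrapper, restated from the deleted file under an illustrative name, is:

#include <linux/instrumented.h>

/* Annotate the access for KASAN/KCSAN, then call the arch-provided atomic. */
static __always_inline int sketch_atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));
	return arch_atomic_read(v);
}
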
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
deleted file mode 100644 (file)
index 073cf40..0000000
+++ /dev/null
@@ -1,1014 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-long.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _ASM_GENERIC_ATOMIC_LONG_H
-#define _ASM_GENERIC_ATOMIC_LONG_H
-
-#include <linux/compiler.h>
-#include <asm/types.h>
-
-#ifdef CONFIG_64BIT
-typedef atomic64_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i)            ATOMIC64_INIT(i)
-#define atomic_long_cond_read_acquire  atomic64_cond_read_acquire
-#define atomic_long_cond_read_relaxed  atomic64_cond_read_relaxed
-#else
-typedef atomic_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i)            ATOMIC_INIT(i)
-#define atomic_long_cond_read_acquire  atomic_cond_read_acquire
-#define atomic_long_cond_read_relaxed  atomic_cond_read_relaxed
-#endif
-
-#ifdef CONFIG_64BIT
-
-static __always_inline long
-atomic_long_read(const atomic_long_t *v)
-{
-       return atomic64_read(v);
-}
-
-static __always_inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
-       return atomic64_read_acquire(v);
-}
-
-static __always_inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
-       atomic64_set(v, i);
-}
-
-static __always_inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
-       atomic64_set_release(v, i);
-}
-
-static __always_inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
-       atomic64_add(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
-       return atomic64_add_return(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_add_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
-       return atomic64_add_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_add_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_add(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_add_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_add_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
-       atomic64_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
-       return atomic64_sub_return(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_sub_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
-       return atomic64_sub_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_sub_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_sub_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_sub_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_inc(atomic_long_t *v)
-{
-       atomic64_inc(v);
-}
-
-static __always_inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
-       return atomic64_inc_return(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
-       return atomic64_inc_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
-       return atomic64_inc_return_release(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
-       return atomic64_inc_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
-       return atomic64_fetch_inc(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
-       return atomic64_fetch_inc_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
-       return atomic64_fetch_inc_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
-       return atomic64_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_dec(atomic_long_t *v)
-{
-       atomic64_dec(v);
-}
-
-static __always_inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
-       return atomic64_dec_return(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
-       return atomic64_dec_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
-       return atomic64_dec_return_release(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
-       return atomic64_dec_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
-       return atomic64_fetch_dec(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
-       return atomic64_fetch_dec_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
-       return atomic64_fetch_dec_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
-       return atomic64_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
-       atomic64_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_and_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_and_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
-       atomic64_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_andnot_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
-       atomic64_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_or_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_or_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
-       atomic64_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_xor_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_xor_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
-       return atomic64_xchg(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
-       return atomic64_xchg_acquire(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
-       return atomic64_xchg_release(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
-       return atomic64_xchg_relaxed(v, i);
-}
-
-static __always_inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
-       return atomic64_cmpxchg(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
-       return atomic64_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
-       return atomic64_cmpxchg_release(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
-       return atomic64_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
-       return atomic64_try_cmpxchg(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
-       return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
-       return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
-       return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
-       return atomic64_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
-       return atomic64_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
-       return atomic64_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
-       return atomic64_add_negative(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
-       return atomic64_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
-       return atomic64_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
-       return atomic64_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
-       return atomic64_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
-       return atomic64_dec_unless_positive(v);
-}
-
-static __always_inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
-       return atomic64_dec_if_positive(v);
-}
-
-#else /* CONFIG_64BIT */
-
-static __always_inline long
-atomic_long_read(const atomic_long_t *v)
-{
-       return atomic_read(v);
-}
-
-static __always_inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
-       return atomic_read_acquire(v);
-}
-
-static __always_inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
-       atomic_set(v, i);
-}
-
-static __always_inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
-       atomic_set_release(v, i);
-}
-
-static __always_inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
-       atomic_add(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
-       return atomic_add_return(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
-       return atomic_add_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
-       return atomic_add_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_add_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
-       return atomic_fetch_add(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_add_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_add_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
-       atomic_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
-       return atomic_sub_return(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
-       return atomic_sub_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
-       return atomic_sub_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_sub_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
-       return atomic_fetch_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_sub_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_sub_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_inc(atomic_long_t *v)
-{
-       atomic_inc(v);
-}
-
-static __always_inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
-       return atomic_inc_return(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
-       return atomic_inc_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
-       return atomic_inc_return_release(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
-       return atomic_inc_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
-       return atomic_fetch_inc(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
-       return atomic_fetch_inc_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
-       return atomic_fetch_inc_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
-       return atomic_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_dec(atomic_long_t *v)
-{
-       atomic_dec(v);
-}
-
-static __always_inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
-       return atomic_dec_return(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
-       return atomic_dec_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
-       return atomic_dec_return_release(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
-       return atomic_dec_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
-       return atomic_fetch_dec(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
-       return atomic_fetch_dec_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
-       return atomic_fetch_dec_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
-       return atomic_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
-       atomic_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
-       return atomic_fetch_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_and_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_and_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
-       atomic_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
-       return atomic_fetch_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_andnot_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
-       atomic_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
-       return atomic_fetch_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_or_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_or_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
-       atomic_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
-       return atomic_fetch_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_xor_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_xor_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
-       return atomic_xchg(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
-       return atomic_xchg_acquire(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
-       return atomic_xchg_release(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
-       return atomic_xchg_relaxed(v, i);
-}
-
-static __always_inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
-       return atomic_cmpxchg(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
-       return atomic_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
-       return atomic_cmpxchg_release(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
-       return atomic_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
-       return atomic_try_cmpxchg(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
-       return atomic_try_cmpxchg_acquire(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
-       return atomic_try_cmpxchg_release(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
-       return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
-       return atomic_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
-       return atomic_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
-       return atomic_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
-       return atomic_add_negative(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
-       return atomic_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
-       return atomic_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
-       return atomic_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
-       return atomic_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
-       return atomic_dec_unless_positive(v);
-}
-
-static __always_inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
-       return atomic_dec_if_positive(v);
-}
-
-#endif /* CONFIG_64BIT */
-#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-// a624200981f552b2c6be4f32fe44da8289f30d87
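The wrappers removed above forward atomic_long_*() to atomic64_*() on 64-bit kernels and to atomic_*() on 32-bit kernels; the arch_atomic_long_*() calls in the bitops hunks below show the regenerated, arch_-prefixed replacement this commit switches to. A minimal usage sketch, not part of the patch, with illustrative names (nr_users, get_user_slot, put_user_slot):

    #include <linux/atomic.h>

    static atomic_long_t nr_users = ATOMIC_LONG_INIT(0);

    static void get_user_slot(void)
    {
            /* same call on 32-bit and 64-bit; width follows sizeof(long) */
            atomic_long_inc(&nr_users);
    }

    static bool put_user_slot(void)
    {
            /* true when the last reference goes away */
            return atomic_long_dec_and_test(&nr_users);
    }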
index 0e7316a..3096f08 100644
  * See Documentation/atomic_bitops.txt for details.
  */
 
-static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_set_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+       arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+       arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_change_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+       arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
 {
        long old;
        unsigned long mask = BIT_MASK(nr);
@@ -38,11 +42,12 @@ static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
        if (READ_ONCE(*p) & mask)
                return 1;
 
-       old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+       old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
-static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
 {
        long old;
        unsigned long mask = BIT_MASK(nr);
@@ -51,18 +56,21 @@ static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
        if (!(READ_ONCE(*p) & mask))
                return 0;
 
-       old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+       old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
-static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
 {
        long old;
        unsigned long mask = BIT_MASK(nr);
 
        p += BIT_WORD(nr);
-       old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+       old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
+#include <asm-generic/bitops/instrumented-atomic.h>
+
 #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
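The renamed arch_*() bitops pair with the newly included instrumented-atomic.h, which is expected to define the plain-named wrappers that add sanitizer instrumentation before calling the arch op. A rough sketch of that wrapper shape, assuming instrument_atomic_write() from <linux/instrumented.h> (illustrative, not the literal header contents):

    #include <linux/instrumented.h>

    static __always_inline void set_bit(unsigned int nr, volatile unsigned long *addr)
    {
            /* tell KASAN/KCSAN about the atomic RMW, then do the real work */
            instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
            arch_set_bit(nr, addr);
    }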
index 3ae0213..630f2f6 100644
@@ -7,7 +7,7 @@
 #include <asm/barrier.h>
 
 /**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -15,8 +15,8 @@
  * the returned value is 0.
  * It can be used to implement bit locks.
  */
-static inline int test_and_set_bit_lock(unsigned int nr,
-                                       volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
 {
        long old;
        unsigned long mask = BIT_MASK(nr);
@@ -25,26 +25,27 @@ static inline int test_and_set_bit_lock(unsigned int nr,
        if (READ_ONCE(*p) & mask)
                return 1;
 
-       old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+       old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
 
 /**
- * clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
  * This operation is atomic and provides release barrier semantics.
  */
-static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+       arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 /**
- * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
@@ -54,38 +55,40 @@ static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
  *
  * See for example x86's implementation.
  */
-static inline void __clear_bit_unlock(unsigned int nr,
-                                     volatile unsigned long *p)
+static inline void
+arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 {
        unsigned long old;
 
        p += BIT_WORD(nr);
        old = READ_ONCE(*p);
        old &= ~BIT_MASK(nr);
-       atomic_long_set_release((atomic_long_t *)p, old);
+       arch_atomic_long_set_release((atomic_long_t *)p, old);
 }
 
 /**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- *                                     byte is negative, for unlock.
+ * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ *                                          byte is negative, for unlock.
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
  * This is a bit of a one-trick-pony for the filemap code, which clears
  * PG_locked and tests PG_waiters.
  */
-#ifndef clear_bit_unlock_is_negative_byte
-static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
-                                                    volatile unsigned long *p)
+#ifndef arch_clear_bit_unlock_is_negative_byte
+static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
+                                                         volatile unsigned long *p)
 {
        long old;
        unsigned long mask = BIT_MASK(nr);
 
        p += BIT_WORD(nr);
-       old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+       old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
        return !!(old & BIT(7));
 }
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
 #endif
 
+#include <asm-generic/bitops/instrumented-lock.h>
+
 #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
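test_and_set_bit_lock()/clear_bit_unlock() give a single bit acquire/release semantics, so it can serve as a minimal lock. A hedged usage sketch; my_lock(), my_unlock() and the flags word are illustrative:

    #include <linux/bitops.h>
    #include <linux/processor.h>

    static unsigned long flags;     /* bit 0 acts as the lock */

    static void my_lock(void)
    {
            while (test_and_set_bit_lock(0, &flags))
                    cpu_relax();    /* spin; acquire ordering on success */
    }

    static void my_unlock(void)
    {
            clear_bit_unlock(0, &flags);    /* release ordering */
    }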
index 7e10c4b..365377f 100644
@@ -5,7 +5,7 @@
 #include <asm/types.h>
 
 /**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
        *p  |= mask;
 }
+#define __set_bit arch___set_bit
 
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
        *p &= ~mask;
 }
+#define __clear_bit arch___clear_bit
 
 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to change
  * @addr: the address to start counting from
  *
@@ -38,16 +42,18 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline
+void arch___change_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
        *p ^= mask;
 }
+#define __change_bit arch___change_bit
 
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -55,7 +61,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * If two instances of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_set_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -64,9 +71,10 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
        *p = old | mask;
        return (old & mask) != 0;
 }
+#define __test_and_set_bit arch___test_and_set_bit
 
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -74,7 +82,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * If two instances of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -83,10 +92,11 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
        *p = old & ~mask;
        return (old & mask) != 0;
 }
+#define __test_and_clear_bit arch___test_and_clear_bit
 
 /* WARNING: non-atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
-                                           volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_change_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -95,15 +105,18 @@ static inline int __test_and_change_bit(int nr,
        *p = old ^ mask;
        return (old & mask) != 0;
 }
+#define __test_and_change_bit arch___test_and_change_bit
 
 /**
- * test_bit - Determine whether a bit is set
+ * arch_test_bit - Determine whether a bit is set
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int
+arch_test_bit(int nr, const volatile unsigned long *addr)
 {
        return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
+#define test_bit arch_test_bit
 
 #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
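The double-underscore variants above are intentionally non-atomic and are only safe when the caller already serializes access. A short sketch under that assumption; map_lock, map and alloc_id() are illustrative:

    #include <linux/bitops.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(map_lock);
    static unsigned long map[BITS_TO_LONGS(64)];

    static int alloc_id(void)
    {
            int id;

            spin_lock(&map_lock);
            id = find_first_zero_bit(map, 64);
            if (id < 64)
                    __set_bit(id, map);     /* non-atomic is fine under the lock */
            spin_unlock(&map_lock);

            return id < 64 ? id : -1;
    }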
index 1732541..62669b3 100644
                NOINSTR_TEXT                                            \
                *(.text..refcount)                                      \
                *(.ref.text)                                            \
+               *(.text.asan.* .text.tsan.*)                            \
                TEXT_CFI_JT                                             \
        MEM_KEEP(init.text*)                                            \
        MEM_KEEP(exit.text*)                                            \
index 47accec..f603325 100644
@@ -38,9 +38,9 @@ extern void public_key_free(struct public_key *key);
 struct public_key_signature {
        struct asymmetric_key_id *auth_ids[2];
        u8 *s;                  /* Signature */
-       u32 s_size;             /* Number of bytes in signature */
        u8 *digest;
-       u8 digest_size;         /* Number of bytes in digest */
+       u32 s_size;             /* Number of bytes in signature */
+       u32 digest_size;        /* Number of bytes in digest */
        const char *pkey_algo;
        const char *hash_algo;
        const char *encoding;
index 7afd730..709f286 100644
@@ -3,6 +3,7 @@
 /*
  * Common values for the SM4 algorithm
  * Copyright (C) 2018 ARM Limited or its affiliates.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
  */
 
 #ifndef _CRYPTO_SM4_H
 #define SM4_BLOCK_SIZE 16
 #define SM4_RKEY_WORDS 32
 
-struct crypto_sm4_ctx {
+struct sm4_ctx {
        u32 rkey_enc[SM4_RKEY_WORDS];
        u32 rkey_dec[SM4_RKEY_WORDS];
 };
 
-int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-                      unsigned int key_len);
-int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+/**
+ * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx:       The location where the computed key will be stored.
+ * @in_key:    The supplied key.
+ * @key_len:   The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ */
+int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
                          unsigned int key_len);
 
-void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
-void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+/**
+ * sm4_crypt_block - Encrypt or decrypt a single SM4 block
+ * @rk:                The rkey_enc for encrypt or rkey_dec for decrypt
+ * @out:       Buffer to store output data
+ * @in:        Buffer containing the input data
+ */
+void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in);
 
 #endif
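The reworked sm4.h is a small library interface: expand the key once, then process one 16-byte block at a time with the matching round-key array. A usage sketch; sm4_encrypt_one_block() is an illustrative helper and the key is assumed to be SM4_KEY_SIZE bytes:

    #include <linux/types.h>
    #include <crypto/sm4.h>

    static int sm4_encrypt_one_block(const u8 *key, unsigned int keylen,
                                     const u8 *in, u8 *out)
    {
            struct sm4_ctx ctx;
            int err;

            err = sm4_expandkey(&ctx, key, keylen);
            if (err)
                    return err;             /* invalid key size or pointer */

            /* rkey_enc for encryption, rkey_dec for decryption */
            sm4_crypt_block(ctx.rkey_enc, out, in);
            return 0;
    }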
index 063791b..d7aa42c 100644
 #define OST_CLK_PERCPU_TIMER2  3
 #define OST_CLK_PERCPU_TIMER3  4
 
+#define OST_CLK_EVENT_TIMER            1
+
+#define OST_CLK_EVENT_TIMER0   0
+#define OST_CLK_EVENT_TIMER1   1
+#define OST_CLK_EVENT_TIMER2   2
+#define OST_CLK_EVENT_TIMER3   3
+#define OST_CLK_EVENT_TIMER4   4
+#define OST_CLK_EVENT_TIMER5   5
+#define OST_CLK_EVENT_TIMER6   6
+#define OST_CLK_EVENT_TIMER7   7
+#define OST_CLK_EVENT_TIMER8   8
+#define OST_CLK_EVENT_TIMER9   9
+#define OST_CLK_EVENT_TIMER10  10
+#define OST_CLK_EVENT_TIMER11  11
+#define OST_CLK_EVENT_TIMER12  12
+#define OST_CLK_EVENT_TIMER13  13
+#define OST_CLK_EVENT_TIMER14  14
+#define OST_CLK_EVENT_TIMER15  15
+
 #endif /* __DT_BINDINGS_CLOCK_INGENIC_OST_H__ */
index d918bf3..3205699 100644
@@ -16,4 +16,8 @@
 #define SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW       1
 #define SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH      2
 
+/* Polarity of INOK signal */
+#define SMB3XX_SYSOK_INOK_ACTIVE_LOW           0
+#define SMB3XX_SYSOK_INOK_ACTIVE_HIGH          1
+
 #endif
index 72e4f7f..974d497 100644
@@ -249,7 +249,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
 /* the following numa functions are architecture-dependent */
 void acpi_numa_slit_init (struct acpi_table_slit *slit);
 
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH)
 void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
 #else
 static inline void
@@ -1380,13 +1380,11 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
 #endif
 
 #ifdef CONFIG_ACPI
-extern int acpi_platform_notify(struct device *dev, enum kobject_action action);
+extern void acpi_device_notify(struct device *dev);
+extern void acpi_device_notify_remove(struct device *dev);
 #else
-static inline int
-acpi_platform_notify(struct device *dev, enum kobject_action action)
-{
-       return 0;
-}
+static inline void acpi_device_notify(struct device *dev) { }
+static inline void acpi_device_notify_remove(struct device *dev) { }
 #endif
 
 #endif /*_LINUX_ACPI_H*/
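acpi_platform_notify(), which multiplexed add and remove through a kobject_action, is split into two void helpers. A sketch of how a caller on the driver-core side might use them; example_device_add()/example_device_del() are illustrative names:

    #include <linux/acpi.h>
    #include <linux/device.h>

    static void example_device_add(struct device *dev)
    {
            acpi_device_notify(dev);        /* tell the ACPI layer the device appeared */
    }

    static void example_device_del(struct device *dev)
    {
            acpi_device_notify_remove(dev);
    }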
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h
deleted file mode 100644
index a3dba31..0000000
+++ /dev/null
@@ -1,2361 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-fallback.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _LINUX_ATOMIC_FALLBACK_H
-#define _LINUX_ATOMIC_FALLBACK_H
-
-#include <linux/compiler.h>
-
-#ifndef arch_xchg_relaxed
-#define arch_xchg_acquire arch_xchg
-#define arch_xchg_release arch_xchg
-#define arch_xchg_relaxed arch_xchg
-#else /* arch_xchg_relaxed */
-
-#ifndef arch_xchg_acquire
-#define arch_xchg_acquire(...) \
-       __atomic_op_acquire(arch_xchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_xchg_release
-#define arch_xchg_release(...) \
-       __atomic_op_release(arch_xchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_xchg
-#define arch_xchg(...) \
-       __atomic_op_fence(arch_xchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_xchg_relaxed */
-
-#ifndef arch_cmpxchg_relaxed
-#define arch_cmpxchg_acquire arch_cmpxchg
-#define arch_cmpxchg_release arch_cmpxchg
-#define arch_cmpxchg_relaxed arch_cmpxchg
-#else /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg_acquire
-#define arch_cmpxchg_acquire(...) \
-       __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg_release
-#define arch_cmpxchg_release(...) \
-       __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg
-#define arch_cmpxchg(...) \
-       __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg64_relaxed
-#define arch_cmpxchg64_acquire arch_cmpxchg64
-#define arch_cmpxchg64_release arch_cmpxchg64
-#define arch_cmpxchg64_relaxed arch_cmpxchg64
-#else /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_cmpxchg64_acquire
-#define arch_cmpxchg64_acquire(...) \
-       __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg64_release
-#define arch_cmpxchg64_release(...) \
-       __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg64
-#define arch_cmpxchg64(...) \
-       __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_try_cmpxchg_relaxed
-#ifdef arch_try_cmpxchg
-#define arch_try_cmpxchg_acquire arch_try_cmpxchg
-#define arch_try_cmpxchg_release arch_try_cmpxchg
-#define arch_try_cmpxchg_relaxed arch_try_cmpxchg
-#endif /* arch_try_cmpxchg */
-
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(_ptr, _oldp, _new) \
-({ \
-       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-       ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
-       if (unlikely(___r != ___o)) \
-               *___op = ___r; \
-       likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg */
-
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
-({ \
-       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-       ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
-       if (unlikely(___r != ___o)) \
-               *___op = ___r; \
-       likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_acquire */
-
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
-({ \
-       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-       ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
-       if (unlikely(___r != ___o)) \
-               *___op = ___r; \
-       likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_release */
-
-#ifndef arch_try_cmpxchg_relaxed
-#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
-({ \
-       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-       ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
-       if (unlikely(___r != ___o)) \
-               *___op = ___r; \
-       likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_relaxed */
-
-#else /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(...) \
-       __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(...) \
-       __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(...) \
-       __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_read_acquire
-static __always_inline int
-arch_atomic_read_acquire(const atomic_t *v)
-{
-       return smp_load_acquire(&(v)->counter);
-}
-#define arch_atomic_read_acquire arch_atomic_read_acquire
-#endif
-
-#ifndef arch_atomic_set_release
-static __always_inline void
-arch_atomic_set_release(atomic_t *v, int i)
-{
-       smp_store_release(&(v)->counter, i);
-}
-#define arch_atomic_set_release arch_atomic_set_release
-#endif
-
-#ifndef arch_atomic_add_return_relaxed
-#define arch_atomic_add_return_acquire arch_atomic_add_return
-#define arch_atomic_add_return_release arch_atomic_add_return
-#define arch_atomic_add_return_relaxed arch_atomic_add_return
-#else /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_add_return_acquire
-static __always_inline int
-arch_atomic_add_return_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_add_return_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
-#endif
-
-#ifndef arch_atomic_add_return_release
-static __always_inline int
-arch_atomic_add_return_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_add_return_relaxed(i, v);
-}
-#define arch_atomic_add_return_release arch_atomic_add_return_release
-#endif
-
-#ifndef arch_atomic_add_return
-static __always_inline int
-arch_atomic_add_return(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_add_return_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_add_return arch_atomic_add_return
-#endif
-
-#endif /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_fetch_add_relaxed
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add
-#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
-#else /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_fetch_add_acquire
-static __always_inline int
-arch_atomic_fetch_add_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_add_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
-#endif
-
-#ifndef arch_atomic_fetch_add_release
-static __always_inline int
-arch_atomic_fetch_add_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_add_relaxed(i, v);
-}
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
-#endif
-
-#ifndef arch_atomic_fetch_add
-static __always_inline int
-arch_atomic_fetch_add(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_add_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_add arch_atomic_fetch_add
-#endif
-
-#endif /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_sub_return_relaxed
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return
-#define arch_atomic_sub_return_release arch_atomic_sub_return
-#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
-#else /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_sub_return_acquire
-static __always_inline int
-arch_atomic_sub_return_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_sub_return_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
-#endif
-
-#ifndef arch_atomic_sub_return_release
-static __always_inline int
-arch_atomic_sub_return_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_sub_return_relaxed(i, v);
-}
-#define arch_atomic_sub_return_release arch_atomic_sub_return_release
-#endif
-
-#ifndef arch_atomic_sub_return
-static __always_inline int
-arch_atomic_sub_return(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_sub_return_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_sub_return arch_atomic_sub_return
-#endif
-
-#endif /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_fetch_sub_relaxed
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
-#else /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_fetch_sub_acquire
-static __always_inline int
-arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_sub_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
-#endif
-
-#ifndef arch_atomic_fetch_sub_release
-static __always_inline int
-arch_atomic_fetch_sub_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_sub_relaxed(i, v);
-}
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
-#endif
-
-#ifndef arch_atomic_fetch_sub
-static __always_inline int
-arch_atomic_fetch_sub(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_sub_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_sub arch_atomic_fetch_sub
-#endif
-
-#endif /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_inc
-static __always_inline void
-arch_atomic_inc(atomic_t *v)
-{
-       arch_atomic_add(1, v);
-}
-#define arch_atomic_inc arch_atomic_inc
-#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-#ifdef arch_atomic_inc_return
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return
-#define arch_atomic_inc_return_release arch_atomic_inc_return
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
-#endif /* arch_atomic_inc_return */
-
-#ifndef arch_atomic_inc_return
-static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
-{
-       return arch_atomic_add_return(1, v);
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
-#endif
-
-#ifndef arch_atomic_inc_return_acquire
-static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
-{
-       return arch_atomic_add_return_acquire(1, v);
-}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
-#endif
-
-#ifndef arch_atomic_inc_return_release
-static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
-{
-       return arch_atomic_add_return_release(1, v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
-#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-static __always_inline int
-arch_atomic_inc_return_relaxed(atomic_t *v)
-{
-       return arch_atomic_add_return_relaxed(1, v);
-}
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
-#endif
-
-#else /* arch_atomic_inc_return_relaxed */
-
-#ifndef arch_atomic_inc_return_acquire
-static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
-{
-       int ret = arch_atomic_inc_return_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
-#endif
-
-#ifndef arch_atomic_inc_return_release
-static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_inc_return_relaxed(v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
-#endif
-
-#ifndef arch_atomic_inc_return
-static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_inc_return_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
-#endif
-
-#endif /* arch_atomic_inc_return_relaxed */
-
-#ifndef arch_atomic_fetch_inc_relaxed
-#ifdef arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
-#endif /* arch_atomic_fetch_inc */
-
-#ifndef arch_atomic_fetch_inc
-static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
-{
-       return arch_atomic_fetch_add(1, v);
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
-#endif
-
-#ifndef arch_atomic_fetch_inc_acquire
-static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
-{
-       return arch_atomic_fetch_add_acquire(1, v);
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic_fetch_inc_release
-static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
-{
-       return arch_atomic_fetch_add_release(1, v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
-#endif
-
-#ifndef arch_atomic_fetch_inc_relaxed
-static __always_inline int
-arch_atomic_fetch_inc_relaxed(atomic_t *v)
-{
-       return arch_atomic_fetch_add_relaxed(1, v);
-}
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
-#endif
-
-#else /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_fetch_inc_acquire
-static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
-{
-       int ret = arch_atomic_fetch_inc_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic_fetch_inc_release
-static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_inc_relaxed(v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
-#endif
-
-#ifndef arch_atomic_fetch_inc
-static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_inc_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
-#endif
-
-#endif /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_dec
-static __always_inline void
-arch_atomic_dec(atomic_t *v)
-{
-       arch_atomic_sub(1, v);
-}
-#define arch_atomic_dec arch_atomic_dec
-#endif
-
-#ifndef arch_atomic_dec_return_relaxed
-#ifdef arch_atomic_dec_return
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return
-#define arch_atomic_dec_return_release arch_atomic_dec_return
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
-#endif /* arch_atomic_dec_return */
-
-#ifndef arch_atomic_dec_return
-static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
-{
-       return arch_atomic_sub_return(1, v);
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
-#endif
-
-#ifndef arch_atomic_dec_return_acquire
-static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
-{
-       return arch_atomic_sub_return_acquire(1, v);
-}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
-#endif
-
-#ifndef arch_atomic_dec_return_release
-static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
-{
-       return arch_atomic_sub_return_release(1, v);
-}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
-#endif
-
-#ifndef arch_atomic_dec_return_relaxed
-static __always_inline int
-arch_atomic_dec_return_relaxed(atomic_t *v)
-{
-       return arch_atomic_sub_return_relaxed(1, v);
-}
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-#endif
-
-#else /* arch_atomic_dec_return_relaxed */
-
-#ifndef arch_atomic_dec_return_acquire
-static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
-{
-       int ret = arch_atomic_dec_return_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
-#endif
-
-#ifndef arch_atomic_dec_return_release
-static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_dec_return_relaxed(v);
-}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
-#endif
-
-#ifndef arch_atomic_dec_return
-static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_dec_return_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
-#endif
-
-#endif /* arch_atomic_dec_return_relaxed */
-
-#ifndef arch_atomic_fetch_dec_relaxed
-#ifdef arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
-#endif /* arch_atomic_fetch_dec */
-
-#ifndef arch_atomic_fetch_dec
-static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
-{
-       return arch_atomic_fetch_sub(1, v);
-}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
-#endif
-
-#ifndef arch_atomic_fetch_dec_acquire
-static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
-{
-       return arch_atomic_fetch_sub_acquire(1, v);
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
-{
-       return arch_atomic_fetch_sub_release(1, v);
-}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-
-#ifndef arch_atomic_fetch_dec_relaxed
-static __always_inline int
-arch_atomic_fetch_dec_relaxed(atomic_t *v)
-{
-       return arch_atomic_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
-#endif
-
-#else /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_dec_acquire
-static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
-{
-       int ret = arch_atomic_fetch_dec_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_dec_relaxed(v);
-}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-
-#ifndef arch_atomic_fetch_dec
-static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_dec_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
-#endif
-
-#endif /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_and_relaxed
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and
-#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
-#else /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_fetch_and_acquire
-static __always_inline int
-arch_atomic_fetch_and_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_and_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
-#endif
-
-#ifndef arch_atomic_fetch_and_release
-static __always_inline int
-arch_atomic_fetch_and_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_and_relaxed(i, v);
-}
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
-#endif
-
-#ifndef arch_atomic_fetch_and
-static __always_inline int
-arch_atomic_fetch_and(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_and_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_and arch_atomic_fetch_and
-#endif
-
-#endif /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_andnot
-static __always_inline void
-arch_atomic_andnot(int i, atomic_t *v)
-{
-       arch_atomic_and(~i, v);
-}
-#define arch_atomic_andnot arch_atomic_andnot
-#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-#ifdef arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
-#endif /* arch_atomic_fetch_andnot */
-
-#ifndef arch_atomic_fetch_andnot
-static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
-{
-       return arch_atomic_fetch_and(~i, v);
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
-#endif
-
-#ifndef arch_atomic_fetch_andnot_acquire
-static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
-       return arch_atomic_fetch_and_acquire(~i, v);
-}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic_fetch_andnot_release
-static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
-{
-       return arch_atomic_fetch_and_release(~i, v);
-}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-static __always_inline int
-arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
-       return arch_atomic_fetch_and_relaxed(~i, v);
-}
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
-#endif
-
-#else /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_andnot_acquire
-static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_andnot_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic_fetch_andnot_release
-static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_andnot_relaxed(i, v);
-}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic_fetch_andnot
-static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_andnot_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
-#endif
-
-#endif /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_or_relaxed
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or
-#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
-#else /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_or_acquire
-static __always_inline int
-arch_atomic_fetch_or_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_or_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
-#endif
-
-#ifndef arch_atomic_fetch_or_release
-static __always_inline int
-arch_atomic_fetch_or_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_or_relaxed(i, v);
-}
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
-#endif
-
-#ifndef arch_atomic_fetch_or
-static __always_inline int
-arch_atomic_fetch_or(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_or_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_or arch_atomic_fetch_or
-#endif
-
-#endif /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_xor_relaxed
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
-#else /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_fetch_xor_acquire
-static __always_inline int
-arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_xor_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
-#endif
-
-#ifndef arch_atomic_fetch_xor_release
-static __always_inline int
-arch_atomic_fetch_xor_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_xor_relaxed(i, v);
-}
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
-#endif
-
-#ifndef arch_atomic_fetch_xor
-static __always_inline int
-arch_atomic_fetch_xor(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_xor_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_xor arch_atomic_fetch_xor
-#endif
-
-#endif /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_xchg_relaxed
-#define arch_atomic_xchg_acquire arch_atomic_xchg
-#define arch_atomic_xchg_release arch_atomic_xchg
-#define arch_atomic_xchg_relaxed arch_atomic_xchg
-#else /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_xchg_acquire
-static __always_inline int
-arch_atomic_xchg_acquire(atomic_t *v, int i)
-{
-       int ret = arch_atomic_xchg_relaxed(v, i);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
-#endif
-
-#ifndef arch_atomic_xchg_release
-static __always_inline int
-arch_atomic_xchg_release(atomic_t *v, int i)
-{
-       __atomic_release_fence();
-       return arch_atomic_xchg_relaxed(v, i);
-}
-#define arch_atomic_xchg_release arch_atomic_xchg_release
-#endif
-
-#ifndef arch_atomic_xchg
-static __always_inline int
-arch_atomic_xchg(atomic_t *v, int i)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_xchg_relaxed(v, i);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_xchg arch_atomic_xchg
-#endif
-
-#endif /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
-#else /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_acquire
-static __always_inline int
-arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
-       int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_cmpxchg_release
-static __always_inline int
-arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
-       __atomic_release_fence();
-       return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_cmpxchg
-static __always_inline int
-arch_atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_cmpxchg_relaxed(v, old, new);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-#endif
-
-#endif /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_relaxed
-#ifdef arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
-#endif /* arch_atomic_try_cmpxchg */
-
-#ifndef arch_atomic_try_cmpxchg
-static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
-       int r, o = *old;
-       r = arch_atomic_cmpxchg(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
-       int r, o = *old;
-       r = arch_atomic_cmpxchg_acquire(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_release
-static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
-       int r, o = *old;
-       r = arch_atomic_cmpxchg_release(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_relaxed
-static __always_inline bool
-arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
-       int r, o = *old;
-       r = arch_atomic_cmpxchg_relaxed(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
-       bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_release
-static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
-       __atomic_release_fence();
-       return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_try_cmpxchg
-static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
-       bool ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-#endif
-
-#endif /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_sub_and_test
-/**
- * arch_atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic_sub_and_test(int i, atomic_t *v)
-{
-       return arch_atomic_sub_return(i, v) == 0;
-}
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
-#endif
-
-#ifndef arch_atomic_dec_and_test
-/**
- * arch_atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-arch_atomic_dec_and_test(atomic_t *v)
-{
-       return arch_atomic_dec_return(v) == 0;
-}
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
-#endif
-
-#ifndef arch_atomic_inc_and_test
-/**
- * arch_atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic_inc_and_test(atomic_t *v)
-{
-       return arch_atomic_inc_return(v) == 0;
-}
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
-#endif
-
-#ifndef arch_atomic_add_negative
-/**
- * arch_atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-arch_atomic_add_negative(int i, atomic_t *v)
-{
-       return arch_atomic_add_return(i, v) < 0;
-}
-#define arch_atomic_add_negative arch_atomic_add_negative
-#endif
-
-#ifndef arch_atomic_fetch_add_unless
-/**
- * arch_atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline int
-arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
-       int c = arch_atomic_read(v);
-
-       do {
-               if (unlikely(c == u))
-                       break;
-       } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
-
-       return c;
-}
-#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#endif
-
-#ifndef arch_atomic_add_unless
-/**
- * arch_atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-arch_atomic_add_unless(atomic_t *v, int a, int u)
-{
-       return arch_atomic_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic_add_unless arch_atomic_add_unless
-#endif
-
-#ifndef arch_atomic_inc_not_zero
-/**
- * arch_atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-arch_atomic_inc_not_zero(atomic_t *v)
-{
-       return arch_atomic_add_unless(v, 1, 0);
-}
-#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
-#endif
-
-#ifndef arch_atomic_inc_unless_negative
-static __always_inline bool
-arch_atomic_inc_unless_negative(atomic_t *v)
-{
-       int c = arch_atomic_read(v);
-
-       do {
-               if (unlikely(c < 0))
-                       return false;
-       } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
-
-       return true;
-}
-#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
-#endif
-
-#ifndef arch_atomic_dec_unless_positive
-static __always_inline bool
-arch_atomic_dec_unless_positive(atomic_t *v)
-{
-       int c = arch_atomic_read(v);
-
-       do {
-               if (unlikely(c > 0))
-                       return false;
-       } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
-
-       return true;
-}
-#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
-#endif
-
-#ifndef arch_atomic_dec_if_positive
-static __always_inline int
-arch_atomic_dec_if_positive(atomic_t *v)
-{
-       int dec, c = arch_atomic_read(v);
-
-       do {
-               dec = c - 1;
-               if (unlikely(dec < 0))
-                       break;
-       } while (!arch_atomic_try_cmpxchg(v, &c, dec));
-
-       return dec;
-}
-#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
-#endif
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#ifndef arch_atomic64_read_acquire
-static __always_inline s64
-arch_atomic64_read_acquire(const atomic64_t *v)
-{
-       return smp_load_acquire(&(v)->counter);
-}
-#define arch_atomic64_read_acquire arch_atomic64_read_acquire
-#endif
-
-#ifndef arch_atomic64_set_release
-static __always_inline void
-arch_atomic64_set_release(atomic64_t *v, s64 i)
-{
-       smp_store_release(&(v)->counter, i);
-}
-#define arch_atomic64_set_release arch_atomic64_set_release
-#endif
-
-#ifndef arch_atomic64_add_return_relaxed
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return
-#define arch_atomic64_add_return_release arch_atomic64_add_return
-#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
-#else /* arch_atomic64_add_return_relaxed */
-
-#ifndef arch_atomic64_add_return_acquire
-static __always_inline s64
-arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_add_return_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
-#endif
-
-#ifndef arch_atomic64_add_return_release
-static __always_inline s64
-arch_atomic64_add_return_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_add_return_relaxed(i, v);
-}
-#define arch_atomic64_add_return_release arch_atomic64_add_return_release
-#endif
-
-#ifndef arch_atomic64_add_return
-static __always_inline s64
-arch_atomic64_add_return(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_add_return_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_add_return arch_atomic64_add_return
-#endif
-
-#endif /* arch_atomic64_add_return_relaxed */
-
-#ifndef arch_atomic64_fetch_add_relaxed
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
-#else /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_fetch_add_acquire
-static __always_inline s64
-arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_add_release
-static __always_inline s64
-arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_add_relaxed(i, v);
-}
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
-#endif
-
-#ifndef arch_atomic64_fetch_add
-static __always_inline s64
-arch_atomic64_fetch_add(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_add_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-#endif
-
-#endif /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_sub_return_relaxed
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return
-#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
-#else /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_sub_return_acquire
-static __always_inline s64
-arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_sub_return_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
-#endif
-
-#ifndef arch_atomic64_sub_return_release
-static __always_inline s64
-arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_sub_return_relaxed(i, v);
-}
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
-#endif
-
-#ifndef arch_atomic64_sub_return
-static __always_inline s64
-arch_atomic64_sub_return(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_sub_return_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_sub_return arch_atomic64_sub_return
-#endif
-
-#endif /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_relaxed
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
-#else /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_acquire
-static __always_inline s64
-arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_sub_release
-static __always_inline s64
-arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
-#endif
-
-#ifndef arch_atomic64_fetch_sub
-static __always_inline s64
-arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_sub_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
-#endif
-
-#endif /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_inc
-static __always_inline void
-arch_atomic64_inc(atomic64_t *v)
-{
-       arch_atomic64_add(1, v);
-}
-#define arch_atomic64_inc arch_atomic64_inc
-#endif
-
-#ifndef arch_atomic64_inc_return_relaxed
-#ifdef arch_atomic64_inc_return
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
-#endif /* arch_atomic64_inc_return */
-
-#ifndef arch_atomic64_inc_return
-static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
-{
-       return arch_atomic64_add_return(1, v);
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
-#endif
-
-#ifndef arch_atomic64_inc_return_acquire
-static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
-{
-       return arch_atomic64_add_return_acquire(1, v);
-}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
-#endif
-
-#ifndef arch_atomic64_inc_return_release
-static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
-{
-       return arch_atomic64_add_return_release(1, v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
-#endif
-
-#ifndef arch_atomic64_inc_return_relaxed
-static __always_inline s64
-arch_atomic64_inc_return_relaxed(atomic64_t *v)
-{
-       return arch_atomic64_add_return_relaxed(1, v);
-}
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
-#endif
-
-#else /* arch_atomic64_inc_return_relaxed */
-
-#ifndef arch_atomic64_inc_return_acquire
-static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
-{
-       s64 ret = arch_atomic64_inc_return_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
-#endif
-
-#ifndef arch_atomic64_inc_return_release
-static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_inc_return_relaxed(v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
-#endif
-
-#ifndef arch_atomic64_inc_return
-static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_inc_return_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
-#endif
-
-#endif /* arch_atomic64_inc_return_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_relaxed
-#ifdef arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
-#endif /* arch_atomic64_fetch_inc */
-
-#ifndef arch_atomic64_fetch_inc
-static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
-{
-       return arch_atomic64_fetch_add(1, v);
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
-#endif
-
-#ifndef arch_atomic64_fetch_inc_acquire
-static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
-{
-       return arch_atomic64_fetch_add_acquire(1, v);
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_inc_release
-static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
-{
-       return arch_atomic64_fetch_add_release(1, v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
-#endif
-
-#ifndef arch_atomic64_fetch_inc_relaxed
-static __always_inline s64
-arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
-       return arch_atomic64_fetch_add_relaxed(1, v);
-}
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_acquire
-static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_inc_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_inc_release
-static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_inc_relaxed(v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
-#endif
-
-#ifndef arch_atomic64_fetch_inc
-static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_inc_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
-#endif
-
-#endif /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_dec
-static __always_inline void
-arch_atomic64_dec(atomic64_t *v)
-{
-       arch_atomic64_sub(1, v);
-}
-#define arch_atomic64_dec arch_atomic64_dec
-#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-#ifdef arch_atomic64_dec_return
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
-#endif /* arch_atomic64_dec_return */
-
-#ifndef arch_atomic64_dec_return
-static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
-{
-       return arch_atomic64_sub_return(1, v);
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
-#endif
-
-#ifndef arch_atomic64_dec_return_acquire
-static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
-{
-       return arch_atomic64_sub_return_acquire(1, v);
-}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
-#endif
-
-#ifndef arch_atomic64_dec_return_release
-static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
-{
-       return arch_atomic64_sub_return_release(1, v);
-}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
-#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-static __always_inline s64
-arch_atomic64_dec_return_relaxed(atomic64_t *v)
-{
-       return arch_atomic64_sub_return_relaxed(1, v);
-}
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
-#endif
-
-#else /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_dec_return_acquire
-static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
-{
-       s64 ret = arch_atomic64_dec_return_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
-#endif
-
-#ifndef arch_atomic64_dec_return_release
-static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_dec_return_relaxed(v);
-}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
-#endif
-
-#ifndef arch_atomic64_dec_return
-static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_dec_return_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
-#endif
-
-#endif /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_relaxed
-#ifdef arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
-#endif /* arch_atomic64_fetch_dec */
-
-#ifndef arch_atomic64_fetch_dec
-static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
-{
-       return arch_atomic64_fetch_sub(1, v);
-}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
-#endif
-
-#ifndef arch_atomic64_fetch_dec_acquire
-static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
-{
-       return arch_atomic64_fetch_sub_acquire(1, v);
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
-{
-       return arch_atomic64_fetch_sub_release(1, v);
-}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-
-#ifndef arch_atomic64_fetch_dec_relaxed
-static __always_inline s64
-arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
-       return arch_atomic64_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_acquire
-static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_dec_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_dec_relaxed(v);
-}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-
-#ifndef arch_atomic64_fetch_dec
-static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_dec_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
-#endif
-
-#endif /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_and_relaxed
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
-#else /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_fetch_and_acquire
-static __always_inline s64
-arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_and_release
-static __always_inline s64
-arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_and_relaxed(i, v);
-}
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
-#endif
-
-#ifndef arch_atomic64_fetch_and
-static __always_inline s64
-arch_atomic64_fetch_and(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_and_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_and arch_atomic64_fetch_and
-#endif
-
-#endif /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_andnot
-static __always_inline void
-arch_atomic64_andnot(s64 i, atomic64_t *v)
-{
-       arch_atomic64_and(~i, v);
-}
-#define arch_atomic64_andnot arch_atomic64_andnot
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_relaxed
-#ifdef arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
-#endif /* arch_atomic64_fetch_andnot */
-
-#ifndef arch_atomic64_fetch_andnot
-static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_fetch_and(~i, v);
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_acquire
-static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_fetch_and_acquire(~i, v);
-}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_release
-static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_fetch_and_release(~i, v);
-}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_relaxed
-static __always_inline s64
-arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_fetch_and_relaxed(~i, v);
-}
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_andnot_relaxed */
-
-#ifndef arch_atomic64_fetch_andnot_acquire
-static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_release
-static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic64_fetch_andnot
-static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_andnot_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
-#endif
-
-#endif /* arch_atomic64_fetch_andnot_relaxed */
-
-#ifndef arch_atomic64_fetch_or_relaxed
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
-#else /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_or_acquire
-static __always_inline s64
-arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_or_release
-static __always_inline s64
-arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_or_relaxed(i, v);
-}
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
-#endif
-
-#ifndef arch_atomic64_fetch_or
-static __always_inline s64
-arch_atomic64_fetch_or(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_or_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_or arch_atomic64_fetch_or
-#endif
-
-#endif /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_relaxed
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
-#else /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_acquire
-static __always_inline s64
-arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_xor_release
-static __always_inline s64
-arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
-#endif
-
-#ifndef arch_atomic64_fetch_xor
-static __always_inline s64
-arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_xor_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
-#endif
-
-#endif /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_xchg_relaxed
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg
-#define arch_atomic64_xchg_release arch_atomic64_xchg
-#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
-#else /* arch_atomic64_xchg_relaxed */
-
-#ifndef arch_atomic64_xchg_acquire
-static __always_inline s64
-arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
-       s64 ret = arch_atomic64_xchg_relaxed(v, i);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
-#endif
-
-#ifndef arch_atomic64_xchg_release
-static __always_inline s64
-arch_atomic64_xchg_release(atomic64_t *v, s64 i)
-{
-       __atomic_release_fence();
-       return arch_atomic64_xchg_relaxed(v, i);
-}
-#define arch_atomic64_xchg_release arch_atomic64_xchg_release
-#endif
-
-#ifndef arch_atomic64_xchg
-static __always_inline s64
-arch_atomic64_xchg(atomic64_t *v, s64 i)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_xchg_relaxed(v, i);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_xchg arch_atomic64_xchg
-#endif
-
-#endif /* arch_atomic64_xchg_relaxed */
-
-#ifndef arch_atomic64_cmpxchg_relaxed
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
-#else /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_cmpxchg_acquire
-static __always_inline s64
-arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
-       s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_cmpxchg_release
-static __always_inline s64
-arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
-       __atomic_release_fence();
-       return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_cmpxchg
-static __always_inline s64
-arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#endif
-
-#endif /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_relaxed
-#ifdef arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
-#endif /* arch_atomic64_try_cmpxchg */
-
-#ifndef arch_atomic64_try_cmpxchg
-static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
-       s64 r, o = *old;
-       r = arch_atomic64_cmpxchg(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
-       s64 r, o = *old;
-       r = arch_atomic64_cmpxchg_acquire(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_release
-static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
-       s64 r, o = *old;
-       r = arch_atomic64_cmpxchg_release(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_relaxed
-static __always_inline bool
-arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
-       s64 r, o = *old;
-       r = arch_atomic64_cmpxchg_relaxed(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic64_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
-       bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_release
-static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
-       __atomic_release_fence();
-       return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg
-static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
-       bool ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-#endif
-
-#endif /* arch_atomic64_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_sub_and_test
-/**
- * arch_atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_sub_return(i, v) == 0;
-}
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
-#endif
-
-#ifndef arch_atomic64_dec_and_test
-/**
- * arch_atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-arch_atomic64_dec_and_test(atomic64_t *v)
-{
-       return arch_atomic64_dec_return(v) == 0;
-}
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
-#endif
-
-#ifndef arch_atomic64_inc_and_test
-/**
- * arch_atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic64_inc_and_test(atomic64_t *v)
-{
-       return arch_atomic64_inc_return(v) == 0;
-}
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
-#endif
-
-#ifndef arch_atomic64_add_negative
-/**
- * arch_atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-arch_atomic64_add_negative(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_add_return(i, v) < 0;
-}
-#define arch_atomic64_add_negative arch_atomic64_add_negative
-#endif
-
-#ifndef arch_atomic64_fetch_add_unless
-/**
- * arch_atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline s64
-arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-       s64 c = arch_atomic64_read(v);
-
-       do {
-               if (unlikely(c == u))
-                       break;
-       } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
-
-       return c;
-}
-#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
-#endif
-
-#ifndef arch_atomic64_add_unless
-/**
- * arch_atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-       return arch_atomic64_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic64_add_unless arch_atomic64_add_unless
-#endif
-
-#ifndef arch_atomic64_inc_not_zero
-/**
- * arch_atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-arch_atomic64_inc_not_zero(atomic64_t *v)
-{
-       return arch_atomic64_add_unless(v, 1, 0);
-}
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
-#endif
-
-#ifndef arch_atomic64_inc_unless_negative
-static __always_inline bool
-arch_atomic64_inc_unless_negative(atomic64_t *v)
-{
-       s64 c = arch_atomic64_read(v);
-
-       do {
-               if (unlikely(c < 0))
-                       return false;
-       } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
-
-       return true;
-}
-#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
-#endif
-
-#ifndef arch_atomic64_dec_unless_positive
-static __always_inline bool
-arch_atomic64_dec_unless_positive(atomic64_t *v)
-{
-       s64 c = arch_atomic64_read(v);
-
-       do {
-               if (unlikely(c > 0))
-                       return false;
-       } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
-
-       return true;
-}
-#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
-#endif
-
-#ifndef arch_atomic64_dec_if_positive
-static __always_inline s64
-arch_atomic64_dec_if_positive(atomic64_t *v)
-{
-       s64 dec, c = arch_atomic64_read(v);
-
-       do {
-               dec = c - 1;
-               if (unlikely(dec < 0))
-                       break;
-       } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
-
-       return dec;
-}
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#endif
-
-#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// cca554917d7ea73d5e3e7397dd70c484cad9b2c4
index ed1d3ff..8dd57c3 100644
@@ -77,9 +77,8 @@
        __ret;                                                          \
 })
 
-#include <linux/atomic-arch-fallback.h>
-#include <asm-generic/atomic-instrumented.h>
-
-#include <asm-generic/atomic-long.h>
+#include <linux/atomic/atomic-arch-fallback.h>
+#include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-instrumented.h>
 
 #endif /* _LINUX_ATOMIC_H */
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
new file mode 100644
index 0000000..a3dba31
--- /dev/null
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -0,0 +1,2361 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#include <linux/compiler.h>
+
+#ifndef arch_xchg_relaxed
+#define arch_xchg_acquire arch_xchg
+#define arch_xchg_release arch_xchg
+#define arch_xchg_relaxed arch_xchg
+#else /* arch_xchg_relaxed */
+
+#ifndef arch_xchg_acquire
+#define arch_xchg_acquire(...) \
+       __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg_release
+#define arch_xchg_release(...) \
+       __atomic_op_release(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg
+#define arch_xchg(...) \
+       __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_xchg_relaxed */
+
+#ifndef arch_cmpxchg_relaxed
+#define arch_cmpxchg_acquire arch_cmpxchg
+#define arch_cmpxchg_release arch_cmpxchg
+#define arch_cmpxchg_relaxed arch_cmpxchg
+#else /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg_acquire
+#define arch_cmpxchg_acquire(...) \
+       __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg_release
+#define arch_cmpxchg_release(...) \
+       __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg
+#define arch_cmpxchg(...) \
+       __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg64_relaxed
+#define arch_cmpxchg64_acquire arch_cmpxchg64
+#define arch_cmpxchg64_release arch_cmpxchg64
+#define arch_cmpxchg64_relaxed arch_cmpxchg64
+#else /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_cmpxchg64_acquire
+#define arch_cmpxchg64_acquire(...) \
+       __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64_release
+#define arch_cmpxchg64_release(...) \
+       __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64
+#define arch_cmpxchg64(...) \
+       __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_try_cmpxchg_relaxed
+#ifdef arch_try_cmpxchg
+#define arch_try_cmpxchg_acquire arch_try_cmpxchg
+#define arch_try_cmpxchg_release arch_try_cmpxchg
+#define arch_try_cmpxchg_relaxed arch_try_cmpxchg
+#endif /* arch_try_cmpxchg */
+
+#ifndef arch_try_cmpxchg
+#define arch_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+       ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
+       if (unlikely(___r != ___o)) \
+               *___op = ___r; \
+       likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg */
+
+#ifndef arch_try_cmpxchg_acquire
+#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
+({ \
+       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+       ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
+       if (unlikely(___r != ___o)) \
+               *___op = ___r; \
+       likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_acquire */
+
+#ifndef arch_try_cmpxchg_release
+#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
+({ \
+       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+       ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
+       if (unlikely(___r != ___o)) \
+               *___op = ___r; \
+       likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_release */
+
+#ifndef arch_try_cmpxchg_relaxed
+#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
+({ \
+       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+       ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
+       if (unlikely(___r != ___o)) \
+               *___op = ___r; \
+       likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_relaxed */
+
+#else /* arch_try_cmpxchg_relaxed */
+
+#ifndef arch_try_cmpxchg_acquire
+#define arch_try_cmpxchg_acquire(...) \
+       __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg_release
+#define arch_try_cmpxchg_release(...) \
+       __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg
+#define arch_try_cmpxchg(...) \
+       __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_read_acquire
+static __always_inline int
+arch_atomic_read_acquire(const atomic_t *v)
+{
+       return smp_load_acquire(&(v)->counter);
+}
+#define arch_atomic_read_acquire arch_atomic_read_acquire
+#endif
+
+#ifndef arch_atomic_set_release
+static __always_inline void
+arch_atomic_set_release(atomic_t *v, int i)
+{
+       smp_store_release(&(v)->counter, i);
+}
+#define arch_atomic_set_release arch_atomic_set_release
+#endif
+
+#ifndef arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return
+#else /* arch_atomic_add_return_relaxed */
+
+#ifndef arch_atomic_add_return_acquire
+static __always_inline int
+arch_atomic_add_return_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_add_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
+#endif
+
+#ifndef arch_atomic_add_return_release
+static __always_inline int
+arch_atomic_add_return_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_add_return_relaxed(i, v);
+}
+#define arch_atomic_add_return_release arch_atomic_add_return_release
+#endif
+
+#ifndef arch_atomic_add_return
+static __always_inline int
+arch_atomic_add_return(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_add_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_add_return arch_atomic_add_return
+#endif
+
+#endif /* arch_atomic_add_return_relaxed */
+
+#ifndef arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
+#else /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_fetch_add_acquire
+static __always_inline int
+arch_atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_add_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic_fetch_add_release
+static __always_inline int
+arch_atomic_fetch_add_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_add_relaxed(i, v);
+}
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
+#endif
+
+#ifndef arch_atomic_fetch_add
+static __always_inline int
+arch_atomic_fetch_add(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_add_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#endif
+
+#endif /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
+#else /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_sub_return_acquire
+static __always_inline int
+arch_atomic_sub_return_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_sub_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
+#endif
+
+#ifndef arch_atomic_sub_return_release
+static __always_inline int
+arch_atomic_sub_return_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_sub_return_relaxed(i, v);
+}
+#define arch_atomic_sub_return_release arch_atomic_sub_return_release
+#endif
+
+#ifndef arch_atomic_sub_return
+static __always_inline int
+arch_atomic_sub_return(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_sub_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_sub_return arch_atomic_sub_return
+#endif
+
+#endif /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
+#else /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_fetch_sub_acquire
+static __always_inline int
+arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_sub_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic_fetch_sub_release
+static __always_inline int
+arch_atomic_fetch_sub_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
+#endif
+
+#ifndef arch_atomic_fetch_sub
+static __always_inline int
+arch_atomic_fetch_sub(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_sub_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#endif
+
+#endif /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_inc
+static __always_inline void
+arch_atomic_inc(atomic_t *v)
+{
+       arch_atomic_add(1, v);
+}
+#define arch_atomic_inc arch_atomic_inc
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+#ifdef arch_atomic_inc_return
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return
+#define arch_atomic_inc_return_release arch_atomic_inc_return
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
+#endif /* arch_atomic_inc_return */
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+       return arch_atomic_add_return(1, v);
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+       return arch_atomic_add_return_acquire(1, v);
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+       return arch_atomic_add_return_release(1, v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+static __always_inline int
+arch_atomic_inc_return_relaxed(atomic_t *v)
+{
+       return arch_atomic_add_return_relaxed(1, v);
+}
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+#endif
+
+#else /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+       int ret = arch_atomic_inc_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_inc_return_relaxed(v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_inc_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#endif /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_fetch_inc_relaxed
+#ifdef arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
+#endif /* arch_atomic_fetch_inc */
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+       return arch_atomic_fetch_add(1, v);
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+       return arch_atomic_fetch_add_acquire(1, v);
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+       return arch_atomic_fetch_add_release(1, v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc_relaxed
+static __always_inline int
+arch_atomic_fetch_inc_relaxed(atomic_t *v)
+{
+       return arch_atomic_fetch_add_relaxed(1, v);
+}
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+       int ret = arch_atomic_fetch_inc_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_inc_relaxed(v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_inc_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#endif /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_dec
+static __always_inline void
+arch_atomic_dec(atomic_t *v)
+{
+       arch_atomic_sub(1, v);
+}
+#define arch_atomic_dec arch_atomic_dec
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+#ifdef arch_atomic_dec_return
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return
+#define arch_atomic_dec_return_release arch_atomic_dec_return
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
+#endif /* arch_atomic_dec_return */
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+       return arch_atomic_sub_return(1, v);
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+       return arch_atomic_sub_return_acquire(1, v);
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+       return arch_atomic_sub_return_release(1, v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+static __always_inline int
+arch_atomic_dec_return_relaxed(atomic_t *v)
+{
+       return arch_atomic_sub_return_relaxed(1, v);
+}
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
+#endif
+
+#else /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+       int ret = arch_atomic_dec_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_dec_return_relaxed(v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_dec_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#endif /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_fetch_dec_relaxed
+#ifdef arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
+#endif /* arch_atomic_fetch_dec */
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+       return arch_atomic_fetch_sub(1, v);
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+       return arch_atomic_fetch_sub_acquire(1, v);
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+       return arch_atomic_fetch_sub_release(1, v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec_relaxed
+static __always_inline int
+arch_atomic_fetch_dec_relaxed(atomic_t *v)
+{
+       return arch_atomic_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+       int ret = arch_atomic_fetch_dec_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_dec_relaxed(v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_dec_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#endif /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
+#else /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_fetch_and_acquire
+static __always_inline int
+arch_atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_and_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic_fetch_and_release
+static __always_inline int
+arch_atomic_fetch_and_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_and_relaxed(i, v);
+}
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
+#endif
+
+#ifndef arch_atomic_fetch_and
+static __always_inline int
+arch_atomic_fetch_and(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_and_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#endif
+
+#endif /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_andnot
+static __always_inline void
+arch_atomic_andnot(int i, atomic_t *v)
+{
+       arch_atomic_and(~i, v);
+}
+#define arch_atomic_andnot arch_atomic_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+#ifdef arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
+#endif /* arch_atomic_fetch_andnot */
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+       return arch_atomic_fetch_and(~i, v);
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+       return arch_atomic_fetch_and_acquire(~i, v);
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+       return arch_atomic_fetch_and_release(~i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+static __always_inline int
+arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+       return arch_atomic_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_andnot_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_andnot_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#endif /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
+#else /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_or_acquire
+static __always_inline int
+arch_atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_or_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic_fetch_or_release
+static __always_inline int
+arch_atomic_fetch_or_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_or_relaxed(i, v);
+}
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
+#endif
+
+#ifndef arch_atomic_fetch_or
+static __always_inline int
+arch_atomic_fetch_or(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_or_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#endif
+
+#endif /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
+#else /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_fetch_xor_acquire
+static __always_inline int
+arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_xor_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic_fetch_xor_release
+static __always_inline int
+arch_atomic_fetch_xor_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
+#endif
+
+#ifndef arch_atomic_fetch_xor
+static __always_inline int
+arch_atomic_fetch_xor(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_xor_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#endif
+
+#endif /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg
+#define arch_atomic_xchg_release arch_atomic_xchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg
+#else /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_xchg_acquire
+static __always_inline int
+arch_atomic_xchg_acquire(atomic_t *v, int i)
+{
+       int ret = arch_atomic_xchg_relaxed(v, i);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#endif
+
+#ifndef arch_atomic_xchg_release
+static __always_inline int
+arch_atomic_xchg_release(atomic_t *v, int i)
+{
+       __atomic_release_fence();
+       return arch_atomic_xchg_relaxed(v, i);
+}
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#endif
+
+#ifndef arch_atomic_xchg
+static __always_inline int
+arch_atomic_xchg(atomic_t *v, int i)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_xchg_relaxed(v, i);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_xchg arch_atomic_xchg
+#endif
+
+#endif /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
+#else /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_acquire
+static __always_inline int
+arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+       int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_cmpxchg_release
+static __always_inline int
+arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+       __atomic_release_fence();
+       return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_cmpxchg
+static __always_inline int
+arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+#endif
+
+#endif /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+#ifdef arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
+#endif /* arch_atomic_try_cmpxchg */
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = arch_atomic_cmpxchg(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = arch_atomic_cmpxchg_acquire(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = arch_atomic_cmpxchg_release(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = arch_atomic_cmpxchg_relaxed(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+       bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+       __atomic_release_fence();
+       return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+       bool ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#endif /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_sub_and_test
+/**
+ * arch_atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_sub_and_test(int i, atomic_t *v)
+{
+       return arch_atomic_sub_return(i, v) == 0;
+}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+#endif
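A brief usage illustration (editorial sketch, not part of the generated header): the *_and_test helpers are what reference-count "put" paths are usually built on, via the plain atomic_*() wrappers generated later on top of these arch_ fallbacks. The struct foo and foo_destroy() names below are hypothetical.

struct foo {
	atomic_t refcount;
	/* ... object payload ... */
};

static void foo_destroy(struct foo *f);	/* hypothetical teardown helper */

/* Drop 'nr' references in one RMW; free the object when the count hits zero. */
static void foo_put_many(struct foo *f, int nr)
{
	if (atomic_sub_and_test(nr, &f->refcount))
		foo_destroy(f);
}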
+
+#ifndef arch_atomic_dec_and_test
+/**
+ * arch_atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic_dec_and_test(atomic_t *v)
+{
+       return arch_atomic_dec_return(v) == 0;
+}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+#endif
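The single-reference put is the same pattern on top of dec_and_test, again a hedged sketch using the hypothetical struct foo from the previous example.

/* Drop one reference; whichever caller sees 'true' owns the teardown. */
static void foo_put(struct foo *f)
{
	if (atomic_dec_and_test(&f->refcount))
		foo_destroy(f);
}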
+
+#ifndef arch_atomic_inc_and_test
+/**
+ * arch_atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_inc_and_test(atomic_t *v)
+{
+       return arch_atomic_inc_return(v) == 0;
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+#endif
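inc_and_test is the mirror image: with a counter pre-biased to a negative value, the thread whose increment brings it back to zero knows it handled the last outstanding item. A hedged sketch, assuming pending was initialised to -nr_parts elsewhere.

/* Called once per completed part; the final completion triggers all_done(). */
static void part_done(atomic_t *pending, void (*all_done)(void *), void *data)
{
	if (atomic_inc_and_test(pending))
		all_done(data);
}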
+
+#ifndef arch_atomic_add_negative
+/**
+ * arch_atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic_add_negative(int i, atomic_t *v)
+{
+       return arch_atomic_add_return(i, v) < 0;
+}
+#define arch_atomic_add_negative arch_atomic_add_negative
+#endif
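add_negative lets a caller charge against a signed budget and detect overdraw in a single atomic operation. A minimal sketch, assuming budget was initialised to the allowed capacity; note that the rollback is a separate RMW, so other CPUs may briefly observe the negative value.

/* Charge 'amount' against the budget, undoing the charge if it overdraws. */
static bool try_charge(atomic_t *budget, int amount)
{
	if (atomic_add_negative(-amount, budget)) {
		atomic_add(amount, budget);
		return false;
	}
	return true;
}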
+
+#ifndef arch_atomic_fetch_add_unless
+/**
+ * arch_atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static __always_inline int
+arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+       int c = arch_atomic_read(v);
+
+       do {
+               if (unlikely(c == u))
+                       break;
+       } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
+
+       return c;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+#endif
+
+#ifndef arch_atomic_add_unless
+/**
+ * arch_atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic_add_unless(atomic_t *v, int a, int u)
+{
+       return arch_atomic_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic_add_unless arch_atomic_add_unless
+#endif
+
+#ifndef arch_atomic_inc_not_zero
+/**
+ * arch_atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic_inc_not_zero(atomic_t *v)
+{
+       return arch_atomic_add_unless(v, 1, 0);
+}
+#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
+#endif
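inc_not_zero, layered on fetch_add_unless/add_unless above, is the usual way to take a reference on an object reached through a lookup structure while its refcount may already be dropping to zero. A hedged sketch reusing the hypothetical struct foo.

/* Return the object with an extra reference held, or NULL if it is dying. */
static struct foo *foo_tryget(struct foo *f)
{
	if (f && atomic_inc_not_zero(&f->refcount))
		return f;
	return NULL;
}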
+
+#ifndef arch_atomic_inc_unless_negative
+static __always_inline bool
+arch_atomic_inc_unless_negative(atomic_t *v)
+{
+       int c = arch_atomic_read(v);
+
+       do {
+               if (unlikely(c < 0))
+                       return false;
+       } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
+
+       return true;
+}
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#endif
+
+#ifndef arch_atomic_dec_unless_positive
+static __always_inline bool
+arch_atomic_dec_unless_positive(atomic_t *v)
+{
+       int c = arch_atomic_read(v);
+
+       do {
+               if (unlikely(c > 0))
+                       return false;
+       } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
+
+       return true;
+}
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#endif
+
+#ifndef arch_atomic_dec_if_positive
+static __always_inline int
+arch_atomic_dec_if_positive(atomic_t *v)
+{
+       int dec, c = arch_atomic_read(v);
+
+       do {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+       } while (!arch_atomic_try_cmpxchg(v, &c, dec));
+
+       return dec;
+}
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+#endif
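dec_if_positive acts as a non-blocking "take one token" operation: the counter is only decremented when the result stays non-negative, and the would-be result is returned either way. A hedged sketch of a counted slot pool built on it.

/* Take one slot if any are free; the counter never drops below zero. */
static bool try_take_slot(atomic_t *free_slots)
{
	return atomic_dec_if_positive(free_slots) >= 0;
}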
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+#ifndef arch_atomic64_read_acquire
+static __always_inline s64
+arch_atomic64_read_acquire(const atomic64_t *v)
+{
+       return smp_load_acquire(&(v)->counter);
+}
+#define arch_atomic64_read_acquire arch_atomic64_read_acquire
+#endif
+
+#ifndef arch_atomic64_set_release
+static __always_inline void
+arch_atomic64_set_release(atomic64_t *v, s64 i)
+{
+       smp_store_release(&(v)->counter, i);
+}
+#define arch_atomic64_set_release arch_atomic64_set_release
+#endif
+
+#ifndef arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
+#else /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_add_return_acquire
+static __always_inline s64
+arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_add_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
+#endif
+
+#ifndef arch_atomic64_add_return_release
+static __always_inline s64
+arch_atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_add_return_relaxed(i, v);
+}
+#define arch_atomic64_add_return_release arch_atomic64_add_return_release
+#endif
+
+#ifndef arch_atomic64_add_return
+static __always_inline s64
+arch_atomic64_add_return(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_add_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_add_return arch_atomic64_add_return
+#endif
+
+#endif /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
+#else /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_fetch_add_acquire
+static __always_inline s64
+arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_add_release
+static __always_inline s64
+arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_add_relaxed(i, v);
+}
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
+#endif
+
+#ifndef arch_atomic64_fetch_add
+static __always_inline s64
+arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_add_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#endif
+
+#endif /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
+#else /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_sub_return_acquire
+static __always_inline s64
+arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_sub_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
+#endif
+
+#ifndef arch_atomic64_sub_return_release
+static __always_inline s64
+arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_sub_return_relaxed(i, v);
+}
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
+#endif
+
+#ifndef arch_atomic64_sub_return
+static __always_inline s64
+arch_atomic64_sub_return(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_sub_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#endif
+
+#endif /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
+#else /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_acquire
+static __always_inline s64
+arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_sub_release
+static __always_inline s64
+arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
+#endif
+
+#ifndef arch_atomic64_fetch_sub
+static __always_inline s64
+arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_sub_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#endif
+
+#endif /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_inc
+static __always_inline void
+arch_atomic64_inc(atomic64_t *v)
+{
+       arch_atomic64_add(1, v);
+}
+#define arch_atomic64_inc arch_atomic64_inc
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+#ifdef arch_atomic64_inc_return
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
+#endif /* arch_atomic64_inc_return */
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+       return arch_atomic64_add_return(1, v);
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+       return arch_atomic64_add_return_acquire(1, v);
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+       return arch_atomic64_add_return_release(1, v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+static __always_inline s64
+arch_atomic64_inc_return_relaxed(atomic64_t *v)
+{
+       return arch_atomic64_add_return_relaxed(1, v);
+}
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#endif
+
+#else /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+       s64 ret = arch_atomic64_inc_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_inc_return_relaxed(v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_inc_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#endif /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+#ifdef arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
+#endif /* arch_atomic64_fetch_inc */
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+       return arch_atomic64_fetch_add(1, v);
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+       return arch_atomic64_fetch_add_acquire(1, v);
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+       return arch_atomic64_fetch_add_release(1, v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+static __always_inline s64
+arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+       return arch_atomic64_fetch_add_relaxed(1, v);
+}
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_inc_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_inc_relaxed(v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_inc_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#endif /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_dec
+static __always_inline void
+arch_atomic64_dec(atomic64_t *v)
+{
+       arch_atomic64_sub(1, v);
+}
+#define arch_atomic64_dec arch_atomic64_dec
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+#ifdef arch_atomic64_dec_return
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
+#endif /* arch_atomic64_dec_return */
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+       return arch_atomic64_sub_return(1, v);
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+       return arch_atomic64_sub_return_acquire(1, v);
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+       return arch_atomic64_sub_return_release(1, v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+static __always_inline s64
+arch_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+       return arch_atomic64_sub_return_relaxed(1, v);
+}
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
+#endif
+
+#else /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+       s64 ret = arch_atomic64_dec_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_dec_return_relaxed(v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_dec_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#endif /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+#ifdef arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
+#endif /* arch_atomic64_fetch_dec */
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+       return arch_atomic64_fetch_sub(1, v);
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+       return arch_atomic64_fetch_sub_acquire(1, v);
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+       return arch_atomic64_fetch_sub_release(1, v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+static __always_inline s64
+arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+       return arch_atomic64_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_dec_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_dec_relaxed(v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_dec_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#endif /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
+#else /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_fetch_and_acquire
+static __always_inline s64
+arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_and_release
+static __always_inline s64
+arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_and_relaxed(i, v);
+}
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
+#endif
+
+#ifndef arch_atomic64_fetch_and
+static __always_inline s64
+arch_atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_and_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#endif
+
+#endif /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_andnot
+static __always_inline void
+arch_atomic64_andnot(s64 i, atomic64_t *v)
+{
+       arch_atomic64_and(~i, v);
+}
+#define arch_atomic64_andnot arch_atomic64_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+#ifdef arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
+#endif /* arch_atomic64_fetch_andnot */
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_fetch_and(~i, v);
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_fetch_and_acquire(~i, v);
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_fetch_and_release(~i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+static __always_inline s64
+arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#endif /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
+#else /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_or_acquire
+static __always_inline s64
+arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_or_release
+static __always_inline s64
+arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_or_relaxed(i, v);
+}
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
+#endif
+
+#ifndef arch_atomic64_fetch_or
+static __always_inline s64
+arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_or_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#endif
+
+#endif /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
+#else /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_acquire
+static __always_inline s64
+arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_xor_release
+static __always_inline s64
+arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
+#endif
+
+#ifndef arch_atomic64_fetch_xor
+static __always_inline s64
+arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_xor_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#endif
+
+#endif /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_xchg_relaxed
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg
+#define arch_atomic64_xchg_release arch_atomic64_xchg
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
+#else /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_xchg_acquire
+static __always_inline s64
+arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+       s64 ret = arch_atomic64_xchg_relaxed(v, i);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
+#endif
+
+#ifndef arch_atomic64_xchg_release
+static __always_inline s64
+arch_atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+       __atomic_release_fence();
+       return arch_atomic64_xchg_relaxed(v, i);
+}
+#define arch_atomic64_xchg_release arch_atomic64_xchg_release
+#endif
+
+#ifndef arch_atomic64_xchg
+static __always_inline s64
+arch_atomic64_xchg(atomic64_t *v, s64 i)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_xchg_relaxed(v, i);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
+#endif
+
+#endif /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
+#else /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_acquire
+static __always_inline s64
+arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+       s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_cmpxchg_release
+static __always_inline s64
+arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+       __atomic_release_fence();
+       return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_cmpxchg
+static __always_inline s64
+arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+#endif
+
+#endif /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+#ifdef arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
+#endif /* arch_atomic64_try_cmpxchg */
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = arch_atomic64_cmpxchg(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = arch_atomic64_cmpxchg_acquire(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = arch_atomic64_cmpxchg_release(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = arch_atomic64_cmpxchg_relaxed(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+       bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+       __atomic_release_fence();
+       return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+       bool ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#endif /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_sub_and_test
+/**
+ * arch_atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_sub_return(i, v) == 0;
+}
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
+#endif
+
+#ifndef arch_atomic64_dec_and_test
+/**
+ * arch_atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic64_dec_and_test(atomic64_t *v)
+{
+       return arch_atomic64_dec_return(v) == 0;
+}
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
+#endif
+
+#ifndef arch_atomic64_inc_and_test
+/**
+ * arch_atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic64_inc_and_test(atomic64_t *v)
+{
+       return arch_atomic64_inc_return(v) == 0;
+}
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+#endif
+
+#ifndef arch_atomic64_add_negative
+/**
+ * arch_atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic64_add_negative(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_add_return(i, v) < 0;
+}
+#define arch_atomic64_add_negative arch_atomic64_add_negative
+#endif
+
+#ifndef arch_atomic64_fetch_add_unless
+/**
+ * arch_atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static __always_inline s64
+arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       s64 c = arch_atomic64_read(v);
+
+       do {
+               if (unlikely(c == u))
+                       break;
+       } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
+
+       return c;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+#endif
+
+#ifndef arch_atomic64_add_unless
+/**
+ * arch_atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       return arch_atomic64_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic64_add_unless arch_atomic64_add_unless
+#endif
+
+#ifndef arch_atomic64_inc_not_zero
+/**
+ * arch_atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic64_inc_not_zero(atomic64_t *v)
+{
+       return arch_atomic64_add_unless(v, 1, 0);
+}
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
+#endif
+
+#ifndef arch_atomic64_inc_unless_negative
+static __always_inline bool
+arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+       s64 c = arch_atomic64_read(v);
+
+       do {
+               if (unlikely(c < 0))
+                       return false;
+       } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
+
+       return true;
+}
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+#endif
+
+#ifndef arch_atomic64_dec_unless_positive
+static __always_inline bool
+arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+       s64 c = arch_atomic64_read(v);
+
+       do {
+               if (unlikely(c > 0))
+                       return false;
+       } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
+
+       return true;
+}
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+#endif
+
+#ifndef arch_atomic64_dec_if_positive
+static __always_inline s64
+arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+       s64 dec, c = arch_atomic64_read(v);
+
+       do {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+       } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
+
+       return dec;
+}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+#endif
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// cca554917d7ea73d5e3e7397dd70c484cad9b2c4
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
new file mode 100644 (file)
index 0000000..a0f6543
--- /dev/null
@@ -0,0 +1,1915 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality, an arch's atomic.h file needs to define all
+ * atomic operations with the arch_ prefix (e.g. arch_atomic_read()) and
+ * include this file at the end. This file then provides atomic_read(),
+ * which forwards to arch_atomic_read() for the actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. an atomic_read()/atomic_cmpxchg() loop), it needs to use
+ * the arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to
+ * avoid double instrumentation.
+ */
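As a hedged illustration of the arrangement described above (the file location and definitions are hypothetical): an architecture header supplies the bare arch_ operations, and the wrappers in this file report each access via instrument_atomic_read()/instrument_atomic_write() before forwarding to them.

/* Sketch of a hypothetical arch/<foo>/include/asm/atomic.h fragment: */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))

/*
 * Arch helpers that are themselves built from other atomic operations must
 * call these arch_ forms directly; going through atomic_read()/atomic_set()
 * would instrument the same access twice.
 */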
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
+
+static __always_inline int
+atomic_read(const atomic_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic_read(v);
+}
+
+static __always_inline int
+atomic_read_acquire(const atomic_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic_read_acquire(v);
+}
+
+static __always_inline void
+atomic_set(atomic_t *v, int i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic_set(v, i);
+}
+
+static __always_inline void
+atomic_set_release(atomic_t *v, int i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic_set_release(v, i);
+}
+
+static __always_inline void
+atomic_add(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_add(i, v);
+}
+
+static __always_inline int
+atomic_add_return(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_return(i, v);
+}
+
+static __always_inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_return_acquire(i, v);
+}
+
+static __always_inline int
+atomic_add_return_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_return_release(i, v);
+}
+
+static __always_inline int
+atomic_add_return_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_return_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_sub(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_sub(i, v);
+}
+
+static __always_inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_sub_return_acquire(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_sub_return_release(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_sub_return_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_inc(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_inc(v);
+}
+
+static __always_inline int
+atomic_inc_return(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_return(v);
+}
+
+static __always_inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_return_acquire(v);
+}
+
+static __always_inline int
+atomic_inc_return_release(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_return_release(v);
+}
+
+static __always_inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_return_relaxed(v);
+}
+
+static __always_inline int
+atomic_fetch_inc(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc_acquire(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc_release(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic_dec(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_dec(v);
+}
+
+static __always_inline int
+atomic_dec_return(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_return(v);
+}
+
+static __always_inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_return_acquire(v);
+}
+
+static __always_inline int
+atomic_dec_return_release(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_return_release(v);
+}
+
+static __always_inline int
+atomic_dec_return_relaxed(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_return_relaxed(v);
+}
+
+static __always_inline int
+atomic_fetch_dec(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec_acquire(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec_release(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic_and(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_and(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_and_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_and_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_andnot(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_andnot(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_or(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_or(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_or_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_or_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_xor(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_xor(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_xchg(atomic_t *v, int i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_xchg(v, i);
+}
+
+static __always_inline int
+atomic_xchg_acquire(atomic_t *v, int i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_xchg_acquire(v, i);
+}
+
+static __always_inline int
+atomic_xchg_release(atomic_t *v, int i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_xchg_release(v, i);
+}
+
+static __always_inline int
+atomic_xchg_relaxed(atomic_t *v, int i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_xchg_relaxed(v, i);
+}
+
+static __always_inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg_release(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_sub_and_test(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic_dec_and_test(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic_inc_and_test(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic_add_negative(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_negative(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_unless_positive(v);
+}
+
+static __always_inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_if_positive(v);
+}
+
+static __always_inline s64
+atomic64_read(const atomic64_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic64_read(v);
+}
+
+static __always_inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic64_read_acquire(v);
+}
+
+static __always_inline void
+atomic64_set(atomic64_t *v, s64 i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic64_set(v, i);
+}
+
+static __always_inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic64_set_release(v, i);
+}
+
+static __always_inline void
+atomic64_add(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_add(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_return_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_return_release(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_return_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_sub(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_sub(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_sub_return_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_sub_return_release(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_sub_return_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_sub_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_sub_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_inc(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_inc(v);
+}
+
+static __always_inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_return(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_return_acquire(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_return_release(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_return_relaxed(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_inc(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_inc_acquire(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_inc_release(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic64_dec(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_dec(v);
+}
+
+static __always_inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_return(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_return_acquire(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_return_release(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_return_relaxed(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_dec(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_dec_acquire(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_dec_release(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic64_and(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_and(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_and_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_and_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_andnot(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_andnot(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_andnot(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_andnot_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_or(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_or(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_or_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_or_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_xor(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_xor(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_xor(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_xor_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_xor_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_xchg(atomic64_t *v, s64 i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_xchg_acquire(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_xchg_release(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_relaxed(atomic64_t *v, s64 i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_xchg_relaxed(v, i);
+}
+
+static __always_inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_cmpxchg(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_cmpxchg_release(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic64_dec_and_test(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_negative(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_unless_positive(v);
+}
+
+static __always_inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_if_positive(v);
+}
+
+static __always_inline long
+atomic_long_read(const atomic_long_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic_long_read(v);
+}
+
+static __always_inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic_long_read_acquire(v);
+}
+
+static __always_inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic_long_set(v, i);
+}
+
+static __always_inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic_long_set_release(v, i);
+}
+
+static __always_inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_add(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_return(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_return_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_return_release(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_add(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_add_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_sub(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_sub_return(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_sub_return_release(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_sub(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_inc(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_inc(v);
+}
+
+static __always_inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_return(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_return_acquire(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_return_release(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_return_relaxed(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_inc(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_inc_release(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic_long_dec(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_dec(v);
+}
+
+static __always_inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_return(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_return_acquire(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_return_release(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_return_relaxed(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_dec(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_dec_release(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_and(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_and(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_and_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_andnot(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_andnot(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_or(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_or(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_or(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_or_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_xor(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_xor(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_xchg(atomic_long_t *v, long i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_xchg(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_xchg_acquire(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_xchg_release(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_long_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_long_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_long_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_long_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_negative(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_unless_positive(v);
+}
+
+static __always_inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_if_positive(v);
+}
+
+#define xchg(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_xchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_acquire(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_release(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_xchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_relaxed(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_acquire(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_release(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_relaxed(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_acquire(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_release(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_relaxed(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define try_cmpxchg(ptr, oldp, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       typeof(oldp) __ai_oldp = (oldp); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+       arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_acquire(ptr, oldp, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       typeof(oldp) __ai_oldp = (oldp); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+       arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_release(ptr, oldp, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       typeof(oldp) __ai_oldp = (oldp); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+       arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_relaxed(ptr, oldp, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       typeof(oldp) __ai_oldp = (oldp); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+       arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define cmpxchg_local(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_local(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define sync_cmpxchg(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_double(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+       arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_double_local(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+       arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+// 2a9553f0a9d5619f19151092df5cabbbf16ce835
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
new file mode 100644 (file)
index 0000000..800b8c3
--- /dev/null
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-long.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_LONG_H
+#define _LINUX_ATOMIC_LONG_H
+
+#include <linux/compiler.h>
+#include <asm/types.h>
+
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i)            ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire  atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed  atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i)            ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire  atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed  atomic_cond_read_relaxed
+#endif
+
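/*
 * Editor's note -- illustrative sketch, not part of the generated file or of
 * this diff: atomic_long_t follows the native word size (atomic64_t on
 * CONFIG_64BIT, atomic_t otherwise), so it suits counters sized like 'long',
 * e.g. byte or object counts. The struct and helper names are hypothetical.
 */
struct buf_stats {
	atomic_long_t bytes_queued;
};

static inline void buf_stats_account(struct buf_stats *s, long nbytes)
{
	/* On 64-bit kernels this is the 64-bit op, on 32-bit the 32-bit op. */
	atomic_long_add(nbytes, &s->bytes_queued);
}

static inline long buf_stats_read(const struct buf_stats *s)
{
	return atomic_long_read(&s->bytes_queued);
}
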
+#ifdef CONFIG_64BIT
+
+static __always_inline long
+arch_atomic_long_read(const atomic_long_t *v)
+{
+       return arch_atomic64_read(v);
+}
+
+static __always_inline long
+arch_atomic_long_read_acquire(const atomic_long_t *v)
+{
+       return arch_atomic64_read_acquire(v);
+}
+
+static __always_inline void
+arch_atomic_long_set(atomic_long_t *v, long i)
+{
+       arch_atomic64_set(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_set_release(atomic_long_t *v, long i)
+{
+       arch_atomic64_set_release(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_add(long i, atomic_long_t *v)
+{
+       arch_atomic64_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return(long i, atomic_long_t *v)
+{
+       return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_add_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_add_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_add_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_sub(long i, atomic_long_t *v)
+{
+       arch_atomic64_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return(long i, atomic_long_t *v)
+{
+       return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_sub_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_inc(atomic_long_t *v)
+{
+       arch_atomic64_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return(atomic_long_t *v)
+{
+       return arch_atomic64_inc_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+       return arch_atomic64_inc_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_release(atomic_long_t *v)
+{
+       return arch_atomic64_inc_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+       return arch_atomic64_inc_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_inc_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_dec(atomic_long_t *v)
+{
+       arch_atomic64_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return(atomic_long_t *v)
+{
+       return arch_atomic64_dec_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+       return arch_atomic64_dec_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_release(atomic_long_t *v)
+{
+       return arch_atomic64_dec_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+       return arch_atomic64_dec_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_dec_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_and(long i, atomic_long_t *v)
+{
+       arch_atomic64_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_and_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_andnot(long i, atomic_long_t *v)
+{
+       arch_atomic64_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_or(long i, atomic_long_t *v)
+{
+       arch_atomic64_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_or_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_xor(long i, atomic_long_t *v)
+{
+       arch_atomic64_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_xchg(atomic_long_t *v, long i)
+{
+       return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+       return arch_atomic64_xchg_acquire(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+       return arch_atomic64_xchg_release(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+       return arch_atomic64_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic64_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic64_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic64_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic64_try_cmpxchg(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+       return arch_atomic64_sub_and_test(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_and_test(atomic_long_t *v)
+{
+       return arch_atomic64_dec_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_and_test(atomic_long_t *v)
+{
+       return arch_atomic64_inc_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative(long i, atomic_long_t *v)
+{
+       return arch_atomic64_add_negative(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+       return arch_atomic64_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+       return arch_atomic64_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_not_zero(atomic_long_t *v)
+{
+       return arch_atomic64_inc_not_zero(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+       return arch_atomic64_inc_unless_negative(v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+       return arch_atomic64_dec_unless_positive(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_if_positive(atomic_long_t *v)
+{
+       return arch_atomic64_dec_if_positive(v);
+}
+
+#else /* CONFIG_64BIT */
+
+static __always_inline long
+arch_atomic_long_read(const atomic_long_t *v)
+{
+       return arch_atomic_read(v);
+}
+
+static __always_inline long
+arch_atomic_long_read_acquire(const atomic_long_t *v)
+{
+       return arch_atomic_read_acquire(v);
+}
+
+static __always_inline void
+arch_atomic_long_set(atomic_long_t *v, long i)
+{
+       arch_atomic_set(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_set_release(atomic_long_t *v, long i)
+{
+       arch_atomic_set_release(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_add(long i, atomic_long_t *v)
+{
+       arch_atomic_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return(long i, atomic_long_t *v)
+{
+       return arch_atomic_add_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_add_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_add_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_add_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_sub(long i, atomic_long_t *v)
+{
+       arch_atomic_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return(long i, atomic_long_t *v)
+{
+       return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_sub_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_inc(atomic_long_t *v)
+{
+       arch_atomic_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return(atomic_long_t *v)
+{
+       return arch_atomic_inc_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+       return arch_atomic_inc_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_release(atomic_long_t *v)
+{
+       return arch_atomic_inc_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+       return arch_atomic_inc_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc(atomic_long_t *v)
+{
+       return arch_atomic_fetch_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+       return arch_atomic_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+       return arch_atomic_fetch_inc_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+       return arch_atomic_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_dec(atomic_long_t *v)
+{
+       arch_atomic_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return(atomic_long_t *v)
+{
+       return arch_atomic_dec_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+       return arch_atomic_dec_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_release(atomic_long_t *v)
+{
+       return arch_atomic_dec_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+       return arch_atomic_dec_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec(atomic_long_t *v)
+{
+       return arch_atomic_fetch_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+       return arch_atomic_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+       return arch_atomic_fetch_dec_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+       return arch_atomic_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_and(long i, atomic_long_t *v)
+{
+       arch_atomic_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_and_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_andnot(long i, atomic_long_t *v)
+{
+       arch_atomic_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_or(long i, atomic_long_t *v)
+{
+       arch_atomic_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_or_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_xor(long i, atomic_long_t *v)
+{
+       arch_atomic_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_xchg(atomic_long_t *v, long i)
+{
+       return arch_atomic_xchg(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+       return arch_atomic_xchg_acquire(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+       return arch_atomic_xchg_release(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+       return arch_atomic_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic_try_cmpxchg(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic_try_cmpxchg_acquire(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic_try_cmpxchg_release(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+       return arch_atomic_sub_and_test(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_and_test(atomic_long_t *v)
+{
+       return arch_atomic_dec_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_and_test(atomic_long_t *v)
+{
+       return arch_atomic_inc_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative(long i, atomic_long_t *v)
+{
+       return arch_atomic_add_negative(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+       return arch_atomic_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+       return arch_atomic_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_not_zero(atomic_long_t *v)
+{
+       return arch_atomic_inc_not_zero(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+       return arch_atomic_inc_unless_negative(v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+       return arch_atomic_dec_unless_positive(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_if_positive(atomic_long_t *v)
+{
+       return arch_atomic_dec_if_positive(v);
+}
+
+#endif /* CONFIG_64BIT */
+#endif /* _LINUX_ATOMIC_LONG_H */
+// e8f0e08ff072b74d180eabe2ad001282b38c2c88
index 44df4fc..2953085 100644 (file)
@@ -143,7 +143,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
        sb = inode->i_sb;
 #ifdef CONFIG_BLOCK
        if (sb_is_blkdev_sb(sb))
-               return I_BDEV(inode)->bd_bdi;
+               return I_BDEV(inode)->bd_disk->bdi;
 #endif
        return sb->s_bdi;
 }
index 2203b68..00952e9 100644 (file)
@@ -5,7 +5,6 @@
 #ifndef __LINUX_BIO_H
 #define __LINUX_BIO_H
 
-#include <linux/highmem.h>
 #include <linux/mempool.h>
 #include <linux/ioprio.h>
 /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
@@ -375,7 +374,7 @@ static inline void bip_set_seed(struct bio_integrity_payload *bip,
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
-extern void bio_trim(struct bio *bio, int offset, int size);
+void bio_trim(struct bio *bio, sector_t offset, sector_t size);
 extern struct bio *bio_split(struct bio *bio, int sectors,
                             gfp_t gfp, struct bio_set *bs);
 
@@ -401,6 +400,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
 enum {
        BIOSET_NEED_BVECS = BIT(0),
        BIOSET_NEED_RESCUER = BIT(1),
+       BIOSET_PERCPU_CACHE = BIT(2),
 };
 extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
 extern void bioset_exit(struct bio_set *);
@@ -409,6 +409,8 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
 
 struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
                struct bio_set *bs);
+struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
+               struct bio_set *bs);
 struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
 extern void bio_put(struct bio *);
 
@@ -519,47 +521,6 @@ static inline void bio_clone_blkg_association(struct bio *dst,
                                              struct bio *src) { }
 #endif /* CONFIG_BLK_CGROUP */
 
-#ifdef CONFIG_HIGHMEM
-/*
- * remember never ever reenable interrupts between a bvec_kmap_irq and
- * bvec_kunmap_irq!
- */
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
-{
-       unsigned long addr;
-
-       /*
-        * might not be a highmem page, but the preempt/irq count
-        * balancing is a lot nicer this way
-        */
-       local_irq_save(*flags);
-       addr = (unsigned long) kmap_atomic(bvec->bv_page);
-
-       BUG_ON(addr & ~PAGE_MASK);
-
-       return (char *) addr + bvec->bv_offset;
-}
-
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
-{
-       unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
-
-       kunmap_atomic((void *) ptr);
-       local_irq_restore(*flags);
-}
-
-#else
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
-{
-       return page_address(bvec->bv_page) + bvec->bv_offset;
-}
-
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
-{
-       *flags = 0;
-}
-#endif
-
 /*
  * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
  *
@@ -699,6 +660,11 @@ struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;
 
+       /*
+        * per-cpu bio alloc cache
+        */
+       struct bio_alloc_cache __percpu *cache;
+
        mempool_t bio_pool;
        mempool_t bvec_pool;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -715,6 +681,11 @@ struct bio_set {
        struct bio_list         rescue_list;
        struct work_struct      rescue_work;
        struct workqueue_struct *rescue_workqueue;
+
+       /*
+        * Hot-unplug notifier for the per-cpu cache, if used
+        */
+       struct hlist_node cpuhp_dead;
 };
 
 static inline bool bioset_initialized(struct bio_set *bs)
index 3704843..b4de201 100644 (file)
@@ -152,8 +152,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
-typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
-                                     size_t size);
+typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
+                               struct seq_file *s);
 
 struct blkcg_policy {
        int                             plid;
index 1d18447..13ba186 100644 (file)
@@ -404,7 +404,13 @@ enum {
        BLK_MQ_F_STACKING       = 1 << 2,
        BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
        BLK_MQ_F_BLOCKING       = 1 << 5,
+       /* Do not allow an I/O scheduler to be configured. */
        BLK_MQ_F_NO_SCHED       = 1 << 6,
+       /*
+        * Select 'none' instead of 'mq-deadline' during queue registration
+        * in case of a single hwq or shared hwqs.
+        */
+       BLK_MQ_F_NO_SCHED_BY_DEFAULT    = 1 << 7,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,
 
@@ -426,18 +432,14 @@ enum {
        ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
                << BLK_MQ_F_ALLOC_POLICY_START_BIT)
 
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
+               struct lock_class_key *lkclass);
 #define blk_mq_alloc_disk(set, queuedata)                              \
 ({                                                                     \
        static struct lock_class_key __key;                             \
-       struct gendisk *__disk = __blk_mq_alloc_disk(set, queuedata);   \
                                                                        \
-       if (!IS_ERR(__disk))                                            \
-               lockdep_init_map(&__disk->lockdep_map,                  \
-                       "(bio completion)", &__key, 0);                 \
-       __disk;                                                         \
+       __blk_mq_alloc_disk(set, queuedata, &__key);                    \
 })
-struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
-               void *queuedata);
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                struct request_queue *q);
index 290f906..be622b5 100644 (file)
@@ -34,14 +34,10 @@ struct block_device {
        void *                  bd_holder;
        int                     bd_holders;
        bool                    bd_write_holder;
-#ifdef CONFIG_SYSFS
-       struct list_head        bd_holder_disks;
-#endif
        struct kobject          *bd_holder_dir;
        u8                      bd_partno;
        spinlock_t              bd_size_lock; /* for bd_inode->i_size updates */
        struct gendisk *        bd_disk;
-       struct backing_dev_info *bd_bdi;
 
        /* The counter of freeze processes */
        int                     bd_fsfreeze_count;
@@ -281,6 +277,7 @@ struct bio {
 };
 
 #define BIO_RESET_BYTES                offsetof(struct bio, bi_max_vecs)
+#define BIO_MAX_SECTORS                (UINT_MAX >> SECTOR_SHIFT)
 
 /*
  * bio flags
@@ -301,6 +298,7 @@ enum {
        BIO_TRACKED,            /* set if bio goes through the rq_qos path */
        BIO_REMAPPED,
        BIO_ZONE_WRITE_LOCKED,  /* Owns a zoned device zone write lock */
+       BIO_PERCPU_CACHE,       /* can participate in per-cpu alloc cache */
        BIO_FLAG_LAST
 };
 
index d3afea4..c9cb124 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/minmax.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
-#include <linux/backing-dev-defs.h>
 #include <linux/wait.h>
 #include <linux/mempool.h>
 #include <linux/pfn.h>
@@ -398,8 +397,6 @@ struct request_queue {
        struct blk_mq_hw_ctx    **queue_hw_ctx;
        unsigned int            nr_hw_queues;
 
-       struct backing_dev_info *backing_dev_info;
-
        /*
         * The queue owner gets to use this for whatever they like.
         * ll_rw_blk doesn't touch it.
@@ -424,6 +421,8 @@ struct request_queue {
 
        spinlock_t              queue_lock;
 
+       struct gendisk          *disk;
+
        /*
         * queue kobject
         */
@@ -664,8 +663,6 @@ extern void blk_clear_pm_only(struct request_queue *q);
        dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
        (dir), (attrs))
 
-#define queue_to_disk(q)       (dev_to_disk(kobj_to_dev((q)->kobj.parent)))
-
 static inline bool queue_is_mq(struct request_queue *q)
 {
        return q->mq_ops;
@@ -941,6 +938,10 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 #define SECTOR_SIZE (1 << SECTOR_SHIFT)
 #endif
 
+#define PAGE_SECTORS_SHIFT     (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS           (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_MASK            (PAGE_SECTORS - 1)
+
 /*
  * blk_rq_pos()                        : the current sector
  * blk_rq_bytes()              : bytes left in the entire request
@@ -1139,7 +1140,7 @@ void blk_queue_zone_write_granularity(struct request_queue *q,
                                      unsigned int size);
 extern void blk_queue_alignment_offset(struct request_queue *q,
                                       unsigned int alignment);
-void blk_queue_update_readahead(struct request_queue *q);
+void disk_update_readahead(struct gendisk *disk);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
@@ -1521,6 +1522,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
        return offset << SECTOR_SHIFT;
 }
 
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver treats every bio
+ * as a range and sends them to the controller together. The ranges
+ * need not be contiguous.
+ * Otherwise, the bios/requests are handled the same as
+ * others, which must be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+       if (req_op(req) == REQ_OP_DISCARD &&
+           queue_max_discard_segments(req->q) > 1)
+               return true;
+       return false;
+}
+
 static inline int bdev_discard_alignment(struct block_device *bdev)
 {
        struct request_queue *q = bdev_get_queue(bdev);
@@ -1855,6 +1872,13 @@ struct block_device_operations {
        char *(*devnode)(struct gendisk *disk, umode_t *mode);
        struct module *owner;
        const struct pr_ops *pr_ops;
+
+       /*
+        * Special callback for probing GPT entry at a given sector.
+        * Needed by Android devices, used by GPT scanner and MMC blk
+        * driver.
+        */
+       int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
 };
 
 #ifdef CONFIG_COMPAT
@@ -1984,8 +2008,6 @@ void blkdev_put_no_open(struct block_device *bdev);
 struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
 void bdev_add(struct block_device *bdev, dev_t dev);
 struct block_device *I_BDEV(struct inode *inode);
-struct block_device *bdgrab(struct block_device *bdev);
-void bdput(struct block_device *);
 int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
                loff_t lend);
 
index 8b77d08..6c9b10d 100644 (file)
@@ -201,8 +201,8 @@ static inline void bpf_cgroup_storage_unset(void)
 {
        int i;
 
-       for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-               if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+       for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+               if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
                        continue;
 
                this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
index ff832e6..0e9bdd4 100644 (file)
@@ -4,9 +4,10 @@
  *
  * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
  */
-#ifndef __LINUX_BVEC_ITER_H
-#define __LINUX_BVEC_ITER_H
+#ifndef __LINUX_BVEC_H
+#define __LINUX_BVEC_H
 
+#include <linux/highmem.h>
 #include <linux/bug.h>
 #include <linux/errno.h>
 #include <linux/limits.h>
@@ -183,4 +184,61 @@ static inline void bvec_advance(const struct bio_vec *bvec,
        }
 }
 
-#endif /* __LINUX_BVEC_ITER_H */
+/**
+ * bvec_kmap_local - map a bvec into the kernel virtual address space
+ * @bvec: bvec to map
+ *
+ * Must be called on single-page bvecs only.  Call kunmap_local on the returned
+ * address to unmap.
+ */
+static inline void *bvec_kmap_local(struct bio_vec *bvec)
+{
+       return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
+}
+
+/**
+ * memcpy_from_bvec - copy data from a bvec
+ * @bvec: bvec to copy from
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
+{
+       memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * memcpy_to_bvec - copy data to a bvec
+ * @bvec: bvec to copy to
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
+{
+       memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
+}
+
+/**
+ * memzero_bvec - zero all data in a bvec
+ * @bvec: bvec to zero
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memzero_bvec(struct bio_vec *bvec)
+{
+       memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * bvec_virt - return the virtual address for a bvec
+ * @bvec: bvec to return the virtual address for
+ *
+ * Note: the caller must ensure that @bvec->bv_page is not a highmem page.
+ */
+static inline void *bvec_virt(struct bio_vec *bvec)
+{
+       WARN_ON_ONCE(PageHighMem(bvec->bv_page));
+       return page_address(bvec->bv_page) + bvec->bv_offset;
+}
+
+#endif /* __LINUX_BVEC_H */
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
deleted file mode 100644 (file)
index 68a5418..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Parsing command line, get the partitions information.
- *
- * Written by Cai Zhiyong <caizhiyong@huawei.com>
- *
- */
-#ifndef CMDLINEPARSEH
-#define CMDLINEPARSEH
-
-#include <linux/blkdev.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-/* partition flags */
-#define PF_RDONLY                   0x01 /* Device is read only */
-#define PF_POWERUP_LOCK             0x02 /* Always locked after reset */
-
-struct cmdline_subpart {
-       char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
-       sector_t from;
-       sector_t size;
-       int flags;
-       struct cmdline_subpart *next_subpart;
-};
-
-struct cmdline_parts {
-       char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
-       unsigned int nr_subparts;
-       struct cmdline_subpart *subpart;
-       struct cmdline_parts *next_parts;
-};
-
-void cmdline_parts_free(struct cmdline_parts **parts);
-
-int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
-
-struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
-                                        const char *bdev);
-
-int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-                     int slot,
-                     int (*add_part)(int, struct cmdline_subpart *, void *),
-                     void *param);
-
-#endif /* CMDLINEPARSEH */
index f39b34b..95f88ed 100644 (file)
@@ -46,6 +46,7 @@ enum cpuhp_state {
        CPUHP_ARM_OMAP_WAKE_DEAD,
        CPUHP_IRQ_POLL_DEAD,
        CPUHP_BLOCK_SOFTIRQ_DEAD,
+       CPUHP_BIO_DEAD,
        CPUHP_ACPI_CPUDRV_DEAD,
        CPUHP_S390_PFAULT_DEAD,
        CPUHP_BLK_MQ_DEAD,
@@ -399,7 +400,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
 
 /**
  * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
- *                                      without invoking the reatdown callback
+ *                                      without invoking the teardown callback
  * @state:     The state from which the instance is removed
  * @node:      The node for this individual state.
  *
index 04c20de..d2b9c41 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
 #include <linux/mm.h>
+#include <linux/mmu_context.h>
 #include <linux/jump_label.h>
 
 #ifdef CONFIG_CPUSETS
@@ -58,7 +59,7 @@ extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_read_lock(void);
 extern void cpuset_read_unlock(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -184,11 +185,12 @@ static inline void cpuset_read_unlock(void) { }
 static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
 {
-       cpumask_copy(mask, cpu_possible_mask);
+       cpumask_copy(mask, task_cpu_possible_mask(p));
 }
 
-static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
+       return false;
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
index edb5c18..3f49e65 100644 (file)
@@ -3,8 +3,7 @@
 #define __LINUX_DEBUG_LOCKING_H
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
-#include <linux/printk.h>
+#include <linux/cache.h>
 
 struct task_struct;
 
index 7457d49..94f2cd6 100644 (file)
@@ -151,7 +151,6 @@ typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i);
 typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
                size_t nr_pages);
-#define PAGE_SECTORS (PAGE_SIZE / 512)
 
 void dm_error(const char *message);
 
index 59940f1..65d84b6 100644 (file)
@@ -407,6 +407,7 @@ struct dev_links_info {
  * @em_pd:     device's energy model performance domain
  * @pins:      For device pin management.
  *             See Documentation/driver-api/pin-control.rst for details.
+ * @msi_lock:  Lock to protect MSI mask cache and mask register
  * @msi_list:  Hosts MSI descriptors
  * @msi_domain: The generic MSI domain this device is using.
  * @numa_node: NUMA node this device is close to.
@@ -506,6 +507,7 @@ struct device {
        struct dev_pin_info     *pins;
 #endif
 #ifdef CONFIG_GENERIC_MSI_IRQ
+       raw_spinlock_t          msi_lock;
        struct list_head        msi_list;
 #endif
 #ifdef CONFIG_DMA_OPS
index 76d3562..4207d06 100644 (file)
@@ -184,6 +184,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
  * @MEM_DDR5:          Unbuffered DDR5 RAM
  * @MEM_NVDIMM:                Non-volatile RAM
  * @MEM_WIO2:          Wide I/O 2.
+ * @MEM_HBM2:          High bandwidth Memory Gen 2.
  */
 enum mem_type {
        MEM_EMPTY = 0,
@@ -212,6 +213,7 @@ enum mem_type {
        MEM_DDR5,
        MEM_NVDIMM,
        MEM_WIO2,
+       MEM_HBM2,
 };
 
 #define MEM_FLAG_EMPTY         BIT(MEM_EMPTY)
@@ -239,6 +241,7 @@ enum mem_type {
 #define MEM_FLAG_DDR5           BIT(MEM_DDR5)
 #define MEM_FLAG_NVDIMM         BIT(MEM_NVDIMM)
 #define MEM_FLAG_WIO2          BIT(MEM_WIO2)
+#define MEM_FLAG_HBM2          BIT(MEM_HBM2)
 
 /**
  * enum edac_type - Error Detection and Correction capabilities and mode
index 3f221db..1834752 100644 (file)
@@ -53,6 +53,22 @@ struct em_perf_domain {
 #ifdef CONFIG_ENERGY_MODEL
 #define EM_MAX_POWER 0xFFFF
 
+/*
+ * Increase resolution of energy estimation calculations for 64-bit
+ * architectures. The extra resolution improves decision made by EAS for the
+ * task placement when two Performance Domains might provide similar energy
+ * estimation values (w/o better resolution the values could be equal).
+ *
+ * We increase resolution only if we have enough bits to allow this increased
+ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
+ * are pretty high and the returns do not justify the increased costs.
+ */
+#ifdef CONFIG_64BIT
+#define em_scale_power(p) ((p) * 1000)
+#else
+#define em_scale_power(p) (p)
+#endif
+
 struct em_data_callback {
        /**
         * active_power() - Provide power at the next performance state of
index d73f597..8b0c754 100644 (file)
@@ -31,5 +31,6 @@
 #define EJUKEBOX       528     /* Request initiated, but will not complete before timeout */
 #define EIOCBQUEUED    529     /* iocb queued, will get completion event */
 #define ERECALLCONFLICT        530     /* conflict with recalled state */
+#define ENOGRACE       531     /* NFS file lock reclaim refused */
 
 #endif
index fa0a524..305d5f1 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/err.h>
 #include <linux/percpu-defs.h>
 #include <linux/percpu.h>
+#include <linux/sched.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -43,11 +44,9 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
                                  __u64 *cnt);
 void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
 
-DECLARE_PER_CPU(int, eventfd_wake_count);
-
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_allowed(void)
 {
-       return this_cpu_read(eventfd_wake_count);
+       return !current->in_eventfd_signal;
 }
 
 #else /* CONFIG_EVENTFD */
@@ -78,9 +77,9 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
        return -ENOSYS;
 }
 
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_allowed(void)
 {
-       return false;
+       return true;
 }
 
 static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
index fe84890..3260fe7 100644 (file)
@@ -221,6 +221,8 @@ struct export_operations {
 #define EXPORT_OP_NOATOMIC_ATTR                (0x10) /* Filesystem cannot supply
                                                  atomic attribute updates
                                                */
+#define EXPORT_OP_SYNC_LOCKS           (0x20) /* Filesystem can't do
+                                                 asynchronous blocking locks */
        unsigned long   flags;
 };
 
index a16dbec..eec3b7c 100644 (file)
@@ -27,6 +27,8 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
 
 #define FANOTIFY_FID_BITS      (FAN_REPORT_FID | FAN_REPORT_DFID_NAME)
 
+#define FANOTIFY_INFO_MODES    (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD)
+
 /*
  * fanotify_init() flags that require CAP_SYS_ADMIN.
  * We do not allow unprivileged groups to request permission events.
@@ -35,6 +37,7 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
  */
 #define FANOTIFY_ADMIN_INIT_FLAGS      (FANOTIFY_PERM_CLASSES | \
                                         FAN_REPORT_TID | \
+                                        FAN_REPORT_PIDFD | \
                                         FAN_UNLIMITED_QUEUE | \
                                         FAN_UNLIMITED_MARKS)
 
index 4e624c4..c50882f 100644 (file)
@@ -18,8 +18,4 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
                            u64 phys, u64 len, u32 flags);
 
-int generic_block_fiemap(struct inode *inode,
-               struct fiemap_extent_info *fieinfo, u64 start, u64 len,
-               get_block_t *get_block);
-
 #endif /* _LINUX_FIEMAP_H 1 */
index 6405742..1c01f9f 100644 (file)
@@ -319,6 +319,8 @@ enum rw_hint {
 /* iocb->ki_waitq is valid */
 #define IOCB_WAITQ             (1 << 19)
 #define IOCB_NOIO              (1 << 20)
+/* can use bio alloc cache */
+#define IOCB_ALLOC_CACHE       (1 << 21)
 
 struct kiocb {
        struct file             *ki_filp;
@@ -436,6 +438,10 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
  * struct address_space - Contents of a cacheable, mappable object.
  * @host: Owner, either the inode or the block_device.
  * @i_pages: Cached pages.
+ * @invalidate_lock: Guards coherency between page cache contents and
+ *   file offset->disk block mappings in the filesystem during invalidates.
+ *   It is also used to block modification of page cache contents through
+ *   memory mappings.
  * @gfp_mask: Memory allocation flags to use for allocating pages.
  * @i_mmap_writable: Number of VM_SHARED mappings.
  * @nr_thps: Number of THPs in the pagecache (non-shmem only).
@@ -453,6 +459,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
 struct address_space {
        struct inode            *host;
        struct xarray           i_pages;
+       struct rw_semaphore     invalidate_lock;
        gfp_t                   gfp_mask;
        atomic_t                i_mmap_writable;
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -814,9 +821,42 @@ static inline void inode_lock_shared_nested(struct inode *inode, unsigned subcla
        down_read_nested(&inode->i_rwsem, subclass);
 }
 
+static inline void filemap_invalidate_lock(struct address_space *mapping)
+{
+       down_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock(struct address_space *mapping)
+{
+       up_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
+{
+       down_read(&mapping->invalidate_lock);
+}
+
+static inline int filemap_invalidate_trylock_shared(
+                                       struct address_space *mapping)
+{
+       return down_read_trylock(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock_shared(
+                                       struct address_space *mapping)
+{
+       up_read(&mapping->invalidate_lock);
+}
+
 void lock_two_nondirectories(struct inode *, struct inode*);
 void unlock_two_nondirectories(struct inode *, struct inode*);
 
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+                                struct address_space *mapping2);
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+                                  struct address_space *mapping2);
+
+
 /*
  * NOTE: in a 32bit arch with a preemptable kernel and
  * an UP compile the i_size_read/write must be atomic
@@ -997,6 +1037,7 @@ static inline struct file *get_file(struct file *f)
 #define FL_UNLOCK_PENDING      512 /* Lease is being broken */
 #define FL_OFDLCK      1024    /* lock is "owned" by struct file */
 #define FL_LAYOUT      2048    /* outstanding pNFS layout */
+#define FL_RECLAIM     4096    /* reclaiming from a reboot server */
 
 #define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
 
@@ -1507,8 +1548,11 @@ struct super_block {
        /* Number of inodes with nlink == 0 but still referenced */
        atomic_long_t s_remove_count;
 
-       /* Pending fsnotify inode refs */
-       atomic_long_t s_fsnotify_inode_refs;
+       /*
+        * Number of inode/mount/sb objects that are being watched; note that
+        * inode objects are currently double-accounted.
+        */
+       atomic_long_t s_fsnotify_connectors;
 
        /* Being remounted read-only */
        int s_readonly_remount;
@@ -2457,7 +2501,6 @@ static inline void file_accessed(struct file *file)
 
 extern int file_modified(struct file *file);
 
-int sync_inode(struct inode *inode, struct writeback_control *wbc);
 int sync_inode_metadata(struct inode *inode, int wait);
 
 struct file_system_type {
@@ -2487,6 +2530,7 @@ struct file_system_type {
 
        struct lock_class_key i_lock_key;
        struct lock_class_key i_mutex_key;
+       struct lock_class_key invalidate_lock_key;
        struct lock_class_key i_mutex_dir_key;
 };
 
@@ -2570,90 +2614,6 @@ extern struct kobject *fs_kobj;
 
 #define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
 
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-extern int locks_mandatory_locked(struct file *);
-extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char);
-
-/*
- * Candidates for mandatory locking have the setgid bit set
- * but no group execute bit -  an otherwise meaningless combination.
- */
-
-static inline int __mandatory_lock(struct inode *ino)
-{
-       return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
-}
-
-/*
- * ... and these candidates should be on SB_MANDLOCK mounted fs,
- * otherwise these will be advisory locks
- */
-
-static inline int mandatory_lock(struct inode *ino)
-{
-       return IS_MANDLOCK(ino) && __mandatory_lock(ino);
-}
-
-static inline int locks_verify_locked(struct file *file)
-{
-       if (mandatory_lock(locks_inode(file)))
-               return locks_mandatory_locked(file);
-       return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode,
-                                   struct file *f,
-                                   loff_t size)
-{
-       if (!inode->i_flctx || !mandatory_lock(inode))
-               return 0;
-
-       if (size < inode->i_size) {
-               return locks_mandatory_area(inode, f, size, inode->i_size - 1,
-                               F_WRLCK);
-       } else {
-               return locks_mandatory_area(inode, f, inode->i_size, size - 1,
-                               F_WRLCK);
-       }
-}
-
-#else /* !CONFIG_MANDATORY_FILE_LOCKING */
-
-static inline int locks_mandatory_locked(struct file *file)
-{
-       return 0;
-}
-
-static inline int locks_mandatory_area(struct inode *inode, struct file *filp,
-                                       loff_t start, loff_t end, unsigned char type)
-{
-       return 0;
-}
-
-static inline int __mandatory_lock(struct inode *inode)
-{
-       return 0;
-}
-
-static inline int mandatory_lock(struct inode *inode)
-{
-       return 0;
-}
-
-static inline int locks_verify_locked(struct file *file)
-{
-       return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
-                                       size_t size)
-{
-       return 0;
-}
-
-#endif /* CONFIG_MANDATORY_FILE_LOCKING */
-
-
 #ifdef CONFIG_FILE_LOCKING
 static inline int break_lease(struct inode *inode, unsigned int mode)
 {
@@ -2786,6 +2746,7 @@ static inline struct file *file_clone_open(struct file *file)
 extern int filp_close(struct file *, fl_owner_t id);
 
 extern struct filename *getname_flags(const char __user *, int, int *);
+extern struct filename *getname_uflags(const char __user *, int);
 extern struct filename *getname(const char __user *);
 extern struct filename *getname_kernel(const char *);
 extern void putname(struct filename *name);
@@ -2891,6 +2852,8 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
                                loff_t start, loff_t end);
 extern int filemap_check_errors(struct address_space *mapping);
 extern void __filemap_set_wb_err(struct address_space *mapping, int err);
+int filemap_fdatawrite_wbc(struct address_space *mapping,
+                          struct writeback_control *wbc);
 
 static inline int filemap_write_and_wait(struct address_space *mapping)
 {
@@ -3246,10 +3209,6 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
 ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
                            struct iov_iter *iter);
 
-/* fs/block_dev.c */
-extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
-                       int datasync);
-
 /* fs/splice.c */
 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
                struct pipe_inode_info *, size_t, unsigned int);
index 2ea1387..e912ed9 100644 (file)
@@ -47,27 +47,128 @@ struct fscrypt_name {
 #define FSCRYPT_SET_CONTEXT_MAX_SIZE   40
 
 #ifdef CONFIG_FS_ENCRYPTION
+
 /*
- * fscrypt superblock flags
+ * If set, the fscrypt bounce page pool won't be allocated (unless another
+ * filesystem needs it).  Set this if the filesystem always uses its own bounce
+ * pages for writes and therefore won't need the fscrypt bounce page pool.
  */
 #define FS_CFLG_OWN_PAGES (1U << 1)
 
-/*
- * crypto operations for filesystems
- */
+/* Crypto operations for filesystems */
 struct fscrypt_operations {
+
+       /* Set of optional flags; see above for allowed flags */
        unsigned int flags;
+
+       /*
+        * If set, this is a filesystem-specific key description prefix that
+        * will be accepted for "logon" keys for v1 fscrypt policies, in
+        * addition to the generic prefix "fscrypt:".  This functionality is
+        * deprecated, so new filesystems shouldn't set this field.
+        */
        const char *key_prefix;
+
+       /*
+        * Get the fscrypt context of the given inode.
+        *
+        * @inode: the inode whose context to get
+        * @ctx: the buffer into which to get the context
+        * @len: length of the @ctx buffer in bytes
+        *
+        * Return: On success, returns the length of the context in bytes; this
+        *         may be less than @len.  On failure, returns -ENODATA if the
+        *         inode doesn't have a context, -ERANGE if the context is
+        *         longer than @len, or another -errno code.
+        */
        int (*get_context)(struct inode *inode, void *ctx, size_t len);
+
+       /*
+        * Set an fscrypt context on the given inode.
+        *
+        * @inode: the inode whose context to set.  The inode won't already have
+        *         an fscrypt context.
+        * @ctx: the context to set
+        * @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE)
+        * @fs_data: If called from fscrypt_set_context(), this will be the
+        *           value the filesystem passed to fscrypt_set_context().
+        *           Otherwise (i.e. when called from
+        *           FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL.
+        *
+        * i_rwsem will be held for write.
+        *
+        * Return: 0 on success, -errno on failure.
+        */
        int (*set_context)(struct inode *inode, const void *ctx, size_t len,
                           void *fs_data);
+
+       /*
+        * Get the dummy fscrypt policy in use on the filesystem (if any).
+        *
+        * Filesystems only need to implement this function if they support the
+        * test_dummy_encryption mount option.
+        *
+        * Return: A pointer to the dummy fscrypt policy, if the filesystem is
+        *         mounted with test_dummy_encryption; otherwise NULL.
+        */
        const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb);
+
+       /*
+        * Check whether a directory is empty.  i_rwsem will be held for write.
+        */
        bool (*empty_dir)(struct inode *inode);
+
+       /* The filesystem's maximum ciphertext filename length, in bytes */
        unsigned int max_namelen;
+
+       /*
+        * Check whether the filesystem's inode numbers and UUID are stable,
+        * meaning that they will never be changed even by offline operations
+        * such as filesystem shrinking and therefore can be used in the
+        * encryption without the possibility of files becoming unreadable.
+        *
+        * Filesystems only need to implement this function if they want to
+        * support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags.  These
+        * flags are designed to work around the limitations of UFS and eMMC
+        * inline crypto hardware, and they shouldn't be used in scenarios where
+        * such hardware isn't being used.
+        *
+        * Leaving this NULL is equivalent to always returning false.
+        */
        bool (*has_stable_inodes)(struct super_block *sb);
+
+       /*
+        * Get the number of bits that the filesystem uses to represent inode
+        * numbers and file logical block numbers.
+        *
+        * By default, both of these are assumed to be 64-bit.  This function
+        * can be implemented to declare that either or both of these numbers is
+        * shorter, which may allow the use of the
+        * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of
+        * inline crypto hardware whose maximum DUN length is less than 64 bits
+        * (e.g., eMMC v5.2 spec compliant hardware).  This function only needs
+        * to be implemented if support for one of these features is needed.
+        */
        void (*get_ino_and_lblk_bits)(struct super_block *sb,
                                      int *ino_bits_ret, int *lblk_bits_ret);
+
+       /*
+        * Return the number of block devices to which the filesystem may write
+        * encrypted file contents.
+        *
+        * If the filesystem can use multiple block devices (other than block
+        * devices that aren't used for encrypted file contents, such as
+        * external journal devices), and wants to support inline encryption,
+        * then it must implement this function.  Otherwise it's not needed.
+        */
        int (*get_num_devices)(struct super_block *sb);
+
+       /*
+        * If ->get_num_devices() returns a value greater than 1, then this
+        * function is called to get the array of request_queues that the
+        * filesystem is using -- one per block device.  (There may be duplicate
+        * entries in this array, as block devices can share a request_queue.)
+        */
        void (*get_devices)(struct super_block *sb,
                            struct request_queue **devs);
 };
@@ -253,6 +354,7 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
 const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
                                unsigned int max_size,
                                struct delayed_call *done);
+int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat);
 static inline void fscrypt_set_ops(struct super_block *sb,
                                   const struct fscrypt_operations *s_cop)
 {
@@ -583,6 +685,12 @@ static inline const char *fscrypt_get_symlink(struct inode *inode,
        return ERR_PTR(-EOPNOTSUPP);
 }
 
+static inline int fscrypt_symlink_getattr(const struct path *path,
+                                         struct kstat *stat)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline void fscrypt_set_ops(struct super_block *sb,
                                   const struct fscrypt_operations *s_cop)
 {
index f8acddc..12d3a7d 100644 (file)
@@ -30,6 +30,9 @@ static inline void fsnotify_name(struct inode *dir, __u32 mask,
                                 struct inode *child,
                                 const struct qstr *name, u32 cookie)
 {
+       if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0)
+               return;
+
        fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie);
 }
 
@@ -41,6 +44,9 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
 
 static inline void fsnotify_inode(struct inode *inode, __u32 mask)
 {
+       if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
+               return;
+
        if (S_ISDIR(inode->i_mode))
                mask |= FS_ISDIR;
 
@@ -53,6 +59,9 @@ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
 {
        struct inode *inode = d_inode(dentry);
 
+       if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
+               return 0;
+
        if (S_ISDIR(inode->i_mode)) {
                mask |= FS_ISDIR;
 
index a69f363..832e65f 100644 (file)
@@ -643,6 +643,22 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
 extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);
 
+/**
+ * ftrace_need_init_nop - return whether nop call sites should be initialized
+ *
+ * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
+ * need to call ftrace_init_nop() if the code is built with that flag.
+ * Architectures where this is not always the case may define their own
+ * condition.
+ *
+ * Return must be:
+ *  0      if ftrace_init_nop() should be called
+ *  Nonzero if ftrace_init_nop() should not be called
+ */
+
+#ifndef ftrace_need_init_nop
+#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
+#endif
 
 /**
  * ftrace_init_nop - initialize a nop call site
index 13b3417..c68d83c 100644 (file)
@@ -60,9 +60,6 @@ struct partition_meta_info {
  * device.
  * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl.
  *
- * ``GENHD_FL_UP`` (0x0010): indicates that the block device is "up",
- * with a similar meaning to network interfaces.
- *
  * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include
  * partition information in ``/proc/partitions`` or in the output of
  * printk_all_partitions().
@@ -97,7 +94,6 @@ struct partition_meta_info {
 /* 2 is unused (used to be GENHD_FL_DRIVERFS) */
 /* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */
 #define GENHD_FL_CD                            0x0008
-#define GENHD_FL_UP                            0x0010
 #define GENHD_FL_SUPPRESS_PARTITION_INFO       0x0020
 #define GENHD_FL_EXT_DEVT                      0x0040
 #define GENHD_FL_NATIVE_CAPACITY               0x0080
@@ -153,13 +149,15 @@ struct gendisk {
        unsigned long state;
 #define GD_NEED_PART_SCAN              0
 #define GD_READ_ONLY                   1
-#define GD_QUEUE_REF                   2
 
        struct mutex open_mutex;        /* open/close mutex */
        unsigned open_partitions;       /* number of open partitions */
 
+       struct backing_dev_info *bdi;
        struct kobject *slave_dir;
-
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+       struct list_head slave_bdevs;
+#endif
        struct timer_rand_state *random;
        atomic_t sync_io;               /* RAID */
        struct disk_events *ev;
@@ -172,8 +170,14 @@ struct gendisk {
        int node_id;
        struct badblocks *bb;
        struct lockdep_map lockdep_map;
+       u64 diskseq;
 };
 
+static inline bool disk_live(struct gendisk *disk)
+{
+       return !inode_unhashed(disk->part0->bd_inode);
+}
+
 /*
  * The gendisk is refcounted by the part0 block_device, and the bd_device
  * therein is also used for device model presentation in sysfs.
@@ -210,18 +214,12 @@ static inline dev_t disk_devt(struct gendisk *disk)
 void disk_uevent(struct gendisk *disk, enum kobject_action action);
 
 /* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk,
-                           const struct attribute_group **groups);
-static inline void add_disk(struct gendisk *disk)
+int device_add_disk(struct device *parent, struct gendisk *disk,
+               const struct attribute_group **groups);
+static inline int add_disk(struct gendisk *disk)
 {
-       device_add_disk(NULL, disk, NULL);
+       return device_add_disk(NULL, disk, NULL);
 }
-extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
-static inline void add_disk_no_queue_reg(struct gendisk *disk)
-{
-       device_add_disk_no_queue_reg(NULL, disk);
-}
-
 extern void del_gendisk(struct gendisk *gp);
 
 void set_disk_ro(struct gendisk *disk, bool read_only);
@@ -236,6 +234,7 @@ extern void disk_block_events(struct gendisk *disk);
 extern void disk_unblock_events(struct gendisk *disk);
 extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
 bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
+bool disk_force_media_change(struct gendisk *disk, unsigned int events);
 
 /* drivers/char/random.c */
 extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
@@ -259,26 +258,10 @@ static inline sector_t get_capacity(struct gendisk *disk)
 int bdev_disk_changed(struct gendisk *disk, bool invalidate);
 void blk_drop_partitions(struct gendisk *disk);
 
-extern struct gendisk *__alloc_disk_node(int minors, int node_id);
+struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
+               struct lock_class_key *lkclass);
 extern void put_disk(struct gendisk *disk);
-
-#define alloc_disk_node(minors, node_id)                               \
-({                                                                     \
-       static struct lock_class_key __key;                             \
-       const char *__name;                                             \
-       struct gendisk *__disk;                                         \
-                                                                       \
-       __name = "(gendisk_completion)"#minors"("#node_id")";           \
-                                                                       \
-       __disk = __alloc_disk_node(minors, node_id);                    \
-                                                                       \
-       if (__disk)                                                     \
-               lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \
-                                                                       \
-       __disk;                                                         \
-})
-
-#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
+struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
 
 /**
  * blk_alloc_disk - allocate a gendisk structure
@@ -291,15 +274,10 @@ extern void put_disk(struct gendisk *disk);
  */
 #define blk_alloc_disk(node_id)                                                \
 ({                                                                     \
-       struct gendisk *__disk = __blk_alloc_disk(node_id);             \
        static struct lock_class_key __key;                             \
                                                                        \
-       if (__disk)                                                     \
-               lockdep_init_map(&__disk->lockdep_map,                  \
-                       "(bio completion)", &__key, 0);                 \
-       __disk;                                                         \
+       __blk_alloc_disk(node_id, &__key);                              \
 })
-struct gendisk *__blk_alloc_disk(int node);
 void blk_cleanup_disk(struct gendisk *disk);
 
 int __register_blkdev(unsigned int major, const char *name,
@@ -316,9 +294,10 @@ void set_capacity(struct gendisk *disk, sector_t size);
 int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
 long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
 
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
 int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
 void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
+int bd_register_pending_holders(struct gendisk *disk);
 #else
 static inline int bd_link_disk_holder(struct block_device *bdev,
                                      struct gendisk *disk)
@@ -329,9 +308,14 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
                                         struct gendisk *disk)
 {
 }
-#endif /* CONFIG_SYSFS */
+static inline int bd_register_pending_holders(struct gendisk *disk)
+{
+       return 0;
+}
+#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
 
 dev_t part_devt(struct gendisk *disk, u8 partno);
+void inc_diskseq(struct gendisk *disk);
 dev_t blk_lookup_devt(const char *name, int partno);
 void blk_request_module(dev_t devt);
 #ifdef CONFIG_BLOCK
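
With device_add_disk()/add_disk() now returning an error code and the lock class key passed straight into __blk_alloc_disk(), a driver's registration path is expected to check the result and unwind with blk_cleanup_disk(). A minimal sketch of the updated pattern, assuming a hypothetical driver (my_fops, myblk0 and nr_sectors are illustrative):

	struct gendisk *disk;
	int ret;

	disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!disk)
		return -ENOMEM;

	disk->fops = &my_fops;
	snprintf(disk->disk_name, DISK_NAME_LEN, "myblk0");
	set_capacity(disk, nr_sectors);

	ret = add_disk(disk);		/* may now fail instead of returning void */
	if (ret)
		blk_cleanup_disk(disk);
	return ret;
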
index bb5e7b0..0ee1401 100644 (file)
@@ -318,16 +318,12 @@ struct clock_event_device;
 
 extern void hrtimer_interrupt(struct clock_event_device *dev);
 
-extern void clock_was_set_delayed(void);
-
 extern unsigned int hrtimer_resolution;
 
 #else
 
 #define hrtimer_resolution     (unsigned int)LOW_RES_NSEC
 
-static inline void clock_was_set_delayed(void) { }
-
 #endif
 
 static inline ktime_t
@@ -351,13 +347,13 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
                                                    timer->base->get_time());
 }
 
-extern void clock_was_set(void);
 #ifdef CONFIG_TIMERFD
 extern void timerfd_clock_was_set(void);
+extern void timerfd_resume(void);
 #else
 static inline void timerfd_clock_was_set(void) { }
+static inline void timerfd_resume(void) { }
 #endif
-extern void hrtimers_resume(void);
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 
index 53aa034..aaf4f1b 100644 (file)
@@ -41,7 +41,7 @@ struct in_device {
        unsigned long           mr_qri;         /* Query Response Interval */
        unsigned char           mr_qrv;         /* Query Robustness Variable */
        unsigned char           mr_gq_running;
-       unsigned char           mr_ifc_count;
+       u32                     mr_ifc_count;
        struct timer_list       mr_gq_timer;    /* general query timer */
        struct timer_list       mr_ifc_timer;   /* interface change timer */
 
index 2ed65b0..1f22a30 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/hrtimer.h>
 #include <linux/kref.h>
 #include <linux/workqueue.h>
+#include <linux/jump_label.h>
 
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
@@ -474,12 +475,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
 
 #ifdef CONFIG_IRQ_FORCED_THREADING
 # ifdef CONFIG_PREEMPT_RT
-#  define force_irqthreads     (true)
+#  define force_irqthreads()   (true)
 # else
-extern bool force_irqthreads;
+DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
+#  define force_irqthreads()   (static_branch_unlikely(&force_irqthreads_key))
 # endif
 #else
-#define force_irqthreads       (0)
+#define force_irqthreads()     (false)
 #endif
 
 #ifndef local_softirq_pending
index 04b650b..649a4d7 100644 (file)
@@ -7,17 +7,18 @@
 
 #if defined(CONFIG_IO_URING)
 struct sock *io_uring_get_socket(struct file *file);
-void __io_uring_cancel(struct files_struct *files);
+void __io_uring_cancel(bool cancel_all);
 void __io_uring_free(struct task_struct *tsk);
 
-static inline void io_uring_files_cancel(struct files_struct *files)
+static inline void io_uring_files_cancel(void)
 {
        if (current->io_uring)
-               __io_uring_cancel(files);
+               __io_uring_cancel(false);
 }
 static inline void io_uring_task_cancel(void)
 {
-       return io_uring_files_cancel(NULL);
+       if (current->io_uring)
+               __io_uring_cancel(true);
 }
 static inline void io_uring_free(struct task_struct *tsk)
 {
@@ -32,7 +33,7 @@ static inline struct sock *io_uring_get_socket(struct file *file)
 static inline void io_uring_task_cancel(void)
 {
 }
-static inline void io_uring_files_cancel(struct files_struct *files)
+static inline void io_uring_files_cancel(void)
 {
 }
 static inline void io_uring_free(struct task_struct *tsk)
index 479c1da..24f8489 100644 (file)
@@ -91,12 +91,29 @@ struct iomap {
        const struct iomap_page_ops *page_ops;
 };
 
-static inline sector_t
-iomap_sector(struct iomap *iomap, loff_t pos)
+static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
 {
        return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
 }
 
+/*
+ * Returns the inline data pointer for logical offset @pos.
+ */
+static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
+{
+       return iomap->inline_data + pos - iomap->offset;
+}
+
+/*
+ * Check if the mapping's length is within the valid range for inline data.
+ * This is used to guard against accessing data beyond the page inline_data
+ * points at.
+ */
+static inline bool iomap_inline_data_valid(const struct iomap *iomap)
+{
+       return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
+}
+
 /*
  * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
  * and page_done will be called for each page written to.  This only applies to
@@ -108,10 +125,9 @@ iomap_sector(struct iomap *iomap, loff_t pos)
  * associated page could not be obtained.
  */
 struct iomap_page_ops {
-       int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len,
-                       struct iomap *iomap);
+       int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
        void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
-                       struct page *page, struct iomap *iomap);
+                       struct page *page);
 };
 
 /*
@@ -124,6 +140,7 @@ struct iomap_page_ops {
 #define IOMAP_DIRECT           (1 << 4) /* direct I/O */
 #define IOMAP_NOWAIT           (1 << 5) /* do not block */
 #define IOMAP_OVERWRITE_ONLY   (1 << 6) /* only pure overwrites allowed */
+#define IOMAP_UNSHARE          (1 << 7) /* unshare_file_range */
 
 struct iomap_ops {
        /*
@@ -145,15 +162,61 @@ struct iomap_ops {
                        ssize_t written, unsigned flags, struct iomap *iomap);
 };
 
-/*
- * Main iomap iterator function.
+/**
+ * struct iomap_iter - Iterate through a range of a file
+ * @inode: Set at the start of the iteration and should not change.
+ * @pos: The current file position we are operating on.  It is updated by
+ *     calls to iomap_iter().  Treat as read-only in the body.
+ * @len: The remaining length of the file segment we're operating on.
+ *     It is updated at the same time as @pos.
+ * @processed: The number of bytes processed by the body in the most recent
+ *     iteration, or a negative errno. 0 causes the iteration to stop.
+ * @flags: Zero or more of the iomap_begin flags above.
+ * @iomap: Map describing the I/O iteration
+ * @srcmap: Source map for COW operations
  */
-typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
-               void *data, struct iomap *iomap, struct iomap *srcmap);
+struct iomap_iter {
+       struct inode *inode;
+       loff_t pos;
+       u64 len;
+       s64 processed;
+       unsigned flags;
+       struct iomap iomap;
+       struct iomap srcmap;
+};
 
-loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
-               unsigned flags, const struct iomap_ops *ops, void *data,
-               iomap_actor_t actor);
+int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
+
+/**
+ * iomap_length - length of the current iomap iteration
+ * @iter: iteration structure
+ *
+ * Returns the length that the operation applies to for the current iteration.
+ */
+static inline u64 iomap_length(const struct iomap_iter *iter)
+{
+       u64 end = iter->iomap.offset + iter->iomap.length;
+
+       if (iter->srcmap.type != IOMAP_HOLE)
+               end = min(end, iter->srcmap.offset + iter->srcmap.length);
+       return min(iter->len, end - iter->pos);
+}
+
+/**
+ * iomap_iter_srcmap - return the source map for the current iomap iteration
+ * @i: iteration structure
+ *
+ * Write operations on file systems with reflink support might require a
+ * source and a destination map.  This function returns the source map
+ * for a given operation, which may or may not be identical to the destination
+ * map in &i->iomap.
+ */
+static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
+{
+       if (i->srcmap.type != IOMAP_HOLE)
+               return &i->srcmap;
+       return &i->iomap;
+}
 
 ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
                const struct iomap_ops *ops);
@@ -250,8 +313,8 @@ int iomap_writepages(struct address_space *mapping,
 struct iomap_dio_ops {
        int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
                      unsigned flags);
-       blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap,
-                       struct bio *bio, loff_t file_offset);
+       blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
+                             loff_t file_offset);
 };
 
 /*
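
Taken together, struct iomap_iter, iomap_iter(), iomap_length() and iomap_iter_srcmap() replace the old iomap_apply()/iomap_actor_t callback scheme with an explicit loop driven by the caller. A minimal sketch of the intended calling pattern, assuming a hypothetical operation (my_op_iter and the flag choice are illustrative):

	static loff_t my_op_iter(struct iomap_iter *iter)
	{
		const struct iomap *srcmap = iomap_iter_srcmap(iter);
		u64 length = iomap_length(iter);

		/* operate on [iter->pos, iter->pos + length) described by srcmap */

		return length;	/* bytes handled; 0 or a negative errno ends the loop */
	}

	static int my_op(struct inode *inode, loff_t pos, u64 len,
			 const struct iomap_ops *ops)
	{
		struct iomap_iter iter = {
			.inode	= inode,
			.pos	= pos,
			.len	= len,
			.flags	= IOMAP_REPORT,
		};
		int ret;

		while ((ret = iomap_iter(&iter, ops)) > 0)
			iter.processed = my_op_iter(&iter);
		return ret;
	}
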
index e9bfe69..3f53bc2 100644 (file)
@@ -6,46 +6,22 @@
 #include <linux/sched/rt.h>
 #include <linux/iocontext.h>
 
-/*
- * Gives us 8 prio classes with 13-bits of data for each class
- */
-#define IOPRIO_CLASS_SHIFT     (13)
-#define IOPRIO_PRIO_MASK       ((1UL << IOPRIO_CLASS_SHIFT) - 1)
-
-#define IOPRIO_PRIO_CLASS(mask)        ((mask) >> IOPRIO_CLASS_SHIFT)
-#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
-#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
-
-#define ioprio_valid(mask)     (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
+#include <uapi/linux/ioprio.h>
 
 /*
- * These are the io priority groups as implemented by CFQ. RT is the realtime
- * class, it always gets premium service. BE is the best-effort scheduling
- * class, the default for any process. IDLE is the idle scheduling class, it
- * is only served when no one else is using the disk.
+ * Default IO priority.
  */
-enum {
-       IOPRIO_CLASS_NONE,
-       IOPRIO_CLASS_RT,
-       IOPRIO_CLASS_BE,
-       IOPRIO_CLASS_IDLE,
-};
+#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_BE_NORM)
 
 /*
- * 8 best effort priority levels are supported
+ * Check that a priority value has a valid class.
  */
-#define IOPRIO_BE_NR   (8)
-
-enum {
-       IOPRIO_WHO_PROCESS = 1,
-       IOPRIO_WHO_PGRP,
-       IOPRIO_WHO_USER,
-};
+static inline bool ioprio_valid(unsigned short ioprio)
+{
+       unsigned short class = IOPRIO_PRIO_CLASS(ioprio);
 
-/*
- * Fallback BE priority
- */
-#define IOPRIO_NORM    (4)
+       return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE;
+}
 
 /*
  * if process has set io priority explicitly, use that. if not, convert
@@ -80,7 +56,7 @@ static inline int get_current_ioprio(void)
 
        if (ioc)
                return ioc->ioprio;
-       return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+       return IOPRIO_DEFAULT;
 }
 
 /*
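
The class/data packing macros now come from <uapi/linux/ioprio.h>, and ioprio_valid() only accepts priorities whose class is RT, BE or IDLE. A small illustrative use of the helpers (the fallback policy shown is just an example):

	unsigned short ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

	if (!ioprio_valid(ioprio))
		ioprio = IOPRIO_DEFAULT;	/* BE class, IOPRIO_BE_NORM level */

	pr_debug("class %u, level %u\n",
		 IOPRIO_PRIO_CLASS(ioprio), IOPRIO_PRIO_DATA(ioprio));
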
index 8e9a9ae..c8293c8 100644 (file)
@@ -569,6 +569,7 @@ struct irq_chip {
  * IRQCHIP_SUPPORTS_NMI:              Chip can deliver NMIs, only for root irqchips
  * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND:  Invokes __enable_irq()/__disable_irq() for wake irqs
  *                                    in the suspend path if they are in disabled state
+ * IRQCHIP_AFFINITY_PRE_STARTUP:      Default affinity update before startup
  */
 enum {
        IRQCHIP_SET_TYPE_MASKED                 = (1 <<  0),
@@ -581,6 +582,7 @@ enum {
        IRQCHIP_SUPPORTS_LEVEL_MSI              = (1 <<  7),
        IRQCHIP_SUPPORTS_NMI                    = (1 <<  8),
        IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND        = (1 <<  9),
+       IRQCHIP_AFFINITY_PRE_STARTUP            = (1 << 10),
 };
 
 #include <linux/irqdesc.h>
index a70d1ea..3fe6dd8 100644 (file)
@@ -51,10 +51,11 @@ extern atomic_t kfence_allocation_gate;
 static __always_inline bool is_kfence_address(const void *addr)
 {
        /*
-        * The non-NULL check is required in case the __kfence_pool pointer was
-        * never initialized; keep it in the slow-path after the range-check.
+        * The __kfence_pool != NULL check is required to deal with the case
+        * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
+        * the slow-path after the range-check!
         */
-       return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr);
+       return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
 }
 
 /**
index 3fcd242..860e63f 100644 (file)
@@ -161,6 +161,10 @@ enum {
        ATA_DFLAG_D_SENSE       = (1 << 29), /* Descriptor sense requested */
        ATA_DFLAG_ZAC           = (1 << 30), /* ZAC device */
 
+       ATA_DFLAG_FEATURES_MASK = ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \
+                                 ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \
+                                 ATA_DFLAG_NCQ_PRIO,
+
        ATA_DEV_UNKNOWN         = 0,    /* unknown device */
        ATA_DEV_ATA             = 1,    /* ATA device */
        ATA_DEV_ATA_UNSUP       = 2,    /* ATA device (unsupported) */
@@ -535,6 +539,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes)
 extern struct device_attribute dev_attr_unload_heads;
 #ifdef CONFIG_SATA_HOST
 extern struct device_attribute dev_attr_link_power_management_policy;
+extern struct device_attribute dev_attr_ncq_prio_supported;
 extern struct device_attribute dev_attr_ncq_prio_enable;
 extern struct device_attribute dev_attr_em_message_type;
 extern struct device_attribute dev_attr_em_message;
@@ -1454,7 +1459,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
 
 static inline bool ata_is_host_link(const struct ata_link *link)
 {
-       return 1;
+       return true;
 }
 #endif /* CONFIG_SATA_PMP */
 
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
deleted file mode 100644 (file)
index 0908abd..0000000
+++ /dev/null
@@ -1,697 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef NVM_H
-#define NVM_H
-
-#include <linux/blkdev.h>
-#include <linux/types.h>
-#include <uapi/linux/lightnvm.h>
-
-enum {
-       NVM_IO_OK = 0,
-       NVM_IO_REQUEUE = 1,
-       NVM_IO_DONE = 2,
-       NVM_IO_ERR = 3,
-
-       NVM_IOTYPE_NONE = 0,
-       NVM_IOTYPE_GC = 1,
-};
-
-/* common format */
-#define NVM_GEN_CH_BITS  (8)
-#define NVM_GEN_LUN_BITS (8)
-#define NVM_GEN_BLK_BITS (16)
-#define NVM_GEN_RESERVED (32)
-
-/* 1.2 format */
-#define NVM_12_PG_BITS  (16)
-#define NVM_12_PL_BITS  (4)
-#define NVM_12_SEC_BITS (4)
-#define NVM_12_RESERVED (8)
-
-/* 2.0 format */
-#define NVM_20_SEC_BITS (24)
-#define NVM_20_RESERVED (8)
-
-enum {
-       NVM_OCSSD_SPEC_12 = 12,
-       NVM_OCSSD_SPEC_20 = 20,
-};
-
-struct ppa_addr {
-       /* Generic structure for all addresses */
-       union {
-               /* generic device format */
-               struct {
-                       u64 ch          : NVM_GEN_CH_BITS;
-                       u64 lun         : NVM_GEN_LUN_BITS;
-                       u64 blk         : NVM_GEN_BLK_BITS;
-                       u64 reserved    : NVM_GEN_RESERVED;
-               } a;
-
-               /* 1.2 device format */
-               struct {
-                       u64 ch          : NVM_GEN_CH_BITS;
-                       u64 lun         : NVM_GEN_LUN_BITS;
-                       u64 blk         : NVM_GEN_BLK_BITS;
-                       u64 pg          : NVM_12_PG_BITS;
-                       u64 pl          : NVM_12_PL_BITS;
-                       u64 sec         : NVM_12_SEC_BITS;
-                       u64 reserved    : NVM_12_RESERVED;
-               } g;
-
-               /* 2.0 device format */
-               struct {
-                       u64 grp         : NVM_GEN_CH_BITS;
-                       u64 pu          : NVM_GEN_LUN_BITS;
-                       u64 chk         : NVM_GEN_BLK_BITS;
-                       u64 sec         : NVM_20_SEC_BITS;
-                       u64 reserved    : NVM_20_RESERVED;
-               } m;
-
-               struct {
-                       u64 line        : 63;
-                       u64 is_cached   : 1;
-               } c;
-
-               u64 ppa;
-       };
-};
-
-struct nvm_rq;
-struct nvm_id;
-struct nvm_dev;
-struct nvm_tgt_dev;
-struct nvm_chk_meta;
-
-typedef int (nvm_id_fn)(struct nvm_dev *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
-typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
-                                                       struct nvm_chk_meta *);
-typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
-typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
-                                                               dma_addr_t *);
-typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
-
-struct nvm_dev_ops {
-       nvm_id_fn               *identity;
-       nvm_op_bb_tbl_fn        *get_bb_tbl;
-       nvm_op_set_bb_fn        *set_bb_tbl;
-
-       nvm_get_chk_meta_fn     *get_chk_meta;
-
-       nvm_submit_io_fn        *submit_io;
-
-       nvm_create_dma_pool_fn  *create_dma_pool;
-       nvm_destroy_dma_pool_fn *destroy_dma_pool;
-       nvm_dev_dma_alloc_fn    *dev_dma_alloc;
-       nvm_dev_dma_free_fn     *dev_dma_free;
-};
-
-#ifdef CONFIG_NVM
-
-#include <linux/file.h>
-#include <linux/dmapool.h>
-
-enum {
-       /* HW Responsibilities */
-       NVM_RSP_L2P     = 1 << 0,
-       NVM_RSP_ECC     = 1 << 1,
-
-       /* Physical Adressing Mode */
-       NVM_ADDRMODE_LINEAR     = 0,
-       NVM_ADDRMODE_CHANNEL    = 1,
-
-       /* Plane programming mode for LUN */
-       NVM_PLANE_SINGLE        = 1,
-       NVM_PLANE_DOUBLE        = 2,
-       NVM_PLANE_QUAD          = 4,
-
-       /* Status codes */
-       NVM_RSP_SUCCESS         = 0x0,
-       NVM_RSP_NOT_CHANGEABLE  = 0x1,
-       NVM_RSP_ERR_FAILWRITE   = 0x40ff,
-       NVM_RSP_ERR_EMPTYPAGE   = 0x42ff,
-       NVM_RSP_ERR_FAILECC     = 0x4281,
-       NVM_RSP_ERR_FAILCRC     = 0x4004,
-       NVM_RSP_WARN_HIGHECC    = 0x4700,
-
-       /* Device opcodes */
-       NVM_OP_PWRITE           = 0x91,
-       NVM_OP_PREAD            = 0x92,
-       NVM_OP_ERASE            = 0x90,
-
-       /* PPA Command Flags */
-       NVM_IO_SNGL_ACCESS      = 0x0,
-       NVM_IO_DUAL_ACCESS      = 0x1,
-       NVM_IO_QUAD_ACCESS      = 0x2,
-
-       /* NAND Access Modes */
-       NVM_IO_SUSPEND          = 0x80,
-       NVM_IO_SLC_MODE         = 0x100,
-       NVM_IO_SCRAMBLE_ENABLE  = 0x200,
-
-       /* Block Types */
-       NVM_BLK_T_FREE          = 0x0,
-       NVM_BLK_T_BAD           = 0x1,
-       NVM_BLK_T_GRWN_BAD      = 0x2,
-       NVM_BLK_T_DEV           = 0x4,
-       NVM_BLK_T_HOST          = 0x8,
-
-       /* Memory capabilities */
-       NVM_ID_CAP_SLC          = 0x1,
-       NVM_ID_CAP_CMD_SUSPEND  = 0x2,
-       NVM_ID_CAP_SCRAMBLE     = 0x4,
-       NVM_ID_CAP_ENCRYPT      = 0x8,
-
-       /* Memory types */
-       NVM_ID_FMTYPE_SLC       = 0,
-       NVM_ID_FMTYPE_MLC       = 1,
-
-       /* Device capabilities */
-       NVM_ID_DCAP_BBLKMGMT    = 0x1,
-       NVM_UD_DCAP_ECC         = 0x2,
-};
-
-struct nvm_id_lp_mlc {
-       u16     num_pairs;
-       u8      pairs[886];
-};
-
-struct nvm_id_lp_tbl {
-       __u8    id[8];
-       struct nvm_id_lp_mlc mlc;
-};
-
-struct nvm_addrf_12 {
-       u8      ch_len;
-       u8      lun_len;
-       u8      blk_len;
-       u8      pg_len;
-       u8      pln_len;
-       u8      sec_len;
-
-       u8      ch_offset;
-       u8      lun_offset;
-       u8      blk_offset;
-       u8      pg_offset;
-       u8      pln_offset;
-       u8      sec_offset;
-
-       u64     ch_mask;
-       u64     lun_mask;
-       u64     blk_mask;
-       u64     pg_mask;
-       u64     pln_mask;
-       u64     sec_mask;
-};
-
-struct nvm_addrf {
-       u8      ch_len;
-       u8      lun_len;
-       u8      chk_len;
-       u8      sec_len;
-       u8      rsv_len[2];
-
-       u8      ch_offset;
-       u8      lun_offset;
-       u8      chk_offset;
-       u8      sec_offset;
-       u8      rsv_off[2];
-
-       u64     ch_mask;
-       u64     lun_mask;
-       u64     chk_mask;
-       u64     sec_mask;
-       u64     rsv_mask[2];
-};
-
-enum {
-       /* Chunk states */
-       NVM_CHK_ST_FREE =       1 << 0,
-       NVM_CHK_ST_CLOSED =     1 << 1,
-       NVM_CHK_ST_OPEN =       1 << 2,
-       NVM_CHK_ST_OFFLINE =    1 << 3,
-
-       /* Chunk types */
-       NVM_CHK_TP_W_SEQ =      1 << 0,
-       NVM_CHK_TP_W_RAN =      1 << 1,
-       NVM_CHK_TP_SZ_SPEC =    1 << 4,
-};
-
-/*
- * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
- * buffer can be used when converting from little endian to cpu addressing.
- */
-struct nvm_chk_meta {
-       u8      state;
-       u8      type;
-       u8      wi;
-       u8      rsvd[5];
-       u64     slba;
-       u64     cnlb;
-       u64     wp;
-};
-
-struct nvm_target {
-       struct list_head list;
-       struct nvm_tgt_dev *dev;
-       struct nvm_tgt_type *type;
-       struct gendisk *disk;
-};
-
-#define ADDR_EMPTY (~0ULL)
-
-#define NVM_TARGET_DEFAULT_OP (101)
-#define NVM_TARGET_MIN_OP (3)
-#define NVM_TARGET_MAX_OP (80)
-
-#define NVM_VERSION_MAJOR 1
-#define NVM_VERSION_MINOR 0
-#define NVM_VERSION_PATCH 0
-
-#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */
-
-struct nvm_rq;
-typedef void (nvm_end_io_fn)(struct nvm_rq *);
-
-struct nvm_rq {
-       struct nvm_tgt_dev *dev;
-
-       struct bio *bio;
-
-       union {
-               struct ppa_addr ppa_addr;
-               dma_addr_t dma_ppa_list;
-       };
-
-       struct ppa_addr *ppa_list;
-
-       void *meta_list;
-       dma_addr_t dma_meta_list;
-
-       nvm_end_io_fn *end_io;
-
-       uint8_t opcode;
-       uint16_t nr_ppas;
-       uint16_t flags;
-
-       u64 ppa_status; /* ppa media status */
-       int error;
-
-       int is_seq; /* Sequential hint flag. 1.2 only */
-
-       void *private;
-};
-
-static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
-{
-       return pdu - sizeof(struct nvm_rq);
-}
-
-static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
-{
-       return rqdata + 1;
-}
-
-static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
-{
-       return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-}
-
-enum {
-       NVM_BLK_ST_FREE =       0x1,    /* Free block */
-       NVM_BLK_ST_TGT =        0x2,    /* Block in use by target */
-       NVM_BLK_ST_BAD =        0x8,    /* Bad block */
-};
-
-/* Instance geometry */
-struct nvm_geo {
-       /* device reported version */
-       u8      major_ver_id;
-       u8      minor_ver_id;
-
-       /* kernel short version */
-       u8      version;
-
-       /* instance specific geometry */
-       int num_ch;
-       int num_lun;            /* per channel */
-
-       /* calculated values */
-       int all_luns;           /* across channels */
-       int all_chunks;         /* across channels */
-
-       int op;                 /* over-provision in instance */
-
-       sector_t total_secs;    /* across channels */
-
-       /* chunk geometry */
-       u32     num_chk;        /* chunks per lun */
-       u32     clba;           /* sectors per chunk */
-       u16     csecs;          /* sector size */
-       u16     sos;            /* out-of-band area size */
-       bool    ext;            /* metadata in extended data buffer */
-       u32     mdts;           /* Max data transfer size*/
-
-       /* device write constrains */
-       u32     ws_min;         /* minimum write size */
-       u32     ws_opt;         /* optimal write size */
-       u32     mw_cunits;      /* distance required for successful read */
-       u32     maxoc;          /* maximum open chunks */
-       u32     maxocpu;        /* maximum open chunks per parallel unit */
-
-       /* device capabilities */
-       u32     mccap;
-
-       /* device timings */
-       u32     trdt;           /* Avg. Tread (ns) */
-       u32     trdm;           /* Max Tread (ns) */
-       u32     tprt;           /* Avg. Tprog (ns) */
-       u32     tprm;           /* Max Tprog (ns) */
-       u32     tbet;           /* Avg. Terase (ns) */
-       u32     tbem;           /* Max Terase (ns) */
-
-       /* generic address format */
-       struct nvm_addrf addrf;
-
-       /* 1.2 compatibility */
-       u8      vmnt;
-       u32     cap;
-       u32     dom;
-
-       u8      mtype;
-       u8      fmtype;
-
-       u16     cpar;
-       u32     mpos;
-
-       u8      num_pln;
-       u8      pln_mode;
-       u16     num_pg;
-       u16     fpg_sz;
-};
-
-/* sub-device structure */
-struct nvm_tgt_dev {
-       /* Device information */
-       struct nvm_geo geo;
-
-       /* Base ppas for target LUNs */
-       struct ppa_addr *luns;
-
-       struct request_queue *q;
-
-       struct nvm_dev *parent;
-       void *map;
-};
-
-struct nvm_dev {
-       struct nvm_dev_ops *ops;
-
-       struct list_head devices;
-
-       /* Device information */
-       struct nvm_geo geo;
-
-       unsigned long *lun_map;
-       void *dma_pool;
-
-       /* Backend device */
-       struct request_queue *q;
-       char name[DISK_NAME_LEN];
-       void *private_data;
-
-       struct kref ref;
-       void *rmap;
-
-       struct mutex mlock;
-       spinlock_t lock;
-
-       /* target management */
-       struct list_head area_list;
-       struct list_head targets;
-};
-
-static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
-                                                 struct ppa_addr r)
-{
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr l;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
-               l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
-               l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
-               l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
-               l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
-               l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
-               l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
-       } else {
-               struct nvm_addrf *lbaf = &geo->addrf;
-
-               l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
-               l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
-               l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
-               l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
-       }
-
-       return l;
-}
-
-static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
-                                                 struct ppa_addr r)
-{
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr l;
-
-       l.ppa = 0;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
-               l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
-               l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
-               l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
-               l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
-               l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
-               l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
-       } else {
-               struct nvm_addrf *lbaf = &geo->addrf;
-
-               l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
-               l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
-               l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
-               l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
-       }
-
-       return l;
-}
-
-static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
-                                   struct ppa_addr p)
-{
-       struct nvm_geo *geo = &dev->geo;
-       u64 caddr;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;
-
-               caddr = (u64)p.g.pg << ppaf->pg_offset;
-               caddr |= (u64)p.g.pl << ppaf->pln_offset;
-               caddr |= (u64)p.g.sec << ppaf->sec_offset;
-       } else {
-               caddr = p.m.sec;
-       }
-
-       return caddr;
-}
-
-static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
-                                                void *addrf, u32 ppa32)
-{
-       struct ppa_addr ppa64;
-
-       ppa64.ppa = 0;
-
-       if (ppa32 == -1) {
-               ppa64.ppa = ADDR_EMPTY;
-       } else if (ppa32 & (1U << 31)) {
-               ppa64.c.line = ppa32 & ((~0U) >> 1);
-               ppa64.c.is_cached = 1;
-       } else {
-               struct nvm_geo *geo = &dev->geo;
-
-               if (geo->version == NVM_OCSSD_SPEC_12) {
-                       struct nvm_addrf_12 *ppaf = addrf;
-
-                       ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
-                                                       ppaf->ch_offset;
-                       ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
-                                                       ppaf->lun_offset;
-                       ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
-                                                       ppaf->blk_offset;
-                       ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
-                                                       ppaf->pg_offset;
-                       ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
-                                                       ppaf->pln_offset;
-                       ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
-                                                       ppaf->sec_offset;
-               } else {
-                       struct nvm_addrf *lbaf = addrf;
-
-                       ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
-                                                       lbaf->ch_offset;
-                       ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
-                                                       lbaf->lun_offset;
-                       ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
-                                                       lbaf->chk_offset;
-                       ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
-                                                       lbaf->sec_offset;
-               }
-       }
-
-       return ppa64;
-}
-
-static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
-                                    void *addrf, struct ppa_addr ppa64)
-{
-       u32 ppa32 = 0;
-
-       if (ppa64.ppa == ADDR_EMPTY) {
-               ppa32 = ~0U;
-       } else if (ppa64.c.is_cached) {
-               ppa32 |= ppa64.c.line;
-               ppa32 |= 1U << 31;
-       } else {
-               struct nvm_geo *geo = &dev->geo;
-
-               if (geo->version == NVM_OCSSD_SPEC_12) {
-                       struct nvm_addrf_12 *ppaf = addrf;
-
-                       ppa32 |= ppa64.g.ch << ppaf->ch_offset;
-                       ppa32 |= ppa64.g.lun << ppaf->lun_offset;
-                       ppa32 |= ppa64.g.blk << ppaf->blk_offset;
-                       ppa32 |= ppa64.g.pg << ppaf->pg_offset;
-                       ppa32 |= ppa64.g.pl << ppaf->pln_offset;
-                       ppa32 |= ppa64.g.sec << ppaf->sec_offset;
-               } else {
-                       struct nvm_addrf *lbaf = addrf;
-
-                       ppa32 |= ppa64.m.grp << lbaf->ch_offset;
-                       ppa32 |= ppa64.m.pu << lbaf->lun_offset;
-                       ppa32 |= ppa64.m.chk << lbaf->chk_offset;
-                       ppa32 |= ppa64.m.sec << lbaf->sec_offset;
-               }
-       }
-
-       return ppa32;
-}
-
-static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
-                                     struct ppa_addr *ppa)
-{
-       struct nvm_geo *geo = &dev->geo;
-       int last = 0;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               int sec = ppa->g.sec;
-
-               sec++;
-               if (sec == geo->ws_min) {
-                       int pg = ppa->g.pg;
-
-                       sec = 0;
-                       pg++;
-                       if (pg == geo->num_pg) {
-                               int pl = ppa->g.pl;
-
-                               pg = 0;
-                               pl++;
-                               if (pl == geo->num_pln)
-                                       last = 1;
-
-                               ppa->g.pl = pl;
-                       }
-                       ppa->g.pg = pg;
-               }
-               ppa->g.sec = sec;
-       } else {
-               ppa->m.sec++;
-               if (ppa->m.sec == geo->clba)
-                       last = 1;
-       }
-
-       return last;
-}
-
-typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
-                               int flags);
-typedef void (nvm_tgt_exit_fn)(void *, bool);
-typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
-typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
-
-enum {
-       NVM_TGT_F_DEV_L2P = 0,
-       NVM_TGT_F_HOST_L2P = 1 << 0,
-};
-
-struct nvm_tgt_type {
-       const char *name;
-       unsigned int version[3];
-       int flags;
-
-       /* target entry points */
-       const struct block_device_operations *bops;
-       nvm_tgt_capacity_fn *capacity;
-
-       /* module-specific init/teardown */
-       nvm_tgt_init_fn *init;
-       nvm_tgt_exit_fn *exit;
-
-       /* sysfs */
-       nvm_tgt_sysfs_init_fn *sysfs_init;
-       nvm_tgt_sysfs_exit_fn *sysfs_exit;
-
-       /* For internal use */
-       struct list_head list;
-       struct module *owner;
-};
-
-extern int nvm_register_tgt_type(struct nvm_tgt_type *);
-extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
-
-extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
-extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
-
-extern struct nvm_dev *nvm_alloc_dev(int);
-extern int nvm_register(struct nvm_dev *);
-extern void nvm_unregister(struct nvm_dev *);
-
-extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
-                             int, struct nvm_chk_meta *);
-extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
-                             int, int);
-extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *);
-extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *);
-extern void nvm_end_io(struct nvm_rq *);
-
-#else /* CONFIG_NVM */
-struct nvm_dev_ops;
-
-static inline struct nvm_dev *nvm_alloc_dev(int node)
-{
-       return ERR_PTR(-EINVAL);
-}
-static inline int nvm_register(struct nvm_dev *dev)
-{
-       return -EINVAL;
-}
-static inline void nvm_unregister(struct nvm_dev *dev) {}
-#endif /* CONFIG_NVM */
-#endif /* LIGHTNVM.H */
index 17b5943..fd3d0b3 100644 (file)
@@ -41,6 +41,8 @@ int linear_range_get_selector_low(const struct linear_range *r,
 int linear_range_get_selector_high(const struct linear_range *r,
                                   unsigned int val, unsigned int *selector,
                                   bool *found);
+void linear_range_get_selector_within(const struct linear_range *r,
+                                     unsigned int val, unsigned int *selector);
 int linear_range_get_selector_low_array(const struct linear_range *r,
                                        int ranges, unsigned int val,
                                        unsigned int *selector, bool *found);
index ded90b0..975e33b 100644 (file)
@@ -6,6 +6,8 @@
 #include <linux/percpu-defs.h>
 #include <linux/lockdep.h>
 
+#ifndef CONFIG_PREEMPT_RT
+
 typedef struct {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
@@ -14,29 +16,14 @@ typedef struct {
 } local_lock_t;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LL_DEP_MAP_INIT(lockname)                     \
+# define LOCAL_LOCK_DEBUG_INIT(lockname)               \
        .dep_map = {                                    \
                .name = #lockname,                      \
                .wait_type_inner = LD_WAIT_CONFIG,      \
-               .lock_type = LD_LOCK_PERCPU,                    \
-       }
-#else
-# define LL_DEP_MAP_INIT(lockname)
-#endif
-
-#define INIT_LOCAL_LOCK(lockname)      { LL_DEP_MAP_INIT(lockname) }
-
-#define __local_lock_init(lock)                                        \
-do {                                                           \
-       static struct lock_class_key __key;                     \
-                                                               \
-       debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
-       lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
-                             LD_WAIT_CONFIG, LD_WAIT_INV,      \
-                             LD_LOCK_PERCPU);                  \
-} while (0)
+               .lock_type = LD_LOCK_PERCPU,            \
+       },                                              \
+       .owner = NULL,
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 static inline void local_lock_acquire(local_lock_t *l)
 {
        lock_map_acquire(&l->dep_map);
@@ -51,11 +38,30 @@ static inline void local_lock_release(local_lock_t *l)
        lock_map_release(&l->dep_map);
 }
 
+static inline void local_lock_debug_init(local_lock_t *l)
+{
+       l->owner = NULL;
+}
 #else /* CONFIG_DEBUG_LOCK_ALLOC */
+# define LOCAL_LOCK_DEBUG_INIT(lockname)
 static inline void local_lock_acquire(local_lock_t *l) { }
 static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
 
+#define INIT_LOCAL_LOCK(lockname)      { LOCAL_LOCK_DEBUG_INIT(lockname) }
+
+#define __local_lock_init(lock)                                        \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+       lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
+                             0, LD_WAIT_CONFIG, LD_WAIT_INV,   \
+                             LD_LOCK_PERCPU);                  \
+       local_lock_debug_init(lock);                            \
+} while (0)
+
 #define __local_lock(lock)                                     \
        do {                                                    \
                preempt_disable();                              \
@@ -91,3 +97,45 @@ static inline void local_lock_release(local_lock_t *l) { }
                local_lock_release(this_cpu_ptr(lock));         \
                local_irq_restore(flags);                       \
        } while (0)
+
+#else /* !CONFIG_PREEMPT_RT */
+
+/*
+ * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
+ * critical section while staying preemptible.
+ */
+typedef spinlock_t local_lock_t;
+
+#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+
+#define __local_lock_init(l)                                   \
+       do {                                                    \
+               local_spin_lock_init((l));                      \
+       } while (0)
+
+#define __local_lock(__lock)                                   \
+       do {                                                    \
+               migrate_disable();                              \
+               spin_lock(this_cpu_ptr((__lock)));              \
+       } while (0)
+
+#define __local_lock_irq(lock)                 __local_lock(lock)
+
+#define __local_lock_irqsave(lock, flags)                      \
+       do {                                                    \
+               typecheck(unsigned long, flags);                \
+               flags = 0;                                      \
+               __local_lock(lock);                             \
+       } while (0)
+
+#define __local_unlock(__lock)                                 \
+       do {                                                    \
+               spin_unlock(this_cpu_ptr((__lock)));            \
+               migrate_enable();                               \
+       } while (0)
+
+#define __local_unlock_irq(lock)               __local_unlock(lock)
+
+#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+
+#endif /* CONFIG_PREEMPT_RT */
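
The behavioural difference only shows up at lock time: on !PREEMPT_RT the per-CPU data is protected by disabling preemption (or interrupts), while on PREEMPT_RT the same local_lock_t becomes a per-CPU spinlock and the section stays preemptible. The declaration and usage pattern is unchanged either way; a minimal sketch with hypothetical names:

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	struct my_pcpu_state {
		local_lock_t	lock;
		unsigned long	count;
	};

	static DEFINE_PER_CPU(struct my_pcpu_state, my_state) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void my_bump(void)
	{
		/* preempt_disable() on !RT, per-CPU spin_lock() on RT */
		local_lock(&my_state.lock);
		__this_cpu_inc(my_state.count);
		local_unlock(&my_state.lock);
	}
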
index 0520c0c..3bc9f74 100644 (file)
@@ -27,7 +27,8 @@ struct rpc_task;
 struct nlmsvc_binding {
        __be32                  (*fopen)(struct svc_rqst *,
                                                struct nfs_fh *,
-                                               struct file **);
+                                               struct file **,
+                                               int mode);
        void                    (*fclose)(struct file *);
 };
 
index 666f5f3..c4ae650 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef LINUX_LOCKD_LOCKD_H
 #define LINUX_LOCKD_LOCKD_H
 
+/* XXX: a lot of this should really be under fs/lockd. */
+
 #include <linux/in.h>
 #include <linux/in6.h>
 #include <net/ipv6.h>
@@ -154,7 +156,8 @@ struct nlm_rqst {
 struct nlm_file {
        struct hlist_node       f_list;         /* linked list */
        struct nfs_fh           f_handle;       /* NFS file handle */
-       struct file *           f_file;         /* VFS file pointer */
+       struct file *           f_file[2];      /* VFS file pointers,
+                                                  indexed by O_ flags */
        struct nlm_share *      f_shares;       /* DOS shares */
        struct list_head        f_blocks;       /* blocked locks */
        unsigned int            f_locks;        /* guesstimate # of locks */
@@ -267,6 +270,7 @@ typedef int   (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref);
 /*
  * Server-side lock handling
  */
+int              lock_to_openmode(struct file_lock *);
 __be32           nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
                              struct nlm_host *, struct nlm_lock *, int,
                              struct nlm_cookie *, int);
@@ -286,7 +290,7 @@ void                  nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t);
  * File handling for the server personality
  */
 __be32           nlm_lookup_file(struct svc_rqst *, struct nlm_file **,
-                                       struct nfs_fh *);
+                                       struct nlm_lock *);
 void             nlm_release_file(struct nlm_file *);
 void             nlmsvc_release_lockowner(struct nlm_lock *);
 void             nlmsvc_mark_resources(struct net *);
@@ -301,7 +305,8 @@ int           nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
 
 static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
 {
-       return locks_inode(file->f_file);
+       return locks_inode(file->f_file[O_RDONLY] ?
+                          file->f_file[O_RDONLY] : file->f_file[O_WRONLY]);
 }
 
 static inline int __nlm_privileged_request4(const struct sockaddr *sap)
index bfe5c48..2479792 100644 (file)
@@ -612,12 +612,15 @@ static inline bool mem_cgroup_disabled(void)
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
-                                                 struct mem_cgroup *memcg,
-                                                 bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+                                        struct mem_cgroup *memcg,
+                                        unsigned long *min,
+                                        unsigned long *low)
 {
+       *min = *low = 0;
+
        if (mem_cgroup_disabled())
-               return 0;
+               return;
 
        /*
         * There is no reclaim protection applied to a targeted reclaim.
@@ -653,13 +656,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
         *
         */
        if (root == memcg)
-               return 0;
-
-       if (in_low_reclaim)
-               return READ_ONCE(memcg->memory.emin);
+               return;
 
-       return max(READ_ONCE(memcg->memory.emin),
-                  READ_ONCE(memcg->memory.elow));
+       *min = READ_ONCE(memcg->memory.emin);
+       *low = READ_ONCE(memcg->memory.elow);
 }
 
 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
@@ -1147,11 +1147,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 {
 }
 
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
-                                                 struct mem_cgroup *memcg,
-                                                 bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+                                        struct mem_cgroup *memcg,
+                                        unsigned long *min,
+                                        unsigned long *low)
 {
-       return 0;
+       *min = *low = 0;
 }
 
 static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
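
mem_cgroup_protection() now hands back both thresholds instead of pre-selecting one, leaving it to the reclaim code to decide how to combine them. A rough sketch of the new calling convention (the surrounding policy is illustrative, not the exact vmscan logic):

	unsigned long min, low, protection;

	mem_cgroup_protection(target_memcg, memcg, &min, &low);

	/* honour memory.min always; honour memory.low unless in low reclaim */
	protection = in_low_reclaim ? min : max(min, low);
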
index 40a0c2d..2d1895c 100644 (file)
@@ -200,13 +200,13 @@ enum rt5033_reg {
 #define RT5033_REGULATOR_BUCK_VOLTAGE_MIN              1000000U
 #define RT5033_REGULATOR_BUCK_VOLTAGE_MAX              3000000U
 #define RT5033_REGULATOR_BUCK_VOLTAGE_STEP             100000U
-#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM         21
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM         32
 
 /* RT5033 regulator LDO output voltage uV */
 #define RT5033_REGULATOR_LDO_VOLTAGE_MIN               1200000U
 #define RT5033_REGULATOR_LDO_VOLTAGE_MAX               3000000U
 #define RT5033_REGULATOR_LDO_VOLTAGE_STEP              100000U
-#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM          19
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM          32
 
 /* RT5033 regulator SAFE LDO output voltage uV */
 #define RT5033_REGULATOR_SAFE_LDO_VOLTAGE              4900000U
index 1efe374..25a8be5 100644 (file)
@@ -1044,8 +1044,7 @@ void mlx5_unregister_debugfs(void);
 void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-                   unsigned int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 
index 98b56b7..1a9c9d9 100644 (file)
@@ -11,13 +11,15 @@ enum {
 };
 
 enum {
-       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT   = 0x1, // do I check this caps?
-       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED  = 0x2,
+       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT   = 0,
+       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED  = 1,
 };
 
 enum {
-       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT   = 0,
-       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED  = 1,
+       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT =
+               BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT),
+       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED =
+               BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED),
 };
 
 struct mlx5_ifc_virtio_q_bits {
index 74e6c06..37f9758 100644 (file)
@@ -109,6 +109,7 @@ struct mmc_ext_csd {
        u8                      raw_hc_erase_gap_size;  /* 221 */
        u8                      raw_erase_timeout_mult; /* 223 */
        u8                      raw_hc_erase_grp_size;  /* 224 */
+       u8                      raw_boot_mult;          /* 226 */
        u8                      raw_sec_trim_mult;      /* 229 */
        u8                      raw_sec_erase_mult;     /* 230 */
        u8                      raw_sec_feature_support;/* 231 */
index ab19245..71101d1 100644 (file)
@@ -164,9 +164,8 @@ struct mmc_request {
        int                     tag;
 
 #ifdef CONFIG_MMC_CRYPTO
-       bool                    crypto_enabled;
+       const struct bio_crypt_ctx *crypto_ctx;
        int                     crypto_key_slot;
-       u32                     data_unit_num;
 #endif
 };
 
index 0abd47e..0c0c9a0 100644 (file)
@@ -153,7 +153,7 @@ struct mmc_host_ops {
 
        int     (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
 
-       /* Check if the card is pulling dat[0:3] low */
+       /* Check if the card is pulling dat[0] low */
        int     (*card_busy)(struct mmc_host *host);
 
        /* The tuning command opcode value is different for SD and eMMC cards */
@@ -398,6 +398,7 @@ struct mmc_host {
 #else
 #define MMC_CAP2_CRYPTO                0
 #endif
+#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28)       /* Host with eMMC that has GPT entry at a non-standard location */
 
        int                     fixed_drv_type; /* fixed driver type for non-removable media */
 
index 03dee12..b9b970f 100644 (file)
 static inline void leave_mm(int cpu) { }
 #endif
 
+/*
+ * CPUs that are capable of running user task @p. Must contain at least one
+ * active CPU. It is assumed that the kernel can run on all CPUs, so calling
+ * this for a kernel thread is pointless.
+ *
+ * By default, we assume a sane, homogeneous system.
+ */
+#ifndef task_cpu_possible_mask
+# define task_cpu_possible_mask(p)     cpu_possible_mask
+# define task_cpu_possible(cpu, p)     true
+#else
+# define task_cpu_possible(cpu, p)     cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
+#endif
+
 #endif
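
task_cpu_possible_mask() exists so that architectures with asymmetric CPUs, where only some cores can execute a given task's instruction set, can restrict where a user task may run; the generic default keeps today's homogeneous behaviour. A hypothetical arch override might look like this (the helper names are made up for illustration):

	/* arch/<arch>/include/asm/mmu_context.h (hypothetical) */
	#define task_cpu_possible_mask(p)				\
		(my_arch_task_is_32bit(p) ? my_arch_32bit_cpumask()	\
					  : cpu_possible_mask)
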
index eed280f..962cd41 100644 (file)
@@ -431,6 +431,8 @@ extern int param_get_int(char *buffer, const struct kernel_param *kp);
 extern const struct kernel_param_ops param_ops_uint;
 extern int param_set_uint(const char *val, const struct kernel_param *kp);
 extern int param_get_uint(char *buffer, const struct kernel_param *kp);
+int param_set_uint_minmax(const char *val, const struct kernel_param *kp,
+               unsigned int min, unsigned int max);
 #define param_check_uint(name, p) __param_check(name, p, unsigned int)
 
 extern const struct kernel_param_ops param_ops_long;
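
param_set_uint_minmax() is a range-checked variant of param_set_uint() intended for use from a kernel_param_ops .set callback. A minimal sketch with hypothetical names:

	static unsigned int my_threshold = 8;

	static int param_set_my_threshold(const char *val, const struct kernel_param *kp)
	{
		/* reject anything outside 1..64 */
		return param_set_uint_minmax(val, kp, 1, 64);
	}

	static const struct kernel_param_ops my_threshold_ops = {
		.set = param_set_my_threshold,
		.get = param_get_uint,
	};
	module_param_cb(my_threshold, &my_threshold_ops, &my_threshold, 0644);
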
index 6aff469..49cf6eb 100644 (file)
@@ -107,7 +107,8 @@ struct ti_sci_inta_msi_desc {
  *                     address or data changes
  * @write_msi_msg_data:        Data parameter for the callback.
  *
- * @masked:    [PCI MSI/X] Mask bits
+ * @msi_mask:  [PCI MSI]   MSI cached mask bits
+ * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits
  * @is_msix:   [PCI MSI/X] True if MSI-X
  * @multiple:  [PCI MSI/X] log2 num of messages allocated
  * @multi_cap: [PCI MSI/X] log2 num of messages supported
@@ -139,7 +140,10 @@ struct msi_desc {
        union {
                /* PCI MSI/X specific data */
                struct {
-                       u32 masked;
+                       union {
+                               u32 msi_mask;
+                               u32 msix_ctrl;
+                       };
                        struct {
                                u8      is_msix         : 1;
                                u8      multiple        : 3;
@@ -232,11 +236,13 @@ void free_msi_entry(struct msi_desc *entry);
 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 
-u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
 void pci_msi_mask_irq(struct irq_data *data);
 void pci_msi_unmask_irq(struct irq_data *data);
 
+const struct attribute_group **msi_populate_sysfs(struct device *dev);
+void msi_destroy_sysfs(struct device *dev,
+                      const struct attribute_group **msi_irq_groups);
+
 /*
  * The arch hooks to set up msi irqs. Default functions are implemented
  * as weak symbols so that they /can/ be overridden by architecture specific
index e193235..8f226d4 100644 (file)
 #include <linux/osq_lock.h>
 #include <linux/debug_locks.h>
 
-struct ww_class;
-struct ww_acquire_ctx;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)                 \
+               , .dep_map = {                                  \
+                       .name = #lockname,                      \
+                       .wait_type_inner = LD_WAIT_SLEEP,       \
+               }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -53,7 +62,7 @@ struct ww_acquire_ctx;
  */
 struct mutex {
        atomic_long_t           owner;
-       spinlock_t              wait_lock;
+       raw_spinlock_t          wait_lock;
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
@@ -66,27 +75,6 @@ struct mutex {
 #endif
 };
 
-struct ww_mutex {
-       struct mutex base;
-       struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
-       struct ww_class *ww_class;
-#endif
-};
-
-/*
- * This is the control structure for tasks blocked on mutex,
- * which resides on the blocked task's kernel stack:
- */
-struct mutex_waiter {
-       struct list_head        list;
-       struct task_struct      *task;
-       struct ww_acquire_ctx   *ww_ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
-       void                    *magic;
-#endif
-};
-
 #ifdef CONFIG_DEBUG_MUTEXES
 
 #define __DEBUG_MUTEX_INITIALIZER(lockname)                            \
@@ -117,19 +105,9 @@ do {                                                                       \
        __mutex_init((mutex), #mutex, &__key);                          \
 } while (0)
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)                 \
-               , .dep_map = {                                  \
-                       .name = #lockname,                      \
-                       .wait_type_inner = LD_WAIT_SLEEP,       \
-               }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
 #define __MUTEX_INITIALIZER(lockname) \
                { .owner = ATOMIC_LONG_INIT(0) \
-               , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+               , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
                , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
                __DEBUG_MUTEX_INITIALIZER(lockname) \
                __DEP_MAP_MUTEX_INITIALIZER(lockname) }
@@ -148,6 +126,50 @@ extern void __mutex_init(struct mutex *lock, const char *name,
  */
 extern bool mutex_is_locked(struct mutex *lock);
 
+#else /* !CONFIG_PREEMPT_RT */
+/*
+ * Preempt-RT variant based on rtmutexes.
+ */
+#include <linux/rtmutex.h>
+
+struct mutex {
+       struct rt_mutex_base    rtmutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
+};
+
+#define __MUTEX_INITIALIZER(mutexname)                                 \
+{                                                                      \
+       .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex)       \
+       __DEP_MAP_MUTEX_INITIALIZER(mutexname)                          \
+}
+
+#define DEFINE_MUTEX(mutexname)                                                \
+       struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void __mutex_rt_init(struct mutex *lock, const char *name,
+                           struct lock_class_key *key);
+extern int mutex_trylock(struct mutex *lock);
+
+static inline void mutex_destroy(struct mutex *lock) { }
+
+#define mutex_is_locked(l)     rt_mutex_base_is_locked(&(l)->rtmutex)
+
+#define __mutex_init(mutex, name, key)                 \
+do {                                                   \
+       rt_mutex_base_init(&(mutex)->rtmutex);          \
+       __mutex_rt_init((mutex), name, key);            \
+} while (0)
+
+#define mutex_init(mutex)                              \
+do {                                                   \
+       static struct lock_class_key __key;             \
+                                                       \
+       __mutex_init((mutex), #mutex, &__key);          \
+} while (0)
+#endif /* CONFIG_PREEMPT_RT */
+
 /*
  * See kernel/locking/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/locking/mutex-design.rst.
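For context, callers are unaffected by the PREEMPT_RT rework above: the same mutex API works whether the lock is backed by the original implementation or by the rtmutex-based variant. A trivial sketch with illustrative names:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* illustrative lock */
static int example_counter;

static void example_bump(void)
{
	mutex_lock(&example_lock);	/* may sleep on both !RT and RT kernels */
	example_counter++;
	mutex_unlock(&example_lock);
}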
index be9a2b3..e89329b 100644 (file)
@@ -68,6 +68,7 @@ extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
 extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
+struct dentry *lookup_one(struct user_namespace *, const char *, struct dentry *, int);
 
 extern int follow_down_one(struct path *);
 extern int follow_down(struct path *);
index 10279c4..ada1296 100644 (file)
@@ -196,6 +196,9 @@ struct ip_set_region {
        u32 elements;           /* Number of elements vs timeout */
 };
 
+/* Max range where every element is added/deleted in one step */
+#define IPSET_MAX_RANGE                (1<<20)
+
 /* The max revision number supported by any set type + 1 */
 #define IPSET_REVISION_MAX     9
 
index 2fb373a..87069b8 100644 (file)
@@ -168,8 +168,6 @@ extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
 extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
                unsigned long val, void *v);
 
-extern int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
-               unsigned long val_up, unsigned long val_down, void *v);
 extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
                unsigned long val_up, unsigned long val_down, void *v);
 extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
index 3d8db1f..0f4a890 100644 (file)
@@ -70,6 +70,9 @@ enum OID {
 
        OID_spnego,                     /* 1.3.6.1.5.5.2 */
 
+       OID_IAKerb,                     /* 1.3.6.1.5.2.5 */
+       OID_PKU2U,                      /* 1.3.5.1.5.2.7 */
+       OID_Scram,                      /* 1.3.6.1.5.5.14 */
        OID_certAuthInfoAccess,         /* 1.3.6.1.5.5.7.1.1 */
        OID_sha1,                       /* 1.3.14.3.2.26 */
        OID_id_ansip384r1,              /* 1.3.132.0.34 */
@@ -104,6 +107,10 @@ enum OID {
        OID_authorityKeyIdentifier,     /* 2.5.29.35 */
        OID_extKeyUsage,                /* 2.5.29.37 */
 
+       /* Heimdal mechanisms */
+       OID_NetlogonMechanism,          /* 1.2.752.43.14.2 */
+       OID_appleLocalKdcSupported,     /* 1.2.752.43.14.3 */
+
        /* EC-RDSA */
        OID_gostCPSignA,                /* 1.2.643.2.2.35.1 */
        OID_gostCPSignB,                /* 1.2.643.2.2.35.2 */
index 9225ee6..ae6f4eb 100644 (file)
@@ -7,7 +7,7 @@
 
 bool __do_once_start(bool *done, unsigned long *flags);
 void __do_once_done(bool *done, struct static_key_true *once_key,
-                   unsigned long *flags);
+                   unsigned long *flags, struct module *mod);
 
 /* Call a function exactly once. The idea of DO_ONCE() is to perform
  * a function call such as initialization of random seeds, etc, only
@@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
                        if (unlikely(___ret)) {                              \
                                func(__VA_ARGS__);                           \
                                __do_once_done(&___done, &___once_key,       \
-                                              &___flags);                   \
+                                              &___flags, THIS_MODULE);      \
                        }                                                    \
                }                                                            \
                ___ret;                                                      \
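The hunk above threads THIS_MODULE into __do_once_done(); users of DO_ONCE() are unchanged. A minimal sketch of a typical caller, with an illustrative seed variable:

#include <linux/once.h>
#include <linux/random.h>

static u32 example_hash_seed;	/* illustrative */

static void example_init_seed(void)
{
	/* get_random_bytes() runs at most once, however often this is called. */
	DO_ONCE(get_random_bytes, &example_hash_seed, sizeof(example_hash_seed));
}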
index a433f13..495b16b 100644 (file)
@@ -12,6 +12,7 @@
 #ifndef PADATA_H
 #define PADATA_H
 
+#include <linux/refcount.h>
 #include <linux/compiler_types.h>
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
@@ -96,7 +97,7 @@ struct parallel_data {
        struct padata_shell             *ps;
        struct padata_list              __percpu *reorder_list;
        struct padata_serial_queue      __percpu *squeue;
-       atomic_t                        refcnt;
+       refcount_t                      refcnt;
        unsigned int                    seq_nr;
        unsigned int                    processed;
        int                             cpu;
index 3d50d5b..06eccef 100644 (file)
 #define PCI_DEVICE_ID_3COM_3CR990SVR   0x990a
 
 #define PCI_VENDOR_ID_AL               0x10b9
+#define PCI_DEVICE_ID_AL_M1489         0x1489
 #define PCI_DEVICE_ID_AL_M1533         0x1533
 #define PCI_DEVICE_ID_AL_M1535         0x1535
 #define PCI_DEVICE_ID_AL_M1541         0x1541
 #define PCI_DEVICE_ID_INTEL_82375      0x0482
 #define PCI_DEVICE_ID_INTEL_82424      0x0483
 #define PCI_DEVICE_ID_INTEL_82378      0x0484
+#define PCI_DEVICE_ID_INTEL_82425      0x0486
 #define PCI_DEVICE_ID_INTEL_MRST_SD0   0x0807
 #define PCI_DEVICE_ID_INTEL_MRST_SD1   0x0808
 #define PCI_DEVICE_ID_INTEL_MFD_SD     0x0820
index fa10acb..af308e1 100644 (file)
@@ -78,6 +78,7 @@ struct file;
 
 extern struct pid *pidfd_pid(const struct file *file);
 struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
+int pidfd_create(struct pid *pid, unsigned int flags);
 
 static inline struct pid *get_pid(struct pid *pid)
 {
index 5d2705f..fc56424 100644 (file)
@@ -48,6 +48,7 @@ struct pipe_buffer {
  *     @files: number of struct file referring this pipe (protected by ->i_lock)
  *     @r_counter: reader counter
  *     @w_counter: writer counter
+ *     @poll_usage: is this pipe used for epoll, which has crazy wakeups?
  *     @fasync_readers: reader side fasync
  *     @fasync_writers: writer side fasync
  *     @bufs: the circular array of pipe buffers
@@ -70,6 +71,7 @@ struct pipe_inode_info {
        unsigned int files;
        unsigned int r_counter;
        unsigned int w_counter;
+       unsigned int poll_usage;
        struct page *tmp_page;
        struct fasync_struct *fasync_readers;
        struct fasync_struct *fasync_writers;
index 45f53af..271bd87 100644 (file)
@@ -4228,6 +4228,7 @@ enum ec_device_event {
        EC_DEVICE_EVENT_TRACKPAD,
        EC_DEVICE_EVENT_DSP,
        EC_DEVICE_EVENT_WIFI,
+       EC_DEVICE_EVENT_WLC,
 };
 
 enum ec_device_event_param {
@@ -5460,6 +5461,72 @@ struct ec_response_rollback_info {
 /* Issue AP reset */
 #define EC_CMD_AP_RESET 0x0125
 
+/**
+ * Get the number of peripheral charge ports
+ */
+#define EC_CMD_PCHG_COUNT 0x0134
+
+#define EC_PCHG_MAX_PORTS 8
+
+struct ec_response_pchg_count {
+       uint8_t port_count;
+} __ec_align1;
+
+/**
+ * Get the status of a peripheral charge port
+ */
+#define EC_CMD_PCHG 0x0135
+
+struct ec_params_pchg {
+       uint8_t port;
+} __ec_align1;
+
+struct ec_response_pchg {
+       uint32_t error;                 /* enum pchg_error */
+       uint8_t state;                  /* enum pchg_state state */
+       uint8_t battery_percentage;
+       uint8_t unused0;
+       uint8_t unused1;
+       /* Fields added in version 1 */
+       uint32_t fw_version;
+       uint32_t dropped_event_count;
+} __ec_align2;
+
+enum pchg_state {
+       /* Charger is reset and not initialized. */
+       PCHG_STATE_RESET = 0,
+       /* Charger is initialized or disabled. */
+       PCHG_STATE_INITIALIZED,
+       /* Charger is enabled and ready to detect a device. */
+       PCHG_STATE_ENABLED,
+       /* Device is in proximity. */
+       PCHG_STATE_DETECTED,
+       /* Device is being charged. */
+       PCHG_STATE_CHARGING,
+       /* Device is fully charged. It implies DETECTED (& not charging). */
+       PCHG_STATE_FULL,
+       /* In download (a.k.a. firmware update) mode */
+       PCHG_STATE_DOWNLOAD,
+       /* In download mode. Ready for receiving data. */
+       PCHG_STATE_DOWNLOADING,
+       /* Device is ready for data communication. */
+       PCHG_STATE_CONNECTED,
+       /* Put no more entry below */
+       PCHG_STATE_COUNT,
+};
+
+#define EC_PCHG_STATE_TEXT { \
+       [PCHG_STATE_RESET] = "RESET", \
+       [PCHG_STATE_INITIALIZED] = "INITIALIZED", \
+       [PCHG_STATE_ENABLED] = "ENABLED", \
+       [PCHG_STATE_DETECTED] = "DETECTED", \
+       [PCHG_STATE_CHARGING] = "CHARGING", \
+       [PCHG_STATE_FULL] = "FULL", \
+       [PCHG_STATE_DOWNLOAD] = "DOWNLOAD", \
+       [PCHG_STATE_DOWNLOADING] = "DOWNLOADING", \
+       [PCHG_STATE_CONNECTED] = "CONNECTED", \
+       }
+
 /*****************************************************************************/
 /* Voltage regulator controls */
 
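A small sketch of how host-side code might translate the new peripheral-charger state values into the strings provided by EC_PCHG_STATE_TEXT; the helper name is illustrative and not part of this merge.

#include <linux/platform_data/cros_ec_commands.h>

static const char * const pchg_state_text[] = EC_PCHG_STATE_TEXT;

/* Illustrative helper: map an ec_response_pchg::state value to a printable name. */
static const char *pchg_state_name(uint8_t state)
{
	if (state >= PCHG_STATE_COUNT || !pchg_state_text[state])
		return "UNKNOWN";
	return pchg_state_text[state];
}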
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
deleted file mode 100644 (file)
index cba1184..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2010 Wolfram Sang <kernel@pengutronix.de>
- */
-
-#ifndef __ASM_ARCH_IMX_ESDHC_H
-#define __ASM_ARCH_IMX_ESDHC_H
-
-#include <linux/types.h>
-
-enum wp_types {
-       ESDHC_WP_NONE,          /* no WP, neither controller nor gpio */
-       ESDHC_WP_CONTROLLER,    /* mmc controller internal WP */
-       ESDHC_WP_GPIO,          /* external gpio pin for WP */
-};
-
-enum cd_types {
-       ESDHC_CD_NONE,          /* no CD, neither controller nor gpio */
-       ESDHC_CD_CONTROLLER,    /* mmc controller internal CD */
-       ESDHC_CD_GPIO,          /* external gpio pin for CD */
-       ESDHC_CD_PERMANENT,     /* no CD, card permanently wired to host */
-};
-
-/**
- * struct esdhc_platform_data - platform data for esdhc on i.MX
- *
- * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
- *
- * @wp_type:   type of write_protect method (see wp_types enum above)
- * @cd_type:   type of card_detect method (see cd_types enum above)
- */
-
-struct esdhc_platform_data {
-       enum wp_types wp_type;
-       enum cd_types cd_type;
-       int max_bus_width;
-       unsigned int delay_line;
-       unsigned int tuning_step;       /* The delay cell steps in tuning procedure */
-       unsigned int tuning_start_tap;  /* The start delay cell point in tuning procedure */
-       unsigned int strobe_dll_delay_target;   /* The delay cell for strobe pad (read clock) */
-};
-#endif /* __ASM_ARCH_IMX_ESDHC_H */
index 65fd5ff..f0db674 100644 (file)
@@ -12,5 +12,6 @@
 /* Board specific platform_data */
 struct mtk_chip_config {
        u32 sample_sel;
+       u32 tick_delay;
 };
 #endif
index 21a0577..67017c9 100644 (file)
@@ -198,6 +198,7 @@ struct generic_pm_domain_data {
        struct notifier_block *power_nb;
        int cpu;
        unsigned int performance_state;
+       unsigned int default_pstate;
        unsigned int rpm_pstate;
        ktime_t next_wakeup;
        void *data;
index 896c16d..00fef00 100644 (file)
@@ -82,12 +82,19 @@ static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
        return timerqueue_add(head, &ctmr->node);
 }
 
-static inline void cpu_timer_dequeue(struct cpu_timer *ctmr)
+static inline bool cpu_timer_queued(struct cpu_timer *ctmr)
 {
-       if (ctmr->head) {
+       return !!ctmr->head;
+}
+
+static inline bool cpu_timer_dequeue(struct cpu_timer *ctmr)
+{
+       if (cpu_timer_queued(ctmr)) {
                timerqueue_del(ctmr->head, &ctmr->node);
                ctmr->head = NULL;
+               return true;
        }
+       return false;
 }
 
 static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr)
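With the change above, cpu_timer_dequeue() reports whether the timer was actually queued, so a caller can fold the "was it pending?" check into the dequeue itself. A hedged sketch, not taken from this merge:

#include <linux/posix-timers.h>

/* Illustrative only: cancel a per-task timer and record a pending expiry, if any. */
static void example_cancel(struct cpu_timer *ctmr, u64 *pending)
{
	/* The bool return tells us whether there really was something to cancel. */
	if (cpu_timer_dequeue(ctmr))
		*pending = cpu_timer_getexpires(ctmr);
}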
index d55c746..dd24756 100644 (file)
@@ -69,7 +69,7 @@ enum max17042_register {
        MAX17042_RelaxCFG       = 0x2A,
        MAX17042_MiscCFG        = 0x2B,
        MAX17042_TGAIN          = 0x2C,
-       MAx17042_TOFF           = 0x2D,
+       MAX17042_TOFF           = 0x2D,
        MAX17042_CGAIN          = 0x2E,
        MAX17042_COFF           = 0x2F,
 
@@ -110,13 +110,14 @@ enum max17042_register {
        MAX17042_VFSOC          = 0xFF,
 };
 
+/* Registers specific to max17055 only */
 enum max17055_register {
        MAX17055_QRes           = 0x0C,
+       MAX17055_RCell          = 0x14,
        MAX17055_TTF            = 0x20,
-       MAX17055_V_empty        = 0x3A,
-       MAX17055_TIMER          = 0x3E,
+       MAX17055_DieTemp        = 0x34,
        MAX17055_USER_MEM       = 0x40,
-       MAX17055_RGAIN          = 0x42,
+       MAX17055_RGAIN          = 0x43,
 
        MAX17055_ConvgCfg       = 0x49,
        MAX17055_VFRemCap       = 0x4A,
@@ -155,13 +156,14 @@ enum max17055_register {
        MAX17055_AtAvCap        = 0xDF,
 };
 
-/* Registers specific to max17047/50 */
+/* Registers specific to max17047/50/55 */
 enum max17047_register {
        MAX17047_QRTbl00        = 0x12,
        MAX17047_FullSOCThr     = 0x13,
        MAX17047_QRTbl10        = 0x22,
        MAX17047_QRTbl20        = 0x32,
        MAX17047_V_empty        = 0x3A,
+       MAX17047_TIMER          = 0x3E,
        MAX17047_QRTbl30        = 0x42,
 };
 
index be20398..9ca1f12 100644 (file)
@@ -352,6 +352,7 @@ struct power_supply_resistance_temp_table {
  */
 
 struct power_supply_battery_info {
+       unsigned int technology;            /* from the enum above */
        int energy_full_design_uwh;         /* microWatt-hours */
        int charge_full_design_uah;         /* microAmp-hours */
        int voltage_min_design_uv;          /* microVolts */
index 9881eac..4d244e2 100644 (file)
 /*
  * The preempt_count offset after spin_lock()
  */
+#if !defined(CONFIG_PREEMPT_RT)
 #define PREEMPT_LOCK_OFFSET    PREEMPT_DISABLE_OFFSET
+#else
+#define PREEMPT_LOCK_OFFSET    0
+#endif
 
 /*
  * The preempt_count offset needed for things like:
index 073e680..357513a 100644 (file)
@@ -484,8 +484,6 @@ void software_node_unregister_node_group(const struct software_node **node_group
 int software_node_register(const struct software_node *node);
 void software_node_unregister(const struct software_node *node);
 
-int software_node_notify(struct device *dev, unsigned long action);
-
 struct fwnode_handle *
 fwnode_create_software_node(const struct property_entry *properties,
                            const struct fwnode_handle *parent);
index d31ecaf..235047d 100644 (file)
 #ifndef        _LINUX_RBTREE_H
 #define        _LINUX_RBTREE_H
 
+#include <linux/rbtree_types.h>
+
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/rcupdate.h>
 
-struct rb_node {
-       unsigned long  __rb_parent_color;
-       struct rb_node *rb_right;
-       struct rb_node *rb_left;
-} __attribute__((aligned(sizeof(long))));
-    /* The alignment might seem pointless, but allegedly CRIS needs it */
-
-struct rb_root {
-       struct rb_node *rb_node;
-};
-
 #define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))
 
-#define RB_ROOT        (struct rb_root) { NULL, }
 #define        rb_entry(ptr, type, member) container_of(ptr, type, member)
 
 #define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)
@@ -112,23 +102,6 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
                        typeof(*pos), field); 1; }); \
             pos = n)
 
-/*
- * Leftmost-cached rbtrees.
- *
- * We do not cache the rightmost node based on footprint
- * size vs number of potential users that could benefit
- * from O(1) rb_last(). Just not worth it, users that want
- * this feature can always implement the logic explicitly.
- * Furthermore, users that want to cache both pointers may
- * find it a bit asymmetric, but that's ok.
- */
-struct rb_root_cached {
-       struct rb_root rb_root;
-       struct rb_node *rb_leftmost;
-};
-
-#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
-
 /* Same as rb_first(), but O(1) */
 #define rb_first_cached(root) (root)->rb_leftmost
 
diff --git a/include/linux/rbtree_types.h b/include/linux/rbtree_types.h
new file mode 100644 (file)
index 0000000..45b6ecd
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_RBTREE_TYPES_H
+#define _LINUX_RBTREE_TYPES_H
+
+struct rb_node {
+       unsigned long  __rb_parent_color;
+       struct rb_node *rb_right;
+       struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+/* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+       struct rb_node *rb_node;
+};
+
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+       struct rb_root rb_root;
+       struct rb_node *rb_leftmost;
+};
+
+#define RB_ROOT (struct rb_root) { NULL, }
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+#endif
index f8633d3..d29740b 100644 (file)
 #include <linux/list.h>
 #include <linux/rcupdate.h>
 
-/*
- * Why is there no list_empty_rcu()?  Because list_empty() serves this
- * purpose.  The list_empty() function fetches the RCU-protected pointer
- * and compares it to the address of the list head, but neither dereferences
- * this pointer itself nor provides this pointer to the caller.  Therefore,
- * it is not necessary to use rcu_dereference(), so that list_empty() can
- * be used anywhere you would want to use a list_empty_rcu().
- */
-
 /*
  * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
  * @list: list to be initialized
@@ -318,21 +309,29 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
 /*
  * Where are list_empty_rcu() and list_first_entry_rcu()?
  *
- * Implementing those functions following their counterparts list_empty() and
- * list_first_entry() is not advisable because they lead to subtle race
- * conditions as the following snippet shows:
+ * They do not exist because they would lead to subtle race conditions:
  *
  * if (!list_empty_rcu(mylist)) {
  *     struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
  *     do_something(bar);
  * }
  *
- * The list may not be empty when list_empty_rcu checks it, but it may be when
- * list_first_entry_rcu rereads the ->next pointer.
- *
- * Rereading the ->next pointer is not a problem for list_empty() and
- * list_first_entry() because they would be protected by a lock that blocks
- * writers.
+ * The list might be non-empty when list_empty_rcu() checks it, but it
+ * might have become empty by the time that list_first_entry_rcu() rereads
+ * the ->next pointer, which would result in a SEGV.
+ *
+ * When not using RCU, it is OK for list_first_entry() to re-read that
+ * pointer because both functions should be protected by some lock that
+ * blocks writers.
+ *
+ * When using RCU, list_empty() uses READ_ONCE() to fetch the
+ * RCU-protected ->next pointer and then compares it to the address of the
+ * list head.  However, it neither dereferences this pointer nor provides
+ * this pointer to its caller.  Thus, READ_ONCE() suffices (that is,
+ * rcu_dereference() is not needed), which means that list_empty() can be
+ * used anywhere you would want to use list_empty_rcu().  Just don't
+ * expect anything useful to happen if you do a subsequent lockless
+ * call to list_first_entry_rcu()!!!
  *
  * See list_first_or_null_rcu for an alternative.
  */
index d9680b7..434d12f 100644 (file)
@@ -53,7 +53,7 @@ void __rcu_read_unlock(void);
  * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
  */
-#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting)
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
@@ -167,7 +167,7 @@ void synchronize_rcu_tasks(void);
 # define synchronize_rcu_tasks synchronize_rcu
 # endif
 
-# ifdef CONFIG_TASKS_RCU_TRACE
+# ifdef CONFIG_TASKS_TRACE_RCU
 # define rcu_tasks_trace_qs(t)                                         \
        do {                                                            \
                if (!likely(READ_ONCE((t)->trc_reader_checked)) &&      \
index 953e70f..9be0153 100644 (file)
@@ -14,9 +14,6 @@
 
 #include <asm/param.h> /* for HZ */
 
-/* Never flag non-existent other CPUs! */
-static inline bool rcu_eqs_special_set(int cpu) { return false; }
-
 unsigned long get_state_synchronize_rcu(void);
 unsigned long start_poll_synchronize_rcu(void);
 bool poll_state_synchronize_rcu(unsigned long oldstate);
index f5f08dd..e3c9a25 100644 (file)
@@ -344,6 +344,7 @@ typedef void (*regmap_unlock)(void *);
  * @ranges: Array of configuration entries for virtual address ranges.
  * @num_ranges: Number of range configuration entries.
  * @use_hwlock: Indicate if a hardware spinlock should be used.
+ * @use_raw_spinlock: Indicate if a raw spinlock should be used.
  * @hwlock_id: Specify the hardware spinlock id.
  * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
  *              HWLOCK_IRQ or 0.
@@ -403,6 +404,7 @@ struct regmap_config {
        unsigned int num_ranges;
 
        bool use_hwlock;
+       bool use_raw_spinlock;
        unsigned int hwlock_id;
        unsigned int hwlock_mode;
 
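A hedged sketch of a regmap_config that opts into the new raw-spinlock protection; the register widths and the config name are illustrative, not taken from this merge.

#include <linux/regmap.h>

static const struct regmap_config example_regmap_cfg = {
	.name			= "example",	/* illustrative */
	.reg_bits		= 32,
	.val_bits		= 32,
	.fast_io		= true,		/* MMIO-style access */
	.use_raw_spinlock	= true,		/* lock remains usable from truly atomic context on RT */
};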
@@ -1269,12 +1271,13 @@ void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
 
 int regmap_field_bulk_alloc(struct regmap *regmap,
                             struct regmap_field **rm_field,
-                            struct reg_field *reg_field,
+                            const struct reg_field *reg_field,
                             int num_fields);
 void regmap_field_bulk_free(struct regmap_field *field);
 int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap,
                                 struct regmap_field **field,
-                                struct reg_field *reg_field, int num_fields);
+                                const struct reg_field *reg_field,
+                                int num_fields);
 void devm_regmap_field_bulk_free(struct device *dev,
                                 struct regmap_field *field);
 
index f72ca73..bbf6590 100644 (file)
@@ -222,17 +222,12 @@ void regulator_bulk_unregister_supply_alias(struct device *dev,
 int devm_regulator_register_supply_alias(struct device *dev, const char *id,
                                         struct device *alias_dev,
                                         const char *alias_id);
-void devm_regulator_unregister_supply_alias(struct device *dev,
-                                           const char *id);
 
 int devm_regulator_bulk_register_supply_alias(struct device *dev,
                                              const char *const *id,
                                              struct device *alias_dev,
                                              const char *const *alias_id,
                                              int num_id);
-void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
-                                                const char *const *id,
-                                                int num_id);
 
 /* regulator output control and status */
 int __must_check regulator_enable(struct regulator *regulator);
@@ -408,11 +403,6 @@ static inline int devm_regulator_register_supply_alias(struct device *dev,
        return 0;
 }
 
-static inline void devm_regulator_unregister_supply_alias(struct device *dev,
-                                                         const char *id)
-{
-}
-
 static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
                                                const char *const *id,
                                                struct device *alias_dev,
@@ -422,11 +412,6 @@ static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
        return 0;
 }
 
-static inline void devm_regulator_bulk_unregister_supply_alias(
-       struct device *dev, const char *const *id, int num_id)
-{
-}
-
 static inline int regulator_enable(struct regulator *regulator)
 {
        return 0;
index 4aec203..bd7a73d 100644 (file)
@@ -337,6 +337,12 @@ enum regulator_type {
  * @pull_down_val_on: Enabling value for control when using regmap
  *                     set_pull_down
  *
+ * @ramp_reg:          Register for controlling the regulator ramp-rate.
+ * @ramp_mask:         Bitmask for the ramp-rate control register.
+ * @ramp_delay_table:  Table for mapping the regulator ramp-rate values. Values
+ *                     should be given in units of V/S (uV/uS). See
+ *                     regulator_set_ramp_delay_regmap().
+ *
  * @enable_time: Time taken for initial enable of regulator (in uS).
  * @off_on_delay: guard time (in uS), before re-enabling a regulator
  *
@@ -462,7 +468,7 @@ struct regulator_err_state {
 };
 
 /**
- * struct regulator_irq_data - regulator error/notification status date
+ * struct regulator_irq_data - regulator error/notification status data
  *
  * @states:    Status structs for each of the associated regulators.
  * @num_states:        Amount of associated regulators.
@@ -521,8 +527,8 @@ struct regulator_irq_data {
  *             active events as core does not clean the map data.
  *             REGULATOR_FAILED_RETRY can be returned to indicate that the
  *             status reading from IC failed. If this is repeated for
- *             fatal_cnt times the core will call die() callback or BUG()
- *             as a last resort to protect the HW.
+ *             fatal_cnt times the core will call die() callback or power-off
+ *             the system as a last resort to protect the HW.
  * @renable:   Optional callback to check status (if HW supports that) before
  *             re-enabling IRQ. If implemented this should clear the error
  *             flags so that errors fetched by regulator_get_error_flags()
@@ -531,7 +537,8 @@ struct regulator_irq_data {
  *             REGULATOR_FAILED_RETRY can be returned to
  *             indicate that the status reading from IC failed. If this is
  *             repeated for 'fatal_cnt' times the core will call die()
- *             callback or BUG() as a last resort to protect the HW.
+ *             callback or if die() is not populated then attempt to power-off
+ *             the system as a last resort to protect the HW.
  *             Returning zero indicates that the problem in HW has been solved
  *             and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON
  *             indicates the error condition is still active and keeps IRQ
@@ -645,7 +652,6 @@ devm_regulator_register(struct device *dev,
                        const struct regulator_desc *regulator_desc,
                        const struct regulator_config *config);
 void regulator_unregister(struct regulator_dev *rdev);
-void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
 
 int regulator_notifier_call_chain(struct regulator_dev *rdev,
                                  unsigned long event, void *data);
index 68b4a51..621b7f4 100644 (file)
@@ -112,7 +112,7 @@ struct notification_limit {
  * @over_voltage_limits:       Limits for acting on over voltage.
  * @under_voltage_limits:      Limits for acting on under voltage.
  * @temp_limits:               Limits for acting on over temperature.
-
+ *
  * @max_spread: Max possible spread between coupled regulators
  * @max_uV_step: Max possible step change in voltage
  * @valid_modes_mask: Mask of modes which may be configured by consumers.
index 9b05af9..21deb52 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _RESCTRL_H
 #define _RESCTRL_H
 
+#include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/pid.h>
 
 #ifdef CONFIG_PROC_CPU_RESCTRL
@@ -13,4 +15,186 @@ int proc_resctrl_show(struct seq_file *m,
 
 #endif
 
+/**
+ * enum resctrl_conf_type - The type of configuration.
+ * @CDP_NONE:  No prioritisation, both code and data are controlled or monitored.
+ * @CDP_CODE:  Configuration applies to instruction fetches.
+ * @CDP_DATA:  Configuration applies to reads and writes.
+ */
+enum resctrl_conf_type {
+       CDP_NONE,
+       CDP_CODE,
+       CDP_DATA,
+};
+
+#define CDP_NUM_TYPES  (CDP_DATA + 1)
+
+/**
+ * struct resctrl_staged_config - parsed configuration to be applied
+ * @new_ctrl:          new ctrl value to be loaded
+ * @have_new_ctrl:     whether the user provided new_ctrl is valid
+ */
+struct resctrl_staged_config {
+       u32                     new_ctrl;
+       bool                    have_new_ctrl;
+};
+
+/**
+ * struct rdt_domain - group of CPUs sharing a resctrl resource
+ * @list:              all instances of this resource
+ * @id:                        unique id for this instance
+ * @cpu_mask:          which CPUs share this resource
+ * @rmid_busy_llc:     bitmap of which limbo RMIDs are above threshold
+ * @mbm_total:         saved state for MBM total bandwidth
+ * @mbm_local:         saved state for MBM local bandwidth
+ * @mbm_over:          worker to periodically read MBM h/w counters
+ * @cqm_limbo:         worker to periodically read CQM h/w counters
+ * @mbm_work_cpu:      worker CPU for MBM h/w counters
+ * @cqm_work_cpu:      worker CPU for CQM h/w counters
+ * @plr:               pseudo-locked region (if any) associated with domain
+ * @staged_config:     parsed configuration to be applied
+ */
+struct rdt_domain {
+       struct list_head                list;
+       int                             id;
+       struct cpumask                  cpu_mask;
+       unsigned long                   *rmid_busy_llc;
+       struct mbm_state                *mbm_total;
+       struct mbm_state                *mbm_local;
+       struct delayed_work             mbm_over;
+       struct delayed_work             cqm_limbo;
+       int                             mbm_work_cpu;
+       int                             cqm_work_cpu;
+       struct pseudo_lock_region       *plr;
+       struct resctrl_staged_config    staged_config[CDP_NUM_TYPES];
+};
+
+/**
+ * struct resctrl_cache - Cache allocation related data
+ * @cbm_len:           Length of the cache bit mask
+ * @min_cbm_bits:      Minimum number of consecutive bits to be set
+ * @shareable_bits:    Bitmask of shareable resource with other
+ *                     executing entities
+ * @arch_has_sparse_bitmaps:   True if a bitmap like f00f is valid.
+ * @arch_has_empty_bitmaps:    True if the '0' bitmap is valid.
+ * @arch_has_per_cpu_cfg:      True if QOS_CFG register for this cache
+ *                             level has CPU scope.
+ */
+struct resctrl_cache {
+       unsigned int    cbm_len;
+       unsigned int    min_cbm_bits;
+       unsigned int    shareable_bits;
+       bool            arch_has_sparse_bitmaps;
+       bool            arch_has_empty_bitmaps;
+       bool            arch_has_per_cpu_cfg;
+};
+
+/**
+ * enum membw_throttle_mode - System's memory bandwidth throttling mode
+ * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system
+ * @THREAD_THROTTLE_MAX:       Memory bandwidth is throttled at the core
+ *                             always using smallest bandwidth percentage
+ *                             assigned to threads, aka "max throttling"
+ * @THREAD_THROTTLE_PER_THREAD:        Memory bandwidth is throttled at the thread
+ */
+enum membw_throttle_mode {
+       THREAD_THROTTLE_UNDEFINED = 0,
+       THREAD_THROTTLE_MAX,
+       THREAD_THROTTLE_PER_THREAD,
+};
+
+/**
+ * struct resctrl_membw - Memory bandwidth allocation related data
+ * @min_bw:            Minimum memory bandwidth percentage user can request
+ * @bw_gran:           Granularity at which the memory bandwidth is allocated
+ * @delay_linear:      True if memory B/W delay is in linear scale
+ * @arch_needs_linear: True if we can't configure non-linear resources
+ * @throttle_mode:     Bandwidth throttling mode when threads request
+ *                     different memory bandwidths
+ * @mba_sc:            True if the MBA software controller (mba_sc) is enabled
+ * @mb_map:            Mapping of memory B/W percentage to memory B/W delay
+ */
+struct resctrl_membw {
+       u32                             min_bw;
+       u32                             bw_gran;
+       u32                             delay_linear;
+       bool                            arch_needs_linear;
+       enum membw_throttle_mode        throttle_mode;
+       bool                            mba_sc;
+       u32                             *mb_map;
+};
+
+struct rdt_parse_data;
+struct resctrl_schema;
+
+/**
+ * struct rdt_resource - attributes of a resctrl resource
+ * @rid:               The index of the resource
+ * @alloc_enabled:     Is allocation enabled on this machine
+ * @mon_enabled:       Is monitoring enabled for this feature
+ * @alloc_capable:     Is allocation available on this machine
+ * @mon_capable:       Is monitor feature available on this machine
+ * @num_rmid:          Number of RMIDs available
+ * @cache_level:       Which cache level defines scope of this resource
+ * @cache:             Cache allocation related data
+ * @membw:             If the component has bandwidth controls, their properties.
+ * @domains:           All domains for this resource
+ * @name:              Name to use in "schemata" file.
+ * @data_width:                Character width of data when displaying
+ * @default_ctrl:      Specifies default cache cbm or memory B/W percent.
+ * @format_str:                Per resource format string to show domain value
+ * @parse_ctrlval:     Per resource function pointer to parse control values
+ * @evt_list:          List of monitoring events
+ * @fflags:            flags to choose base and info files
+ * @cdp_capable:       Is the CDP feature available on this resource
+ */
+struct rdt_resource {
+       int                     rid;
+       bool                    alloc_enabled;
+       bool                    mon_enabled;
+       bool                    alloc_capable;
+       bool                    mon_capable;
+       int                     num_rmid;
+       int                     cache_level;
+       struct resctrl_cache    cache;
+       struct resctrl_membw    membw;
+       struct list_head        domains;
+       char                    *name;
+       int                     data_width;
+       u32                     default_ctrl;
+       const char              *format_str;
+       int                     (*parse_ctrlval)(struct rdt_parse_data *data,
+                                                struct resctrl_schema *s,
+                                                struct rdt_domain *d);
+       struct list_head        evt_list;
+       unsigned long           fflags;
+       bool                    cdp_capable;
+};
+
+/**
+ * struct resctrl_schema - configuration abilities of a resource presented to
+ *                        user-space
+ * @list:      Member of resctrl_schema_all.
+ * @name:      The name to use in the "schemata" file.
+ * @conf_type: Whether this schema is specific to code/data.
+ * @res:       The resource structure exported by the architecture to describe
+ *             the hardware that is configured by this schema.
+ * @num_closid:        The number of closid that can be used with this schema. When
+ *             features like CDP are enabled, this will be lower than the
+ *             hardware supports for the resource.
+ */
+struct resctrl_schema {
+       struct list_head                list;
+       char                            name[8];
+       enum resctrl_conf_type          conf_type;
+       struct rdt_resource             *res;
+       u32                             num_closid;
+};
+
+/* The number of closid supported by this resource regardless of CDP */
+u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
+int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+                           u32 closid, enum resctrl_conf_type type);
+
 #endif /* _RESCTRL_H */
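A rough sketch of how a filesystem-side consumer might walk the structures declared above and read a configuration back through resctrl_arch_get_config(); purely illustrative, the real resctrl fs code is not part of this hunk.

#include <linux/list.h>
#include <linux/printk.h>
#include <linux/resctrl.h>

/* Illustrative only: log every domain's control value for one schema. */
static void example_dump_schema(struct resctrl_schema *s)
{
	struct rdt_resource *r = s->res;
	struct rdt_domain *d;
	u32 closid = 0;		/* illustrative: the default group's closid */

	list_for_each_entry(d, &r->domains, list) {
		u32 val = resctrl_arch_get_config(r, d, closid, s->conf_type);

		pr_info("%s domain %d: 0x%x\n", s->name, d->id, val);
	}
}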
index d1672de..9deedfe 100644 (file)
 #ifndef __LINUX_RT_MUTEX_H
 #define __LINUX_RT_MUTEX_H
 
+#include <linux/compiler.h>
 #include <linux/linkage.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock_types.h>
+#include <linux/rbtree_types.h>
+#include <linux/spinlock_types_raw.h>
 
 extern int max_lock_depth; /* for sysctl */
 
+struct rt_mutex_base {
+       raw_spinlock_t          wait_lock;
+       struct rb_root_cached   waiters;
+       struct task_struct      *owner;
+};
+
+#define __RT_MUTEX_BASE_INITIALIZER(rtbasename)                                \
+{                                                                      \
+       .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock),    \
+       .waiters = RB_ROOT_CACHED,                                      \
+       .owner = NULL                                                   \
+}
+
+/**
+ * rt_mutex_base_is_locked - is the rtmutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
+{
+       return READ_ONCE(lock->owner) != NULL;
+}
+
+extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
+
 /**
  * The rt_mutex structure
  *
@@ -28,9 +55,7 @@ extern int max_lock_depth; /* for sysctl */
  * @owner:     the mutex owner
  */
 struct rt_mutex {
-       raw_spinlock_t          wait_lock;
-       struct rb_root_cached   waiters;
-       struct task_struct      *owner;
+       struct rt_mutex_base    rtmutex;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
 #endif
@@ -52,32 +77,24 @@ do { \
 } while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
-       , .dep_map = { .name = #mutexname }
+#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)      \
+       .dep_map = {                                    \
+               .name = #mutexname,                     \
+               .wait_type_inner = LD_WAIT_SLEEP,       \
+       }
 #else
 #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
 #endif
 
-#define __RT_MUTEX_INITIALIZER(mutexname) \
-       { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
-       , .waiters = RB_ROOT_CACHED \
-       , .owner = NULL \
-       __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
+#define __RT_MUTEX_INITIALIZER(mutexname)                              \
+{                                                                      \
+       .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex),      \
+       __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)                       \
+}
 
 #define DEFINE_RT_MUTEX(mutexname) \
        struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
 
-/**
- * rt_mutex_is_locked - is the mutex locked
- * @lock: the mutex to be queried
- *
- * Returns 1 if the mutex is locked, 0 if unlocked.
- */
-static inline int rt_mutex_is_locked(struct rt_mutex *lock)
-{
-       return lock->owner != NULL;
-}
-
 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h
new file mode 100644 (file)
index 0000000..1d264dd
--- /dev/null
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef _LINUX_RWBASE_RT_H
+#define _LINUX_RWBASE_RT_H
+
+#include <linux/rtmutex.h>
+#include <linux/atomic.h>
+
+#define READER_BIAS            (1U << 31)
+#define WRITER_BIAS            (1U << 30)
+
+struct rwbase_rt {
+       atomic_t                readers;
+       struct rt_mutex_base    rtmutex;
+};
+
+#define __RWBASE_INITIALIZER(name)                             \
+{                                                              \
+       .readers = ATOMIC_INIT(READER_BIAS),                    \
+       .rtmutex = __RT_MUTEX_BASE_INITIALIZER(name.rtmutex),   \
+}
+
+#define init_rwbase_rt(rwbase)                                 \
+       do {                                                    \
+               rt_mutex_base_init(&(rwbase)->rtmutex);         \
+               atomic_set(&(rwbase)->readers, READER_BIAS);    \
+       } while (0)
+
+
+static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb)
+{
+       return atomic_read(&rwb->readers) != READER_BIAS;
+}
+
+static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb)
+{
+       return atomic_read(&rwb->readers) > 0;
+}
+
+#endif /* _LINUX_RWBASE_RT_H */
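The substitution scheme is the same one the PREEMPT_RT rwlock and rwsem variants further down use: a higher-level primitive embeds struct rwbase_rt and forwards its state queries. A minimal sketch of such a wrapper, with illustrative names:

#include <linux/rwbase_rt.h>

/* Illustrative wrapper around the RT reader/writer core. */
struct example_rwlock {
	struct rwbase_rt	rwbase;
};

static void example_rwlock_init(struct example_rwlock *l)
{
	init_rwbase_rt(&l->rwbase);
}

static bool example_rwlock_is_locked(struct example_rwlock *l)
{
	return rw_base_is_locked(&l->rwbase);
}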
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
new file mode 100644 (file)
index 0000000..49c1f38
--- /dev/null
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_RT_H
+#error Do not #include directly. Use <linux/spinlock.h>.
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
+                            struct lock_class_key *key);
+#else
+static inline void __rt_rwlock_init(rwlock_t *rwlock, char *name,
+                                   struct lock_class_key *key)
+{
+}
+#endif
+
+#define rwlock_init(rwl)                               \
+do {                                                   \
+       static struct lock_class_key __key;             \
+                                                       \
+       init_rwbase_rt(&(rwl)->rwbase);                 \
+       __rt_rwlock_init(rwl, #rwl, &__key);            \
+} while (0)
+
+extern void rt_read_lock(rwlock_t *rwlock);
+extern int rt_read_trylock(rwlock_t *rwlock);
+extern void rt_read_unlock(rwlock_t *rwlock);
+extern void rt_write_lock(rwlock_t *rwlock);
+extern int rt_write_trylock(rwlock_t *rwlock);
+extern void rt_write_unlock(rwlock_t *rwlock);
+
+static __always_inline void read_lock(rwlock_t *rwlock)
+{
+       rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_bh(rwlock_t *rwlock)
+{
+       local_bh_disable();
+       rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_irq(rwlock_t *rwlock)
+{
+       rt_read_lock(rwlock);
+}
+
+#define read_lock_irqsave(lock, flags)                 \
+       do {                                            \
+               typecheck(unsigned long, flags);        \
+               rt_read_lock(lock);                     \
+               flags = 0;                              \
+       } while (0)
+
+#define read_trylock(lock)     __cond_lock(lock, rt_read_trylock(lock))
+
+static __always_inline void read_unlock(rwlock_t *rwlock)
+{
+       rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_bh(rwlock_t *rwlock)
+{
+       rt_read_unlock(rwlock);
+       local_bh_enable();
+}
+
+static __always_inline void read_unlock_irq(rwlock_t *rwlock)
+{
+       rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
+                                                  unsigned long flags)
+{
+       rt_read_unlock(rwlock);
+}
+
+static __always_inline void write_lock(rwlock_t *rwlock)
+{
+       rt_write_lock(rwlock);
+}
+
+static __always_inline void write_lock_bh(rwlock_t *rwlock)
+{
+       local_bh_disable();
+       rt_write_lock(rwlock);
+}
+
+static __always_inline void write_lock_irq(rwlock_t *rwlock)
+{
+       rt_write_lock(rwlock);
+}
+
+#define write_lock_irqsave(lock, flags)                        \
+       do {                                            \
+               typecheck(unsigned long, flags);        \
+               rt_write_lock(lock);                    \
+               flags = 0;                              \
+       } while (0)
+
+#define write_trylock(lock)    __cond_lock(lock, rt_write_trylock(lock))
+
+#define write_trylock_irqsave(lock, flags)             \
+({                                                     \
+       int __locked;                                   \
+                                                       \
+       typecheck(unsigned long, flags);                \
+       flags = 0;                                      \
+       __locked = write_trylock(lock);                 \
+       __locked;                                       \
+})
+
+static __always_inline void write_unlock(rwlock_t *rwlock)
+{
+       rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_bh(rwlock_t *rwlock)
+{
+       rt_write_unlock(rwlock);
+       local_bh_enable();
+}
+
+static __always_inline void write_unlock_irq(rwlock_t *rwlock)
+{
+       rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
+                                                   unsigned long flags)
+{
+       rt_write_unlock(rwlock);
+}
+
+#define rwlock_is_contended(lock)              (((void)(lock), 0))
+
+#endif /* __LINUX_RWLOCK_RT_H */
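Callers keep using the ordinary rwlock API; on PREEMPT_RT those calls map onto the rt_read_lock()/rt_write_lock() entry points above, and the *_irqsave() flavours simply hand back flags == 0 because readers and writers may sleep. A trivial, illustrative sketch:

#include <linux/spinlock.h>	/* pulls in the rwlock API for both !RT and RT builds */

static DEFINE_RWLOCK(example_rwlock);	/* illustrative lock */
static int example_value;

static int example_read(void)
{
	int v;

	read_lock(&example_rwlock);
	v = example_value;
	read_unlock(&example_rwlock);
	return v;
}

static void example_write(int v)
{
	write_lock(&example_rwlock);
	example_value = v;
	write_unlock(&example_rwlock);
}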
index 3bd03e1..1948442 100644 (file)
@@ -1,9 +1,23 @@
 #ifndef __LINUX_RWLOCK_TYPES_H
 #define __LINUX_RWLOCK_TYPES_H
 
+#if !defined(__LINUX_SPINLOCK_TYPES_H)
+# error "Do not include directly, include spinlock_types.h"
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname)                                     \
+       .dep_map = {                                                    \
+               .name = #lockname,                                      \
+               .wait_type_inner = LD_WAIT_CONFIG,                      \
+       }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
 /*
- * include/linux/rwlock_types.h - generic rwlock type definitions
- *                               and initializers
+ * generic rwlock type definitions and initializers
  *
  * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
  * Released under the General Public License (GPL).
@@ -21,16 +35,6 @@ typedef struct {
 
 #define RWLOCK_MAGIC           0xdeaf1eed
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname)                                     \
-       .dep_map = {                                                    \
-               .name = #lockname,                                      \
-               .wait_type_inner = LD_WAIT_CONFIG,                      \
-       }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
 #ifdef CONFIG_DEBUG_SPINLOCK
 #define __RW_LOCK_UNLOCKED(lockname)                                   \
        (rwlock_t)      {       .raw_lock = __ARCH_RW_LOCK_UNLOCKED,    \
@@ -46,4 +50,29 @@ typedef struct {
 
 #define DEFINE_RWLOCK(x)       rwlock_t x = __RW_LOCK_UNLOCKED(x)
 
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+typedef struct {
+       struct rwbase_rt        rwbase;
+       atomic_t                readers;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
+} rwlock_t;
+
+#define __RWLOCK_RT_INITIALIZER(name)                                  \
+{                                                                      \
+       .rwbase = __RWBASE_INITIALIZER(name),                           \
+       RW_DEP_MAP_INIT(name)                                           \
+}
+
+#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
+
+#define DEFINE_RWLOCK(name)                                            \
+       rwlock_t name = __RW_LOCK_UNLOCKED(name)
+
+#endif /* CONFIG_PREEMPT_RT */
+
 #endif /* __LINUX_RWLOCK_TYPES_H */
index a66038d..426e98e 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <linux/err.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname)                        \
+       .dep_map = {                                    \
+               .name = #lockname,                      \
+               .wait_type_inner = LD_WAIT_SLEEP,       \
+       },
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #include <linux/osq_lock.h>
 #endif
@@ -64,16 +77,6 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 
 /* Common initializer macros and functions */
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname)                        \
-       .dep_map = {                                    \
-               .name = #lockname,                      \
-               .wait_type_inner = LD_WAIT_SLEEP,       \
-       },
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
 #ifdef CONFIG_DEBUG_RWSEMS
 # define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
 #else
@@ -119,6 +122,61 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
        return !list_empty(&sem->wait_list);
 }
 
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+struct rw_semaphore {
+       struct rwbase_rt        rwbase;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name)                              \
+       {                                                       \
+               .rwbase = __RWBASE_INITIALIZER(name),           \
+               __RWSEM_DEP_MAP_INIT(name)                      \
+       }
+
+#define DECLARE_RWSEM(lockname) \
+       struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void  __rwsem_init(struct rw_semaphore *rwsem, const char *name,
+                         struct lock_class_key *key);
+#else
+static inline void  __rwsem_init(struct rw_semaphore *rwsem, const char *name,
+                                struct lock_class_key *key)
+{
+}
+#endif
+
+#define init_rwsem(sem)                                                \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       init_rwbase_rt(&(sem)->rwbase);                 \
+       __rwsem_init((sem), #sem, &__key);                      \
+} while (0)
+
+static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+       return rw_base_is_locked(&sem->rwbase);
+}
+
+static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
+{
+       return rw_base_is_contended(&sem->rwbase);
+}
+
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
+ * The functions below are the same for all rwsem implementations including
+ * the RT specific variant.
+ */
+
 /*
  * lock for reading
  */
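As the comment above says, the caller-visible rwsem API is shared by both implementations. A short illustrative sketch using the RT initializer added above; the list and semaphore names are made up:

#include <linux/list.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);	/* illustrative semaphore */
static LIST_HEAD(example_list);

static void example_reader(void)
{
	down_read(&example_sem);
	/* ... walk example_list ... */
	up_read(&example_sem);
}

static void example_writer(struct list_head *new)
{
	down_write(&example_sem);
	list_add(new, &example_list);
	up_write(&example_sem);
}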
index ec8d07d..1780260 100644 (file)
@@ -95,7 +95,9 @@ struct task_group;
 #define TASK_WAKING                    0x0200
 #define TASK_NOLOAD                    0x0400
 #define TASK_NEW                       0x0800
-#define TASK_STATE_MAX                 0x1000
+/* RT specific auxiliary flag to mark RT lock waiters */
+#define TASK_RTLOCK_WAIT               0x1000
+#define TASK_STATE_MAX                 0x2000
 
 /* Convenience macros for the sake of set_current_state: */
 #define TASK_KILLABLE                  (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
@@ -121,8 +123,6 @@ struct task_group;
 
 #define task_is_stopped_or_traced(task)        ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 
-#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-
 /*
  * Special states are those that do not use the normal wait-loop pattern. See
  * the comment with set_special_state().
@@ -130,30 +130,37 @@ struct task_group;
 #define is_special_task_state(state)                           \
        ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
 
-#define __set_current_state(state_value)                       \
-       do {                                                    \
-               WARN_ON_ONCE(is_special_task_state(state_value));\
-               current->task_state_change = _THIS_IP_;         \
-               WRITE_ONCE(current->__state, (state_value));    \
-       } while (0)
-
-#define set_current_state(state_value)                         \
-       do {                                                    \
-               WARN_ON_ONCE(is_special_task_state(state_value));\
-               current->task_state_change = _THIS_IP_;         \
-               smp_store_mb(current->__state, (state_value));  \
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+# define debug_normal_state_change(state_value)                                \
+       do {                                                            \
+               WARN_ON_ONCE(is_special_task_state(state_value));       \
+               current->task_state_change = _THIS_IP_;                 \
        } while (0)
 
-#define set_special_state(state_value)                                 \
+# define debug_special_state_change(state_value)                       \
        do {                                                            \
-               unsigned long flags; /* may shadow */                   \
                WARN_ON_ONCE(!is_special_task_state(state_value));      \
-               raw_spin_lock_irqsave(&current->pi_lock, flags);        \
                current->task_state_change = _THIS_IP_;                 \
-               WRITE_ONCE(current->__state, (state_value));            \
-               raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
        } while (0)
+
+# define debug_rtlock_wait_set_state()                                 \
+       do {                                                             \
+               current->saved_state_change = current->task_state_change;\
+               current->task_state_change = _THIS_IP_;                  \
+       } while (0)
+
+# define debug_rtlock_wait_restore_state()                             \
+       do {                                                             \
+               current->task_state_change = current->saved_state_change;\
+       } while (0)
+
 #else
+# define debug_normal_state_change(cond)       do { } while (0)
+# define debug_special_state_change(cond)      do { } while (0)
+# define debug_rtlock_wait_set_state()         do { } while (0)
+# define debug_rtlock_wait_restore_state()     do { } while (0)
+#endif
+
 /*
  * set_current_state() includes a barrier so that the write of current->state
  * is correctly serialised wrt the caller's subsequent test of whether to
@@ -192,26 +199,77 @@ struct task_group;
  * Also see the comments of try_to_wake_up().
  */
 #define __set_current_state(state_value)                               \
-       WRITE_ONCE(current->__state, (state_value))
+       do {                                                            \
+               debug_normal_state_change((state_value));               \
+               WRITE_ONCE(current->__state, (state_value));            \
+       } while (0)
 
 #define set_current_state(state_value)                                 \
-       smp_store_mb(current->__state, (state_value))
+       do {                                                            \
+               debug_normal_state_change((state_value));               \
+               smp_store_mb(current->__state, (state_value));          \
+       } while (0)
 
 /*
  * set_special_state() should be used for those states when the blocking task
  * can not use the regular condition based wait-loop. In that case we must
- * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
- * will not collide with our state change.
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING
+ * stores will not collide with our state change.
  */
 #define set_special_state(state_value)                                 \
        do {                                                            \
                unsigned long flags; /* may shadow */                   \
+                                                                       \
                raw_spin_lock_irqsave(&current->pi_lock, flags);        \
+               debug_special_state_change((state_value));              \
                WRITE_ONCE(current->__state, (state_value));            \
                raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
        } while (0)
 
-#endif
+/*
+ * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
+ *
+ * RT's spin/rwlock substitutions are state preserving. The state of the
+ * task when blocking on the lock is saved in task_struct::saved_state and
+ * restored after the lock has been acquired.  These operations are
+ * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
+ * lock related wakeups while the task is blocked on the lock are
+ * redirected to operate on task_struct::saved_state to ensure that these
+ * are not dropped. On restore task_struct::saved_state is set to
+ * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
+ *
+ * The lock operation looks like this:
+ *
+ *     current_save_and_set_rtlock_wait_state();
+ *     for (;;) {
+ *             if (try_lock())
+ *                     break;
+ *             raw_spin_unlock_irq(&lock->wait_lock);
+ *             schedule_rtlock();
+ *             raw_spin_lock_irq(&lock->wait_lock);
+ *             set_current_state(TASK_RTLOCK_WAIT);
+ *     }
+ *     current_restore_rtlock_saved_state();
+ */
+#define current_save_and_set_rtlock_wait_state()                       \
+       do {                                                            \
+               lockdep_assert_irqs_disabled();                         \
+               raw_spin_lock(&current->pi_lock);                       \
+               current->saved_state = current->__state;                \
+               debug_rtlock_wait_set_state();                          \
+               WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);         \
+               raw_spin_unlock(&current->pi_lock);                     \
+       } while (0);
+
+#define current_restore_rtlock_saved_state()                           \
+       do {                                                            \
+               lockdep_assert_irqs_disabled();                         \
+               raw_spin_lock(&current->pi_lock);                       \
+               debug_rtlock_wait_restore_state();                      \
+               WRITE_ONCE(current->__state, current->saved_state);     \
+               current->saved_state = TASK_RUNNING;                    \
+               raw_spin_unlock(&current->pi_lock);                     \
+       } while (0);
 
 #define get_current_state()    READ_ONCE(current->__state)
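
The state helpers above are meant to be used in the canonical wait-loop pattern; a short sketch of that ordering, with `condition` standing in for whatever the waker side sets before issuing the wakeup:

	for (;;) {
		/* the barrier in set_current_state() orders the store of
		 * __state against the subsequent test of the condition */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (condition)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
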
 
@@ -230,6 +288,9 @@ extern long schedule_timeout_idle(long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 asmlinkage void preempt_schedule_irq(void);
+#ifdef CONFIG_PREEMPT_RT
+ extern void schedule_rtlock(void);
+#endif
 
 extern int __must_check io_schedule_prepare(void);
 extern void io_schedule_finish(int token);
@@ -668,6 +729,11 @@ struct task_struct {
 #endif
        unsigned int                    __state;
 
+#ifdef CONFIG_PREEMPT_RT
+       /* saved state for "spinlock sleepers" */
+       unsigned int                    saved_state;
+#endif
+
        /*
         * This begins the randomizable portion of task_struct. Only
         * scheduling-critical items should be added above here.
@@ -748,6 +814,7 @@ struct task_struct {
        unsigned int                    policy;
        int                             nr_cpus_allowed;
        const cpumask_t                 *cpus_ptr;
+       cpumask_t                       *user_cpus_ptr;
        cpumask_t                       cpus_mask;
        void                            *migration_pending;
 #ifdef CONFIG_SMP
@@ -863,6 +930,10 @@ struct task_struct {
        /* Used by page_owner=on to detect recursion in page tracking. */
        unsigned                        in_page_owner:1;
 #endif
+#ifdef CONFIG_EVENTFD
+       /* Recursion prevention for eventfd_signal() */
+       unsigned                        in_eventfd_signal:1;
+#endif
 
        unsigned long                   atomic_flags; /* Flags requiring atomic access. */
 
@@ -1357,6 +1428,9 @@ struct task_struct {
        struct kmap_ctrl                kmap_ctrl;
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        unsigned long                   task_state_change;
+# ifdef CONFIG_PREEMPT_RT
+       unsigned long                   saved_state_change;
+# endif
 #endif
        int                             pagefault_disabled;
 #ifdef CONFIG_MMU
@@ -1400,6 +1474,16 @@ struct task_struct {
        struct llist_head               kretprobe_instances;
 #endif
 
+#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
+       /*
+        * If L1D flush is supported on mm context switch
+        * then we use this callback head to queue kill work
+        * to kill tasks that are not running on SMT disabled
+        * cores
+        */
+       struct callback_head            l1d_flush_kill;
+#endif
+
        /*
         * New fields for task_struct should be added above here, so that
         * they are included in the randomized portion of task_struct.
@@ -1705,6 +1789,11 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
+extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
+extern void release_user_cpus_ptr(struct task_struct *p);
+extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
+extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
+extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
 #else
 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
@@ -1715,6 +1804,21 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
                return -EINVAL;
        return 0;
 }
+static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
+{
+       if (src->user_cpus_ptr)
+               return -EINVAL;
+       return 0;
+}
+static inline void release_user_cpus_ptr(struct task_struct *p)
+{
+       WARN_ON(p->user_cpus_ptr);
+}
+
+static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
+{
+       return 0;
+}
 #endif
 
 extern int yield_to(struct task_struct *p, bool preempt);
index b9126fe..0310a5a 100644 (file)
@@ -714,6 +714,12 @@ static inline void unlock_task_sighand(struct task_struct *task,
        spin_unlock_irqrestore(&task->sighand->siglock, *flags);
 }
 
+#ifdef CONFIG_LOCKDEP
+extern void lockdep_assert_task_sighand_held(struct task_struct *task);
+#else
+static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
+#endif
+
 static inline unsigned long task_rlimit(const struct task_struct *task,
                unsigned int limit)
 {
index db2c0f3..304f431 100644 (file)
@@ -28,30 +28,12 @@ enum { sysctl_hung_task_timeout_secs = 0 };
 
 extern unsigned int sysctl_sched_child_runs_first;
 
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
-
 enum sched_tunable_scaling {
        SCHED_TUNABLESCALING_NONE,
        SCHED_TUNABLESCALING_LOG,
        SCHED_TUNABLESCALING_LINEAR,
        SCHED_TUNABLESCALING_END,
 };
-extern unsigned int sysctl_sched_tunable_scaling;
-
-extern unsigned int sysctl_numa_balancing_scan_delay;
-extern unsigned int sysctl_numa_balancing_scan_period_min;
-extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_size;
-
-#ifdef CONFIG_SCHED_DEBUG
-extern __read_mostly unsigned int sysctl_sched_migration_cost;
-extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-
-extern int sysctl_resched_latency_warn_ms;
-extern int sysctl_resched_latency_warn_once;
-#endif
 
 /*
  *  control realtime throttling:
index 26a2013..06cd8fb 100644 (file)
@@ -42,8 +42,11 @@ struct wake_q_head {
 
 #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
 
-#define DEFINE_WAKE_Q(name)                            \
-       struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+#define WAKE_Q_HEAD_INITIALIZER(name)                          \
+       { WAKE_Q_TAIL, &name.first }
+
+#define DEFINE_WAKE_Q(name)                                    \
+       struct wake_q_head name = WAKE_Q_HEAD_INITIALIZER(name)
 
 static inline void wake_q_init(struct wake_q_head *head)
 {
index 24eda04..5b72885 100644 (file)
@@ -120,10 +120,11 @@ enum lockdown_reason {
        LOCKDOWN_MMIOTRACE,
        LOCKDOWN_DEBUGFS,
        LOCKDOWN_XMON_WR,
+       LOCKDOWN_BPF_WRITE_USER,
        LOCKDOWN_INTEGRITY_MAX,
        LOCKDOWN_KCORE,
        LOCKDOWN_KPROBES,
-       LOCKDOWN_BPF_READ,
+       LOCKDOWN_BPF_READ_KERNEL,
        LOCKDOWN_PERF,
        LOCKDOWN_TRACEFS,
        LOCKDOWN_XMON_RW,
index 52d7fb9..c58cc14 100644 (file)
@@ -518,6 +518,25 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
        if (sysrq_ch)
                handle_sysrq(sysrq_ch);
 }
+
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+               unsigned long flags)
+{
+       int sysrq_ch;
+
+       if (!port->has_sysrq) {
+               spin_unlock_irqrestore(&port->lock, flags);
+               return;
+       }
+
+       sysrq_ch = port->sysrq_ch;
+       port->sysrq_ch = 0;
+
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       if (sysrq_ch)
+               handle_sysrq(sysrq_ch);
+}
 #else  /* CONFIG_MAGIC_SYSRQ_SERIAL */
 static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
 {
@@ -531,6 +550,11 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
 {
        spin_unlock(&port->lock);
 }
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+               unsigned long flags)
+{
+       spin_unlock_irqrestore(&port->lock, flags);
+}
 #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
 
 /*
index 0d8e3dc..d3c1a42 100644 (file)
@@ -421,6 +421,9 @@ extern int __sys_accept4_file(struct file *file, unsigned file_flags,
                        struct sockaddr __user *upeer_sockaddr,
                         int __user *upeer_addrlen, int flags,
                         unsigned long nofile);
+extern struct file *do_accept(struct file *file, unsigned file_flags,
+                             struct sockaddr __user *upeer_sockaddr,
+                             int __user *upeer_addrlen, int flags);
 extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
                         int __user *upeer_addrlen, int flags);
 extern int __sys_socket(int family, int type, int protocol);
index 97b8d12..8371bca 100644 (file)
@@ -147,7 +147,11 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
  *     not using a GPIO line)
  * @word_delay: delay to be inserted between consecutive
  *     words of a transfer
- *
+ * @cs_setup: delay to be introduced by the controller after CS is asserted
+ * @cs_hold: delay to be introduced by the controller before CS is deasserted
+ * @cs_inactive: delay to be introduced by the controller after CS is
+ *     deasserted. If @cs_change_delay is used from @spi_transfer, then the
+ *     two delays will be added up.
  * @statistics: statistics for the spi_device
  *
  * A @spi_device is used to interchange data between an SPI slave
@@ -188,6 +192,10 @@ struct spi_device {
        int                     cs_gpio;        /* LEGACY: chip select gpio */
        struct gpio_desc        *cs_gpiod;      /* chip select gpio desc */
        struct spi_delay        word_delay; /* inter-word delay */
+       /* CS delays */
+       struct spi_delay        cs_setup;
+       struct spi_delay        cs_hold;
+       struct spi_delay        cs_inactive;
 
        /* the statistics */
        struct spi_statistics   statistics;
@@ -339,6 +347,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  * @max_speed_hz: Highest supported transfer speed
  * @flags: other constraints relevant to this driver
  * @slave: indicates that this is an SPI slave controller
+ * @devm_allocated: whether the allocation of this struct is devres-managed
  * @max_transfer_size: function that returns the max transfer size for
  *     a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
  * @max_message_size: function that returns the max message size for
@@ -412,11 +421,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  *          controller has native support for memory like operations.
  * @unprepare_message: undo any work done by prepare_message().
  * @slave_abort: abort the ongoing transfer request on an SPI slave controller
- * @cs_setup: delay to be introduced by the controller after CS is asserted
- * @cs_hold: delay to be introduced by the controller before CS is deasserted
- * @cs_inactive: delay to be introduced by the controller after CS is
- *     deasserted. If @cs_change_delay is used from @spi_transfer, then the
- *     two delays will be added up.
  * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
  *     CS number. Any individual value may be -ENOENT for CS lines that
  *     are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
@@ -511,7 +515,7 @@ struct spi_controller {
 
 #define SPI_MASTER_GPIO_SS             BIT(5)  /* GPIO CS must select slave */
 
-       /* flag indicating this is a non-devres managed controller */
+       /* flag indicating if the allocation of this struct is devres-managed */
        bool                    devm_allocated;
 
        /* flag indicating this is an SPI slave controller */
@@ -550,8 +554,7 @@ struct spi_controller {
         * to configure specific CS timing through spi_set_cs_timing() after
         * spi_setup().
         */
-       int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
-                            struct spi_delay *hold, struct spi_delay *inactive);
+       int (*set_cs_timing)(struct spi_device *spi);
 
        /* bidirectional bulk transfers
         *
@@ -638,11 +641,6 @@ struct spi_controller {
        /* Optimized handlers for SPI memory-like operations. */
        const struct spi_controller_mem_ops *mem_ops;
 
-       /* CS delays */
-       struct spi_delay        cs_setup;
-       struct spi_delay        cs_hold;
-       struct spi_delay        cs_inactive;
-
        /* gpio chip select */
        int                     *cs_gpios;
        struct gpio_desc        **cs_gpiods;
index 7989784..45310ea 100644 (file)
@@ -12,6 +12,8 @@
  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
  *                        initializers
  *
+ *  linux/spinlock_types_raw:
+ *                       The raw types and initializers
  *  linux/spinlock_types.h:
  *                        defines the generic type and initializers
  *
@@ -31,6 +33,8 @@
  *                        contains the generic, simplified UP spinlock type.
  *                        (which is an empty structure on non-debug builds)
  *
+ *  linux/spinlock_types_raw:
+ *                       The raw RT types and initializers
  *  linux/spinlock_types.h:
  *                        defines the generic type and initializers
  *
@@ -308,8 +312,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
        1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-/* Include rwlock functions */
+#ifndef CONFIG_PREEMPT_RT
+/* Include rwlock functions for !RT */
 #include <linux/rwlock.h>
+#endif
 
 /*
  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -320,6 +326,9 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 # include <linux/spinlock_api_up.h>
 #endif
 
+/* Non PREEMPT_RT kernel, map to raw spinlocks: */
+#ifndef CONFIG_PREEMPT_RT
+
 /*
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
@@ -454,6 +463,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
 
 #define assert_spin_locked(lock)       assert_raw_spin_locked(&(lock)->rlock)
 
+#else  /* !CONFIG_PREEMPT_RT */
+# include <linux/spinlock_rt.h>
+#endif /* CONFIG_PREEMPT_RT */
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
index 19a9be9..6b8e1a0 100644 (file)
@@ -187,6 +187,9 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
        return 0;
 }
 
+/* PREEMPT_RT has its own rwlock implementation */
+#ifndef CONFIG_PREEMPT_RT
 #include <linux/rwlock_api_smp.h>
+#endif
 
 #endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
new file mode 100644 (file)
index 0000000..835aeda
--- /dev/null
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+                               struct lock_class_key *key, bool percpu);
+#else
+static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+                               struct lock_class_key *key, bool percpu)
+{
+}
+#endif
+
+#define spin_lock_init(slock)                                  \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       rt_mutex_base_init(&(slock)->lock);                     \
+       __rt_spin_lock_init(slock, #slock, &__key, false);      \
+} while (0)
+
+#define local_spin_lock_init(slock)                            \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       rt_mutex_base_init(&(slock)->lock);                     \
+       __rt_spin_lock_init(slock, #slock, &__key, true);       \
+} while (0)
+
+extern void rt_spin_lock(spinlock_t *lock);
+extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
+extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
+extern void rt_spin_unlock(spinlock_t *lock);
+extern void rt_spin_lock_unlock(spinlock_t *lock);
+extern int rt_spin_trylock_bh(spinlock_t *lock);
+extern int rt_spin_trylock(spinlock_t *lock);
+
+static __always_inline void spin_lock(spinlock_t *lock)
+{
+       rt_spin_lock(lock);
+}
+
+#ifdef CONFIG_LOCKDEP
+# define __spin_lock_nested(lock, subclass)                            \
+       rt_spin_lock_nested(lock, subclass)
+
+# define __spin_lock_nest_lock(lock, nest_lock)                                \
+       do {                                                            \
+               typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+               rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);    \
+       } while (0)
+# define __spin_lock_irqsave_nested(lock, flags, subclass)     \
+       do {                                                    \
+               typecheck(unsigned long, flags);                \
+               flags = 0;                                      \
+               __spin_lock_nested(lock, subclass);             \
+       } while (0)
+
+#else
+ /*
+  * Always evaluate the 'subclass' argument to avoid that the compiler
+  * warns about set-but-not-used variables when building with
+  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
+  */
+# define __spin_lock_nested(lock, subclass)    spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_nest_lock(lock, subclass) spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_irqsave_nested(lock, flags, subclass)     \
+       spin_lock_irqsave(((void)(subclass), (lock)), flags)
+#endif
+
+#define spin_lock_nested(lock, subclass)               \
+       __spin_lock_nested(lock, subclass)
+
+#define spin_lock_nest_lock(lock, nest_lock)           \
+       __spin_lock_nest_lock(lock, nest_lock)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass)        \
+       __spin_lock_irqsave_nested(lock, flags, subclass)
+
+static __always_inline void spin_lock_bh(spinlock_t *lock)
+{
+       /* Investigate: Drop bh when blocking ? */
+       local_bh_disable();
+       rt_spin_lock(lock);
+}
+
+static __always_inline void spin_lock_irq(spinlock_t *lock)
+{
+       rt_spin_lock(lock);
+}
+
+#define spin_lock_irqsave(lock, flags)                  \
+       do {                                             \
+               typecheck(unsigned long, flags);         \
+               flags = 0;                               \
+               spin_lock(lock);                         \
+       } while (0)
+
+static __always_inline void spin_unlock(spinlock_t *lock)
+{
+       rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
+{
+       rt_spin_unlock(lock);
+       local_bh_enable();
+}
+
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
+{
+       rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
+                                                  unsigned long flags)
+{
+       rt_spin_unlock(lock);
+}
+
+#define spin_trylock(lock)                             \
+       __cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock_bh(lock)                          \
+       __cond_lock(lock, rt_spin_trylock_bh(lock))
+
+#define spin_trylock_irq(lock)                         \
+       __cond_lock(lock, rt_spin_trylock(lock))
+
+#define __spin_trylock_irqsave(lock, flags)            \
+({                                                     \
+       int __locked;                                   \
+                                                       \
+       typecheck(unsigned long, flags);                \
+       flags = 0;                                      \
+       __locked = spin_trylock(lock);                  \
+       __locked;                                       \
+})
+
+#define spin_trylock_irqsave(lock, flags)              \
+       __cond_lock(lock, __spin_trylock_irqsave(lock, flags))
+
+#define spin_is_contended(lock)                (((void)(lock), 0))
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+       return rt_mutex_base_is_locked(&lock->lock);
+}
+
+#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
+
+#include <linux/rwlock_rt.h>
+
+#endif
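
A practical consequence of the mappings in this new header: on PREEMPT_RT, spin_lock_irqsave() neither disables interrupts nor saves any flags (it stores 0), yet callers keep exactly the same source-level pattern. A small sketch with invented names (my_lock, my_count):

	static DEFINE_SPINLOCK(my_lock);
	static unsigned long my_count;

	static void bump_count(void)
	{
		unsigned long flags;

		/* !RT: IRQs off, spinning lock.
		 * RT:  rtmutex-based sleeping lock, flags is simply set to 0. */
		spin_lock_irqsave(&my_lock, flags);
		my_count++;
		spin_unlock_irqrestore(&my_lock, flags);
	}
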
index b981caa..2dfa35f 100644 (file)
@@ -9,65 +9,11 @@
  * Released under the General Public License (GPL).
  */
 
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep_types.h>
+#include <linux/spinlock_types_raw.h>
 
-typedef struct raw_spinlock {
-       arch_spinlock_t raw_lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned int magic, owner_cpu;
-       void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC         0xdead4ead
-
-#define SPINLOCK_OWNER_INIT    ((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RAW_SPIN_DEP_MAP_INIT(lockname)               \
-       .dep_map = {                                    \
-               .name = #lockname,                      \
-               .wait_type_inner = LD_WAIT_SPIN,        \
-       }
-# define SPIN_DEP_MAP_INIT(lockname)                   \
-       .dep_map = {                                    \
-               .name = #lockname,                      \
-               .wait_type_inner = LD_WAIT_CONFIG,      \
-       }
-#else
-# define RAW_SPIN_DEP_MAP_INIT(lockname)
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname)             \
-       .magic = SPINLOCK_MAGIC,                \
-       .owner_cpu = -1,                        \
-       .owner = SPINLOCK_OWNER_INIT,
-#else
-# define SPIN_DEBUG_INIT(lockname)
-#endif
-
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname)  \
-       {                                       \
-       .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,  \
-       SPIN_DEBUG_INIT(lockname)               \
-       RAW_SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname)     \
-       (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+#ifndef CONFIG_PREEMPT_RT
 
+/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */
 typedef struct spinlock {
        union {
                struct raw_spinlock rlock;
@@ -96,6 +42,35 @@ typedef struct spinlock {
 
 #define DEFINE_SPINLOCK(x)     spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
 
+#else /* !CONFIG_PREEMPT_RT */
+
+/* PREEMPT_RT kernels map spinlock to rt_mutex */
+#include <linux/rtmutex.h>
+
+typedef struct spinlock {
+       struct rt_mutex_base    lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
+} spinlock_t;
+
+#define __SPIN_LOCK_UNLOCKED(name)                             \
+       {                                                       \
+               .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
+               SPIN_DEP_MAP_INIT(name)                         \
+       }
+
+#define __LOCAL_SPIN_LOCK_UNLOCKED(name)                       \
+       {                                                       \
+               .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
+               LOCAL_SPIN_DEP_MAP_INIT(name)                   \
+       }
+
+#define DEFINE_SPINLOCK(name)                                  \
+       spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#endif /* CONFIG_PREEMPT_RT */
+
 #include <linux/rwlock_types.h>
 
 #endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
new file mode 100644 (file)
index 0000000..91cb36b
--- /dev/null
@@ -0,0 +1,73 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
+
+#include <linux/types.h>
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep_types.h>
+
+typedef struct raw_spinlock {
+       arch_spinlock_t raw_lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+       unsigned int magic, owner_cpu;
+       void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC         0xdead4ead
+
+#define SPINLOCK_OWNER_INIT    ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RAW_SPIN_DEP_MAP_INIT(lockname)               \
+       .dep_map = {                                    \
+               .name = #lockname,                      \
+               .wait_type_inner = LD_WAIT_SPIN,        \
+       }
+# define SPIN_DEP_MAP_INIT(lockname)                   \
+       .dep_map = {                                    \
+               .name = #lockname,                      \
+               .wait_type_inner = LD_WAIT_CONFIG,      \
+       }
+
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)             \
+       .dep_map = {                                    \
+               .name = #lockname,                      \
+               .wait_type_inner = LD_WAIT_CONFIG,      \
+               .lock_type = LD_LOCK_PERCPU,            \
+       }
+#else
+# define RAW_SPIN_DEP_MAP_INIT(lockname)
+# define SPIN_DEP_MAP_INIT(lockname)
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname)             \
+       .magic = SPINLOCK_MAGIC,                \
+       .owner_cpu = -1,                        \
+       .owner = SPINLOCK_OWNER_INIT,
+#else
+# define SPIN_DEBUG_INIT(lockname)
+#endif
+
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname)  \
+{                                              \
+       .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,  \
+       SPIN_DEBUG_INIT(lockname)               \
+       RAW_SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname)     \
+       (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x)  raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_SPINLOCK_TYPES_RAW_H */
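
The raw types split out into this header keep their spinning, IRQ-disabling semantics in every configuration, so code that truly cannot sleep (for example state shared with hard interrupt context) keeps using raw_spinlock_t. An illustrative sketch, names invented:

	static DEFINE_RAW_SPINLOCK(my_raw_lock);

	static void touch_hw_state(void)
	{
		unsigned long flags;

		/* spins with interrupts disabled, on PREEMPT_RT as well */
		raw_spin_lock_irqsave(&my_raw_lock, flags);
		/* ... access state shared with hard IRQ context ... */
		raw_spin_unlock_irqrestore(&my_raw_lock, flags);
	}
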
index 0e0cf4d..6cfaa0a 100644 (file)
@@ -61,7 +61,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
        int idx;
 
        idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
-       WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
+       WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
        return idx;
 }
 
@@ -81,11 +81,11 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
 {
        int idx;
 
-       idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
+       idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
        pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
                 tt, tf, idx,
-                READ_ONCE(ssp->srcu_lock_nesting[!idx]),
-                READ_ONCE(ssp->srcu_lock_nesting[idx]));
+                data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
+                data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])));
 }
 
 #endif
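
The Tiny SRCU hunk wraps the diagnostic reads in data_race() so that KCSAN treats them as intentionally racy, while READ_ONCE() still prevents load tearing. The general shape of that annotation, as a sketch outside this merge:

	static int shared_counter;	/* updated concurrently without locking */

	static void report(void)
	{
		/* diagnostic-only read: the race is intentional and benign */
		pr_info("counter=%d\n", data_race(READ_ONCE(shared_counter)));
	}
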
index fc94faa..3e56a97 100644 (file)
  *   DECLARE_STATIC_CALL(name, func);
  *   DEFINE_STATIC_CALL(name, func);
  *   DEFINE_STATIC_CALL_NULL(name, typename);
+ *   DEFINE_STATIC_CALL_RET0(name, typename);
+ *
+ *   __static_call_return0;
+ *
  *   static_call(name)(args...);
  *   static_call_cond(name)(args...);
  *   static_call_update(name, func);
  *   static_call_query(name);
  *
+ *   EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}()
+ *
  * Usage example:
  *
  *   # Start with the following functions (with identical prototypes):
  *   To query which function is currently set to be called, use:
  *
  *   func = static_call_query(name);
+ *
+ *
+ * DEFINE_STATIC_CALL_RET0 / __static_call_return0:
+ *
+ *   Just like how DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the
+ *   conditional void function call, DEFINE_STATIC_CALL_RET0 /
+ *   __static_call_return0 optimize the do nothing return 0 function.
+ *
+ *   This feature is strictly UB per the C standard (since it casts a function
+ *   pointer to a different signature) and relies on the architecture ABI to
+ *   make things work. In particular it relies on Caller Stack-cleanup and the
+ *   whole return register being clobbered for short return values. All normal
+ *   CDECL style ABIs conform.
+ *
+ *   In particular the x86_64 implementation replaces the 5 byte CALL
+ *   instruction at the callsite with a 5 byte clear of the RAX register,
+ *   completely eliding any function call overhead.
+ *
+ *   Notably argument setup is unconditional.
+ *
+ *
+ * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP():
+ *
+ *   The difference is that the _TRAMP variant tries to only export the
+ *   trampoline with the result that a module can use static_call{,_cond}() but
+ *   not static_call_update().
+ *
  */
 
 #include <linux/types.h>
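
For readers unfamiliar with the API documented above, a compact sketch of how the pieces fit together (sc_example and add_one are invented names; DECLARE_STATIC_CALL() would normally live in a header):

	#include <linux/static_call.h>

	static int add_one(int x)
	{
		return x + 1;
	}

	DEFINE_STATIC_CALL(sc_example, add_one);

	static int use_it(void)
	{
		/* becomes a direct call on architectures with HAVE_STATIC_CALL,
		 * an indirect call otherwise */
		return static_call(sc_example)(41);
	}

	/* all call sites can be retargeted at runtime, e.g. from init code:
	 * static_call_update(sc_example, &add_one); */
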
index 938c2bf..02117ed 100644 (file)
@@ -20,6 +20,7 @@ enum rpc_auth_flavors {
        RPC_AUTH_DES   = 3,
        RPC_AUTH_KRB   = 4,
        RPC_AUTH_GSS   = 6,
+       RPC_AUTH_TLS   = 7,
        RPC_AUTH_MAXFLAVOR = 8,
        /* pseudoflavors: */
        RPC_AUTH_GSS_KRB5  = 390003,
index e91d51e..f0f846f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/sunrpc/svcauth.h>
 #include <linux/wait.h>
 #include <linux/mm.h>
+#include <linux/pagevec.h>
 
 /* statistics for svc_pool structures */
 struct svc_pool_stats {
@@ -256,6 +257,7 @@ struct svc_rqst {
        struct page *           *rq_next_page; /* next reply page to use */
        struct page *           *rq_page_end;  /* one past the last page */
 
+       struct pagevec          rq_pvec;
        struct kvec             rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
        struct bio_vec          rq_bvec[RPCSVC_MAXPAGES];
 
@@ -502,6 +504,8 @@ struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
                                        struct svc_pool *pool, int node);
 struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
                                        struct svc_pool *pool, int node);
+void              svc_rqst_replace_page(struct svc_rqst *rqstp,
+                                        struct page *page);
 void              svc_rqst_free(struct svc_rqst *);
 void              svc_exit_thread(struct svc_rqst *);
 unsigned int      svc_pool_map_get(void);
@@ -523,6 +527,7 @@ void                   svc_wake_up(struct svc_serv *);
 void              svc_reserve(struct svc_rqst *rqstp, int space);
 struct svc_pool *  svc_pool_for_cpu(struct svc_serv *serv, int cpu);
 char *            svc_print_addr(struct svc_rqst *, char *, size_t);
+const char *      svc_proc_name(const struct svc_rqst *rqstp);
 int               svc_encode_result_payload(struct svc_rqst *rqstp,
                                             unsigned int offset,
                                             unsigned int length);
index 3184465..24aa159 100644 (file)
@@ -90,9 +90,9 @@ struct svcxprt_rdma {
        struct ib_pd         *sc_pd;
 
        spinlock_t           sc_send_lock;
-       struct list_head     sc_send_ctxts;
+       struct llist_head    sc_send_ctxts;
        spinlock_t           sc_rw_ctxt_lock;
-       struct list_head     sc_rw_ctxts;
+       struct llist_head    sc_rw_ctxts;
 
        u32                  sc_pending_recvs;
        u32                  sc_recv_batch;
@@ -150,7 +150,7 @@ struct svc_rdma_recv_ctxt {
 };
 
 struct svc_rdma_send_ctxt {
-       struct list_head        sc_list;
+       struct llist_node       sc_node;
        struct rpc_rdma_cid     sc_cid;
 
        struct ib_send_wr       sc_send_wr;
@@ -207,6 +207,7 @@ extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
                                    struct svc_rdma_send_ctxt *sctxt,
                                    struct svc_rdma_recv_ctxt *rctxt,
                                    int status);
+extern void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail);
 extern int svc_rdma_sendto(struct svc_rqst *);
 extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
                                   unsigned int length);
index a965cbc..b519609 100644 (file)
@@ -95,6 +95,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
 #define        rpc_auth_unix   cpu_to_be32(RPC_AUTH_UNIX)
 #define        rpc_auth_short  cpu_to_be32(RPC_AUTH_SHORT)
 #define        rpc_auth_gss    cpu_to_be32(RPC_AUTH_GSS)
+#define        rpc_auth_tls    cpu_to_be32(RPC_AUTH_TLS)
 
 #define        rpc_call        cpu_to_be32(RPC_CALL)
 #define        rpc_reply       cpu_to_be32(RPC_REPLY)
index c8c39f2..b15c1f0 100644 (file)
@@ -288,7 +288,6 @@ struct rpc_xprt {
        const char              *address_strings[RPC_DISPLAY_MAX];
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        struct dentry           *debugfs;               /* debugfs directory */
-       atomic_t                inject_disconnect;
 #endif
        struct rcu_head         rcu;
        const struct xprt_class *xprt_class;
@@ -502,21 +501,4 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
        return test_and_set_bit(XPRT_BINDING, &xprt->state);
 }
 
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-extern unsigned int rpc_inject_disconnect;
-static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
-{
-       if (!rpc_inject_disconnect)
-               return;
-       if (atomic_dec_return(&xprt->inject_disconnect))
-               return;
-       atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
-       xprt->ops->inject_disconnect(xprt);
-}
-#else
-static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
-{
-}
-#endif
-
 #endif /* _LINUX_SUNRPC_XPRT_H */
index d99ca99..1fa2b69 100644 (file)
@@ -48,6 +48,8 @@ typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer,
                size_t *lenp, loff_t *ppos);
 
 int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dobool(struct ctl_table *table, int write, void *buffer,
+               size_t *lenp, loff_t *ppos);
 int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *);
 int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *);
 int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
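
The new proc_dobool() handler declared above plugs into a ctl_table the same way the existing integer handlers do; a hedged sketch (my_feature_enabled and the table are invented for illustration):

	static bool my_feature_enabled;

	static struct ctl_table my_table[] = {
		{
			.procname	= "my_feature_enabled",
			.data		= &my_feature_enabled,
			.maxlen		= sizeof(bool),
			.mode		= 0644,
			.proc_handler	= proc_dobool,
		},
		{ }
	};
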
index 54269e4..3ebfea0 100644 (file)
@@ -27,6 +27,7 @@
 #define TEE_SHM_USER_MAPPED    BIT(4)  /* Memory mapped in user space */
 #define TEE_SHM_POOL           BIT(5)  /* Memory allocated from pool */
 #define TEE_SHM_KERNEL_MAPPED  BIT(6)  /* Memory mapped in kernel space */
+#define TEE_SHM_PRIV           BIT(7)  /* Memory private to TEE driver */
 
 struct device;
 struct tee_device;
@@ -332,6 +333,7 @@ void *tee_get_drvdata(struct tee_device *teedev);
  * @returns a pointer to 'struct tee_shm'
  */
 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
 
 /**
  * tee_shm_register() - Register shared memory buffer
index dcc30a5..4a25e0c 100644 (file)
@@ -4,6 +4,22 @@
 
 #include <linux/math.h>
 
+/* Metric prefixes in accordance with Système international (d'unités) */
+#define PETA   1000000000000000ULL
+#define TERA   1000000000000ULL
+#define GIGA   1000000000UL
+#define MEGA   1000000UL
+#define KILO   1000UL
+#define HECTO  100UL
+#define DECA   10UL
+#define DECI   10UL
+#define CENTI  100UL
+#define MILLI  1000UL
+#define MICRO  1000000UL
+#define NANO   1000000000UL
+#define PICO   1000000000000ULL
+#define FEMTO  1000000000000000ULL
+
 #define MILLIWATT_PER_WATT     1000L
 #define MICROWATT_PER_MILLIWATT        1000L
 #define MICROWATT_PER_WATT     1000000L
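
The new SI prefix constants are plain scale factors: the large prefixes (KILO ... PETA) are multipliers, and the small ones (DECI ... FEMTO) read naturally as the number of sub-units per unit, matching the existing *_PER_* macros shown above. Hypothetical helpers built on top:

	/* convert a frequency given in MHz to Hz */
	static inline u64 mhz_to_hz(u32 mhz)
	{
		return (u64)mhz * MEGA;			/* MEGA == 1000000UL */
	}

	/* convert microvolts to millivolts */
	static inline u32 uv_to_mv(u32 microvolts)
	{
		return microvolts / (MICRO / MILLI);	/* 1000 uV per mV */
	}
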
index 3aee78d..784659d 100644 (file)
@@ -196,6 +196,7 @@ struct otg_fsm {
        struct mutex lock;
        u8 *host_req_flag;
        struct delayed_work hnp_polling_work;
+       bool hnp_work_inited;
        bool state_changed;
 };
 
index 3357ac9..8cfe49d 100644 (file)
@@ -277,6 +277,17 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                        const struct vdpa_config_ops *config,
                                        size_t size, const char *name);
 
+/**
+ * vdpa_alloc_device - allocate and initialize a vDPA device
+ *
+ * @dev_struct: the type of the parent structure
+ * @member: the name of struct vdpa_device within the @dev_struct
+ * @parent: the parent device
+ * @config: the bus operations that are supported by this device
+ * @name: name of the vdpa device
+ *
+ * Return allocated data structure or ERR_PTR upon error
+ */
 #define vdpa_alloc_device(dev_struct, member, parent, config, name)   \
                          container_of(__vdpa_alloc_device( \
                                       parent, config, \
index b1894e0..41edbc0 100644 (file)
@@ -110,6 +110,7 @@ struct virtio_device {
        bool config_enabled;
        bool config_change_pending;
        spinlock_t config_lock;
+       spinlock_t vqs_list_lock; /* Protects VQs list access */
        struct device dev;
        struct virtio_device_id id;
        const struct virtio_config_ops *config;
index 84db7b8..212892c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/virtio_byteorder.h>
 #include <linux/uio.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #if IS_REACHABLE(CONFIG_VHOST_IOTLB)
 #include <linux/dma-direction.h>
 #include <linux/vhost_iotlb.h>
index 6598ae3..93dab0e 100644 (file)
@@ -56,7 +56,7 @@ struct task_struct;
 
 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                                  \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock),                      \
-       .head           = { &(name).head, &(name).head } }
+       .head           = LIST_HEAD_INIT(name.head) }
 
 #define DECLARE_WAIT_QUEUE_HEAD(name) \
        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
index 667e86c..270677d 100644 (file)
@@ -336,14 +336,9 @@ static inline void cgroup_writeback_umount(void)
 /*
  * mm/page-writeback.c
  */
-#ifdef CONFIG_BLOCK
 void laptop_io_completion(struct backing_dev_info *info);
 void laptop_sync_completion(void);
-void laptop_mode_sync(struct work_struct *work);
 void laptop_mode_timer_fn(struct timer_list *t);
-#else
-static inline void laptop_sync_completion(void) { }
-#endif
 bool node_dirty_ok(struct pglist_data *pgdat);
 int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
 #ifdef CONFIG_CGROUP_WRITEBACK
index b77f39f..29db736 100644 (file)
 #define __LINUX_WW_MUTEX_H
 
 #include <linux/mutex.h>
+#include <linux/rtmutex.h>
+
+#if defined(CONFIG_DEBUG_MUTEXES) || \
+   (defined(CONFIG_PREEMPT_RT) && defined(CONFIG_DEBUG_RT_MUTEXES))
+#define DEBUG_WW_MUTEXES
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
+#define WW_MUTEX_BASE                  mutex
+#define ww_mutex_base_init(l,n,k)      __mutex_init(l,n,k)
+#define ww_mutex_base_trylock(l)       mutex_trylock(l)
+#define ww_mutex_base_is_locked(b)     mutex_is_locked((b))
+#else
+#define WW_MUTEX_BASE                  rt_mutex
+#define ww_mutex_base_init(l,n,k)      __rt_mutex_init(l,n,k)
+#define ww_mutex_base_trylock(l)       rt_mutex_trylock(l)
+#define ww_mutex_base_is_locked(b)     rt_mutex_base_is_locked(&(b)->rtmutex)
+#endif
 
 struct ww_class {
        atomic_long_t stamp;
@@ -28,16 +46,24 @@ struct ww_class {
        unsigned int is_wait_die;
 };
 
+struct ww_mutex {
+       struct WW_MUTEX_BASE base;
+       struct ww_acquire_ctx *ctx;
+#ifdef DEBUG_WW_MUTEXES
+       struct ww_class *ww_class;
+#endif
+};
+
 struct ww_acquire_ctx {
        struct task_struct *task;
        unsigned long stamp;
        unsigned int acquired;
        unsigned short wounded;
        unsigned short is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        unsigned int done_acquire;
        struct ww_class *ww_class;
-       struct ww_mutex *contending_lock;
+       void *contending_lock;
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
@@ -74,9 +100,9 @@ struct ww_acquire_ctx {
 static inline void ww_mutex_init(struct ww_mutex *lock,
                                 struct ww_class *ww_class)
 {
-       __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+       ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
        lock->ctx = NULL;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        lock->ww_class = ww_class;
 #endif
 }
@@ -113,7 +139,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
        ctx->acquired = 0;
        ctx->wounded = false;
        ctx->is_wait_die = ww_class->is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        ctx->ww_class = ww_class;
        ctx->done_acquire = 0;
        ctx->contending_lock = NULL;
@@ -143,7 +169,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
  */
 static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        lockdep_assert_held(ctx);
 
        DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
@@ -163,7 +189,7 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        mutex_release(&ctx->dep_map, _THIS_IP_);
 #endif
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        DEBUG_LOCKS_WARN_ON(ctx->acquired);
        if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
                /*
@@ -269,7 +295,7 @@ static inline void
 ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
        int ret;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
 #endif
        ret = ww_mutex_lock(lock, ctx);
@@ -305,7 +331,7 @@ static inline int __must_check
 ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
                                 struct ww_acquire_ctx *ctx)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
 #endif
        return ww_mutex_lock_interruptible(lock, ctx);
@@ -322,7 +348,7 @@ extern void ww_mutex_unlock(struct ww_mutex *lock);
  */
 static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
 {
-       return mutex_trylock(&lock->base);
+       return ww_mutex_base_trylock(&lock->base);
 }
 
 /***
@@ -335,7 +361,9 @@ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
  */
 static inline void ww_mutex_destroy(struct ww_mutex *lock)
 {
+#ifndef CONFIG_PREEMPT_RT
        mutex_destroy(&lock->base);
+#endif
 }
 
 /**
@@ -346,7 +374,7 @@ static inline void ww_mutex_destroy(struct ww_mutex *lock)
  */
 static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
 {
-       return mutex_is_locked(&lock->base);
+       return ww_mutex_base_is_locked(&lock->base);
 }
 
 #endif
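
Independent of whether the base lock is a mutex or an rt_mutex, the ww_mutex API seen by callers stays the same. A minimal single-lock sketch (my_ww_class and obj_lock are invented; the full -EDEADLK backoff dance is covered by the ww-mutex design document under Documentation/locking/):

	static DEFINE_WW_CLASS(my_ww_class);
	static struct ww_mutex obj_lock; /* ww_mutex_init(&obj_lock, &my_ww_class) at setup */

	static int update_obj(void)
	{
		struct ww_acquire_ctx ctx;
		int ret;

		ww_acquire_init(&ctx, &my_ww_class);
		ret = ww_mutex_lock(&obj_lock, &ctx);	/* rt_mutex-backed on PREEMPT_RT */
		if (!ret) {
			/* ... modify the protected object ... */
			ww_mutex_unlock(&obj_lock);
		}
		ww_acquire_fini(&ctx);
		return ret;
	}
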
index a53e944..db4312e 100644 (file)
@@ -1230,6 +1230,7 @@ struct hci_dev *hci_alloc_dev(void);
 void hci_free_dev(struct hci_dev *hdev);
 int hci_register_dev(struct hci_dev *hdev);
 void hci_unregister_dev(struct hci_dev *hdev);
+void hci_cleanup_dev(struct hci_dev *hdev);
 int hci_suspend_dev(struct hci_dev *hdev);
 int hci_resume_dev(struct hci_dev *hdev);
 int hci_reset_dev(struct hci_dev *hdev);
index 69c9eab..1b9d75a 100644 (file)
@@ -293,7 +293,7 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
 }
 
 /**
- * flow_action_has_one_action() - check if exactly one action is present
+ * flow_offload_has_one_action() - check if exactly one action is present
  * @action: tc filter flow offload action
  *
  * Returns true if exactly one action is present.
@@ -319,14 +319,12 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action,
        if (flow_offload_has_one_action(action))
                return true;
 
-       if (action) {
-               flow_action_for_each(i, action_entry, action) {
-                       if (i && action_entry->hw_stats != last_hw_stats) {
-                               NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
-                               return false;
-                       }
-                       last_hw_stats = action_entry->hw_stats;
+       flow_action_for_each(i, action_entry, action) {
+               if (i && action_entry->hw_stats != last_hw_stats) {
+                       NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+                       return false;
                }
+               last_hw_stats = action_entry->hw_stats;
        }
        return true;
 }
index 15b7fbe..c412dde 100644 (file)
@@ -267,7 +267,7 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i)
        return false;
 }
 
-/* Function to safely get fn->sernum for passed in rt
+/* Function to safely get fn->fn_sernum for passed in rt
  * and store result in passed in cookie.
  * Return true if we can get cookie safely
  * Return false if not
@@ -282,7 +282,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
 
        if (fn) {
                *cookie = fn->fn_sernum;
-               /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
+               /* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */
                smp_rmb();
                status = true;
        }
index 625a38c..0bf09a9 100644 (file)
@@ -265,7 +265,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
 static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb)
 {
-       int mtu;
+       unsigned int mtu;
 
        struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
                                inet6_sk(skb->sk) : NULL;
index 37e5300..fefd38d 100644 (file)
@@ -30,7 +30,6 @@ struct nf_tcp_net {
        u8 tcp_ignore_invalid_rst;
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        unsigned int offload_timeout;
-       unsigned int offload_pickup;
 #endif
 };
 
@@ -44,7 +43,6 @@ struct nf_udp_net {
        unsigned int timeouts[UDP_CT_MAX];
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        unsigned int offload_timeout;
-       unsigned int offload_pickup;
 #endif
 };
 
index e946366..1f4e181 100644 (file)
@@ -75,6 +75,7 @@ struct netns_xfrm {
 #endif
        spinlock_t              xfrm_state_lock;
        seqcount_spinlock_t     xfrm_state_hash_generation;
+       seqcount_spinlock_t     xfrm_policy_hash_generation;
 
        spinlock_t xfrm_policy_lock;
        struct mutex xfrm_cfg_mutex;
index ec78239..298a8d1 100644 (file)
@@ -337,6 +337,9 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
 
 /**
  * struct tcf_pkt_info - packet information
+ *
+ * @ptr: start of the pkt data
+ * @nexthdr: offset of the next header
  */
 struct tcf_pkt_info {
        unsigned char *         ptr;
@@ -355,6 +358,7 @@ struct tcf_ematch_ops;
  * @ops: the operations lookup table of the corresponding ematch module
  * @datalen: length of the ematch specific configuration data
  * @data: ematch specific data
+ * @net: the network namespace
  */
 struct tcf_ematch {
        struct tcf_ematch_ops * ops;
index e328c51..0509d2d 100644 (file)
@@ -31,6 +31,8 @@ struct psample_group *psample_group_get(struct net *net, u32 group_num);
 void psample_group_take(struct psample_group *group);
 void psample_group_put(struct psample_group *group);
 
+struct sk_buff;
+
 #if IS_ENABLED(CONFIG_PSAMPLE)
 
 void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
index b671b1f..8f58fd9 100644 (file)
@@ -94,6 +94,7 @@ struct btrfs_space_info;
        EM( FLUSH_DELAYED_ITEMS,        "FLUSH_DELAYED_ITEMS")          \
        EM( FLUSH_DELALLOC,             "FLUSH_DELALLOC")               \
        EM( FLUSH_DELALLOC_WAIT,        "FLUSH_DELALLOC_WAIT")          \
+       EM( FLUSH_DELALLOC_FULL,        "FLUSH_DELALLOC_FULL")          \
        EM( FLUSH_DELAYED_REFS_NR,      "FLUSH_DELAYED_REFS_NR")        \
        EM( FLUSH_DELAYED_REFS,         "FLUSH_ELAYED_REFS")            \
        EM( ALLOC_CHUNK,                "ALLOC_CHUNK")                  \
@@ -2037,7 +2038,7 @@ TRACE_EVENT(btrfs_convert_extent_bit,
 );
 
 DECLARE_EVENT_CLASS(btrfs_dump_space_info,
-       TP_PROTO(const struct btrfs_fs_info *fs_info,
+       TP_PROTO(struct btrfs_fs_info *fs_info,
                 const struct btrfs_space_info *sinfo),
 
        TP_ARGS(fs_info, sinfo),
@@ -2057,6 +2058,8 @@ DECLARE_EVENT_CLASS(btrfs_dump_space_info,
                __field(        u64,    delayed_refs_reserved   )
                __field(        u64,    delayed_reserved        )
                __field(        u64,    free_chunk_space        )
+               __field(        u64,    delalloc_bytes          )
+               __field(        u64,    ordered_bytes           )
        ),
 
        TP_fast_assign_btrfs(fs_info,
@@ -2074,6 +2077,8 @@ DECLARE_EVENT_CLASS(btrfs_dump_space_info,
                __entry->delayed_refs_reserved  =       fs_info->delayed_refs_rsv.reserved;
                __entry->delayed_reserved       =       fs_info->delayed_block_rsv.reserved;
                __entry->free_chunk_space       =       atomic64_read(&fs_info->free_chunk_space);
+               __entry->delalloc_bytes         =       percpu_counter_sum_positive(&fs_info->delalloc_bytes);
+               __entry->ordered_bytes          =       percpu_counter_sum_positive(&fs_info->ordered_bytes);
        ),
 
        TP_printk_btrfs("flags=%s total_bytes=%llu bytes_used=%llu "
@@ -2081,7 +2086,8 @@ DECLARE_EVENT_CLASS(btrfs_dump_space_info,
                        "bytes_may_use=%llu bytes_readonly=%llu "
                        "reclaim_size=%llu clamp=%d global_reserved=%llu "
                        "trans_reserved=%llu delayed_refs_reserved=%llu "
-                       "delayed_reserved=%llu chunk_free_space=%llu",
+                       "delayed_reserved=%llu chunk_free_space=%llu "
+                       "delalloc_bytes=%llu ordered_bytes=%llu",
                        __print_flags(__entry->flags, "|", BTRFS_GROUP_FLAGS),
                        __entry->total_bytes, __entry->bytes_used,
                        __entry->bytes_pinned, __entry->bytes_reserved,
@@ -2089,11 +2095,18 @@ DECLARE_EVENT_CLASS(btrfs_dump_space_info,
                        __entry->reclaim_size, __entry->clamp,
                        __entry->global_reserved, __entry->trans_reserved,
                        __entry->delayed_refs_reserved,
-                       __entry->delayed_reserved, __entry->free_chunk_space)
+                       __entry->delayed_reserved, __entry->free_chunk_space,
+                       __entry->delalloc_bytes, __entry->ordered_bytes)
 );
 
 DEFINE_EVENT(btrfs_dump_space_info, btrfs_done_preemptive_reclaim,
-       TP_PROTO(const struct btrfs_fs_info *fs_info,
+       TP_PROTO(struct btrfs_fs_info *fs_info,
+                const struct btrfs_space_info *sinfo),
+       TP_ARGS(fs_info, sinfo)
+);
+
+DEFINE_EVENT(btrfs_dump_space_info, btrfs_fail_all_tickets,
+       TP_PROTO(struct btrfs_fs_info *fs_info,
                 const struct btrfs_space_info *sinfo),
        TP_ARGS(fs_info, sinfo)
 );
index f980256..491098a 100644 (file)
@@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(queue_to_disk(q));
+               __entry->dev            = disk_devt(q->disk);
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                strlcpy(__entry->type, type, sizeof(__entry->type));
                __entry->percentile     = percentile;
@@ -59,7 +59,7 @@ TRACE_EVENT(kyber_adjust,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(queue_to_disk(q));
+               __entry->dev            = disk_devt(q->disk);
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                __entry->depth          = depth;
        ),
@@ -81,7 +81,7 @@ TRACE_EVENT(kyber_throttled,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(queue_to_disk(q));
+               __entry->dev            = disk_devt(q->disk);
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
        ),
 
index 390270e..f160484 100644 (file)
@@ -48,7 +48,9 @@
        {(unsigned long)__GFP_WRITE,            "__GFP_WRITE"},         \
        {(unsigned long)__GFP_RECLAIM,          "__GFP_RECLAIM"},       \
        {(unsigned long)__GFP_DIRECT_RECLAIM,   "__GFP_DIRECT_RECLAIM"},\
-       {(unsigned long)__GFP_KSWAPD_RECLAIM,   "__GFP_KSWAPD_RECLAIM"}\
+       {(unsigned long)__GFP_KSWAPD_RECLAIM,   "__GFP_KSWAPD_RECLAIM"},\
+       {(unsigned long)__GFP_ZEROTAGS,         "__GFP_ZEROTAGS"},      \
+       {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\
 
 #define show_gfp_flags(flags)                                          \
        (flags) ? __print_flags(flags, "|",                             \
index 861f199..d323f5a 100644 (file)
@@ -1642,7 +1642,7 @@ TRACE_EVENT(svc_process,
                __field(u32, vers)
                __field(u32, proc)
                __string(service, name)
-               __string(procedure, rqst->rq_procinfo->pc_name)
+               __string(procedure, svc_proc_name(rqst))
                __string(addr, rqst->rq_xprt ?
                         rqst->rq_xprt->xpt_remotebuf : "(null)")
        ),
@@ -1652,7 +1652,7 @@ TRACE_EVENT(svc_process,
                __entry->vers = rqst->rq_vers;
                __entry->proc = rqst->rq_proc;
                __assign_str(service, name);
-               __assign_str(procedure, rqst->rq_procinfo->pc_name);
+               __assign_str(procedure, svc_proc_name(rqst));
                __assign_str(addr, rqst->rq_xprt ?
                             rqst->rq_xprt->xpt_remotebuf : "(null)");
        ),
@@ -1918,7 +1918,7 @@ TRACE_EVENT(svc_stats_latency,
        TP_STRUCT__entry(
                __field(u32, xid)
                __field(unsigned long, execute)
-               __string(procedure, rqst->rq_procinfo->pc_name)
+               __string(procedure, svc_proc_name(rqst))
                __string(addr, rqst->rq_xprt->xpt_remotebuf)
        ),
 
@@ -1926,7 +1926,7 @@ TRACE_EVENT(svc_stats_latency,
                __entry->xid = be32_to_cpu(rqst->rq_xid);
                __entry->execute = ktime_to_us(ktime_sub(ktime_get(),
                                                         rqst->rq_stime));
-               __assign_str(procedure, rqst->rq_procinfo->pc_name);
+               __assign_str(procedure, svc_proc_name(rqst));
                __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
        ),
 
index acc1719..08810a4 100644 (file)
@@ -102,6 +102,9 @@ TRACE_MAKE_SYSTEM_STR();
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
 #undef __bitmask
 #define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
 
@@ -197,6 +200,9 @@ TRACE_MAKE_SYSTEM_STR();
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
 #undef __bitmask
 #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
 
@@ -459,6 +465,9 @@ static struct trace_event_functions trace_event_type_funcs_##call = {       \
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
 #undef __bitmask
 #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
 
@@ -507,6 +516,9 @@ static struct trace_event_fields trace_event_fields_##call[] = {    \
 #define __string(item, src) __dynamic_array(char, item,                        \
                    strlen((src) ? (const char *)(src) : "(null)") + 1)
 
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)
+
 /*
  * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
  * num_possible_cpus().
@@ -670,10 +682,20 @@ static inline notrace int trace_event_get_offsets_##call(         \
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
 #undef __assign_str
 #define __assign_str(dst, src)                                         \
        strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
 
+#undef __assign_str_len
+#define __assign_str_len(dst, src, len)                                        \
+       do {                                                            \
+               memcpy(__get_str(dst), (src), (len));                   \
+               __get_str(dst)[len] = '\0';                             \
+       } while(0)
+
 #undef __bitmask
 #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
 
index 22cd037..d7d3cfe 100644 (file)
@@ -288,6 +288,7 @@ struct btrfs_ioctl_fs_info_args {
  * first mount when booting older kernel versions.
  */
 #define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID  (1ULL << 1)
+#define BTRFS_FEATURE_COMPAT_RO_VERITY                 (1ULL << 2)
 
 #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF   (1ULL << 0)
 #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL  (1ULL << 1)
index ccdb40f..e1c4c73 100644 (file)
 #define BTRFS_INODE_REF_KEY            12
 #define BTRFS_INODE_EXTREF_KEY         13
 #define BTRFS_XATTR_ITEM_KEY           24
+
+/*
+ * fs verity items are stored under two different key types on disk.
+ * The descriptor items:
+ * [ inode objectid, BTRFS_VERITY_DESC_ITEM_KEY, offset ]
+ *
+ * At offset 0, we store a btrfs_verity_descriptor_item which tracks the size
+ * of the descriptor item and some extra data for encryption.
+ * Starting at offset 1, these hold the generic fs verity descriptor.  The
+ * latter are opaque to btrfs, we just read and write them as a blob for the
+ * higher level verity code.  The most common descriptor size is 256 bytes.
+ *
+ * The merkle tree items:
+ * [ inode objectid, BTRFS_VERITY_MERKLE_ITEM_KEY, offset ]
+ *
+ * These also start at offset 0, and correspond to the merkle tree bytes.  When
+ * fsverity asks for page 0 of the merkle tree, we pull up one page starting at
+ * offset 0 for this key type.  These are also opaque to btrfs, we're blindly
+ * storing whatever fsverity sends down.
+ */
+#define BTRFS_VERITY_DESC_ITEM_KEY     36
+#define BTRFS_VERITY_MERKLE_ITEM_KEY   37
+
 #define BTRFS_ORPHAN_ITEM_KEY          48
 /* reserve 2-15 close to the inode for later flexibility */
 
@@ -991,4 +1014,16 @@ struct btrfs_qgroup_limit_item {
        __le64 rsv_excl;
 } __attribute__ ((__packed__));
 
+struct btrfs_verity_descriptor_item {
+       /* Size of the verity descriptor in bytes */
+       __le64 size;
+       /*
+        * When we implement support for fscrypt, we will need to encrypt the
+        * Merkle tree for encrypted verity files. These 128 bits are for the
+        * eventual storage of an fscrypt initialization vector.
+        */
+       __le64 reserved[2];
+       __u8 encryption;
+} __attribute__ ((__packed__));
+
 #endif /* _BTRFS_CTREE_H_ */
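As a rough illustration of the on-disk layout described above, a hedged userspace sketch that interprets a descriptor item body already read from the tree; the helper is hypothetical and only the little-endian size field is decoded:

#include <endian.h>
#include <stdint.h>
#include <linux/btrfs_tree.h>	/* struct btrfs_verity_descriptor_item */

/* Hypothetical helper: decode the item stored at
 * [ inode objectid, BTRFS_VERITY_DESC_ITEM_KEY, 0 ]. */
static uint64_t verity_descriptor_size(const void *item_body)
{
	const struct btrfs_verity_descriptor_item *item = item_body;

	return le64toh(item->size);	/* size is stored little-endian on disk */
}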
diff --git a/include/uapi/linux/dvb/audio.h b/include/uapi/linux/dvb/audio.h
new file mode 100644 (file)
index 0000000..2f869da
--- /dev/null
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
+/*
+ * audio.h - DEPRECATED MPEG-TS audio decoder API
+ *
+ * NOTE: should not be used on future drivers
+ *
+ * Copyright (C) 2000 Ralph  Metzler <ralph@convergence.de>
+ *                  & Marcus Metzler <marcus@convergence.de>
+ *                    for convergence integrated media GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ */
+
+#ifndef _DVBAUDIO_H_
+#define _DVBAUDIO_H_
+
+#include <linux/types.h>
+
+typedef enum {
+       AUDIO_SOURCE_DEMUX, /* Select the demux as the main source */
+       AUDIO_SOURCE_MEMORY /* Select internal memory as the main source */
+} audio_stream_source_t;
+
+
+typedef enum {
+       AUDIO_STOPPED,      /* Device is stopped */
+       AUDIO_PLAYING,      /* Device is currently playing */
+       AUDIO_PAUSED        /* Device is paused */
+} audio_play_state_t;
+
+
+typedef enum {
+       AUDIO_STEREO,
+       AUDIO_MONO_LEFT,
+       AUDIO_MONO_RIGHT,
+       AUDIO_MONO,
+       AUDIO_STEREO_SWAPPED
+} audio_channel_select_t;
+
+
+typedef struct audio_mixer {
+       unsigned int volume_left;
+       unsigned int volume_right;
+  /* what else do we need? bass, pass-through, ... */
+} audio_mixer_t;
+
+
+typedef struct audio_status {
+       int                    AV_sync_state;  /* sync audio and video? */
+       int                    mute_state;     /* audio is muted */
+       audio_play_state_t     play_state;     /* current playback state */
+       audio_stream_source_t  stream_source;  /* current stream source */
+       audio_channel_select_t channel_select; /* currently selected channel */
+       int                    bypass_mode;    /* pass on audio data to separate decoder hardware */
+       audio_mixer_t          mixer_state;    /* current mixer state */
+} audio_status_t;
+
+
+/* for GET_CAPABILITIES and SET_FORMAT, the latter should only set one bit */
+#define AUDIO_CAP_DTS    1
+#define AUDIO_CAP_LPCM   2
+#define AUDIO_CAP_MP1    4
+#define AUDIO_CAP_MP2    8
+#define AUDIO_CAP_MP3   16
+#define AUDIO_CAP_AAC   32
+#define AUDIO_CAP_OGG   64
+#define AUDIO_CAP_SDDS 128
+#define AUDIO_CAP_AC3  256
+
+#define AUDIO_STOP                 _IO('o', 1)
+#define AUDIO_PLAY                 _IO('o', 2)
+#define AUDIO_PAUSE                _IO('o', 3)
+#define AUDIO_CONTINUE             _IO('o', 4)
+#define AUDIO_SELECT_SOURCE        _IO('o', 5)
+#define AUDIO_SET_MUTE             _IO('o', 6)
+#define AUDIO_SET_AV_SYNC          _IO('o', 7)
+#define AUDIO_SET_BYPASS_MODE      _IO('o', 8)
+#define AUDIO_CHANNEL_SELECT       _IO('o', 9)
+#define AUDIO_GET_STATUS           _IOR('o', 10, audio_status_t)
+
+#define AUDIO_GET_CAPABILITIES     _IOR('o', 11, unsigned int)
+#define AUDIO_CLEAR_BUFFER         _IO('o',  12)
+#define AUDIO_SET_ID               _IO('o', 13)
+#define AUDIO_SET_MIXER            _IOW('o', 14, audio_mixer_t)
+#define AUDIO_SET_STREAMTYPE       _IO('o', 15)
+#define AUDIO_BILINGUAL_CHANNEL_SELECT _IO('o', 20)
+
+#endif /* _DVBAUDIO_H_ */
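A hedged userspace sketch of the deprecated audio decoder API above; the device node path is an assumption and error handling is minimal:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/audio.h>

int main(void)
{
	audio_status_t st;
	int fd = open("/dev/dvb/adapter0/audio0", O_RDONLY);	/* assumed path */

	if (fd < 0 || ioctl(fd, AUDIO_GET_STATUS, &st) < 0) {
		perror("AUDIO_GET_STATUS");
		return 1;
	}
	printf("play_state=%d muted=%d\n", st.play_state, st.mute_state);
	close(fd);
	return 0;
}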
diff --git a/include/uapi/linux/dvb/osd.h b/include/uapi/linux/dvb/osd.h
new file mode 100644 (file)
index 0000000..858997c
--- /dev/null
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
+/*
+ * osd.h - DEPRECATED On Screen Display API
+ *
+ * NOTE: should not be used on future drivers
+ *
+ * Copyright (C) 2001 Ralph  Metzler <ralph@convergence.de>
+ *                  & Marcus Metzler <marcus@convergence.de>
+ *                    for convergence integrated media GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ */
+
+#ifndef _DVBOSD_H_
+#define _DVBOSD_H_
+
+#include <linux/compiler.h>
+
+typedef enum {
+       /* All functions return -2 on "not open" */
+       OSD_Close = 1,  /* () */
+       /*
+        * Disables OSD and releases the buffers
+        * returns 0 on success
+        */
+       OSD_Open,       /* (x0,y0,x1,y1,BitPerPixel[2/4/8](color&0x0F),mix[0..15](color&0xF0)) */
+       /*
+        * Opens OSD with this size and bit depth
+        * returns 0 on success, -1 on DRAM allocation error, -2 on "already open"
+        */
+       OSD_Show,       /* () */
+       /*
+        * enables OSD mode
+        * returns 0 on success
+        */
+       OSD_Hide,       /* () */
+       /*
+        * disables OSD mode
+        * returns 0 on success
+        */
+       OSD_Clear,      /* () */
+       /*
+        * Sets all pixel to color 0
+        * returns 0 on success
+        */
+       OSD_Fill,       /* (color) */
+       /*
+        * Sets all pixel to color <col>
+        * returns 0 on success
+        */
+       OSD_SetColor,   /* (color,R{x0},G{y0},B{x1},opacity{y1}) */
+       /*
+        * set palette entry <num> to <r,g,b>, <mix> and <trans> apply
+        * R,G,B: 0..255
+        * R=Red, G=Green, B=Blue
+        * opacity=0:      pixel opacity 0% (only video pixel shows)
+        * opacity=1..254: pixel opacity as specified in header
+        * opacity=255:    pixel opacity 100% (only OSD pixel shows)
+        * returns 0 on success, -1 on error
+        */
+       OSD_SetPalette, /* (firstcolor{color},lastcolor{x0},data) */
+       /*
+        * Set a number of entries in the palette
+        * sets the entries "firstcolor" through "lastcolor" from the array "data"
+        * data has 4 byte for each color:
+        * R,G,B, and an opacity value: 0->transparent, 1..254->mix, 255->pixel
+        */
+       OSD_SetTrans,   /* (transparency{color}) */
+       /*
+        * Sets transparency of mixed pixel (0..15)
+        * returns 0 on success
+        */
+       OSD_SetPixel,   /* (x0,y0,color) */
+       /*
+        * sets pixel <x>,<y> to color number <col>
+        * returns 0 on success, -1 on error
+        */
+       OSD_GetPixel,   /* (x0,y0) */
+       /* returns color number of pixel <x>,<y>,  or -1 */
+       OSD_SetRow,     /* (x0,y0,x1,data) */
+       /*
+        * fills pixels x0,y through  x1,y with the content of data[]
+        * returns 0 on success, -1 on clipping all pixel (no pixel drawn)
+        */
+       OSD_SetBlock,   /* (x0,y0,x1,y1,increment{color},data) */
+       /*
+        * fills pixels x0,y0 through  x1,y1 with the content of data[]
+        * inc contains the width of one line in the data block,
+        * inc<=0 uses blockwidth as linewidth
+        * returns 0 on success, -1 on clipping all pixel
+        */
+       OSD_FillRow,    /* (x0,y0,x1,color) */
+       /*
+        * fills pixels x0,y through  x1,y with the color <col>
+        * returns 0 on success, -1 on clipping all pixel
+        */
+       OSD_FillBlock,  /* (x0,y0,x1,y1,color) */
+       /*
+        * fills pixels x0,y0 through  x1,y1 with the color <col>
+        * returns 0 on success, -1 on clipping all pixel
+        */
+       OSD_Line,       /* (x0,y0,x1,y1,color) */
+       /*
+        * draw a line from x0,y0 to x1,y1 with the color <col>
+        * returns 0 on success
+        */
+       OSD_Query,      /* (x0,y0,x1,y1,xasp{color}}), yasp=11 */
+       /*
+        * fills parameters with the picture dimensions and the pixel aspect ratio
+        * returns 0 on success
+        */
+       OSD_Test,       /* () */
+       /*
+        * draws a test picture. for debugging purposes only
+        * returns 0 on success
+        * TODO: remove "test" in final version
+        */
+       OSD_Text,       /* (x0,y0,size,color,text) */
+       OSD_SetWindow,  /* (x0) set window with number 0<x0<8 as current */
+       OSD_MoveWindow, /* move current window to (x0, y0) */
+       OSD_OpenRaw,    /* Open other types of OSD windows */
+} OSD_Command;
+
+typedef struct osd_cmd_s {
+       OSD_Command cmd;
+       int x0;
+       int y0;
+       int x1;
+       int y1;
+       int color;
+       void __user *data;
+} osd_cmd_t;
+
+/* OSD_OpenRaw: set 'color' to desired window type */
+typedef enum {
+       OSD_BITMAP1,           /* 1 bit bitmap */
+       OSD_BITMAP2,           /* 2 bit bitmap */
+       OSD_BITMAP4,           /* 4 bit bitmap */
+       OSD_BITMAP8,           /* 8 bit bitmap */
+       OSD_BITMAP1HR,         /* 1 Bit bitmap half resolution */
+       OSD_BITMAP2HR,         /* 2 bit bitmap half resolution */
+       OSD_BITMAP4HR,         /* 4 bit bitmap half resolution */
+       OSD_BITMAP8HR,         /* 8 bit bitmap half resolution */
+       OSD_YCRCB422,          /* 4:2:2 YCRCB Graphic Display */
+       OSD_YCRCB444,          /* 4:4:4 YCRCB Graphic Display */
+       OSD_YCRCB444HR,        /* 4:4:4 YCRCB graphic half resolution */
+       OSD_VIDEOTSIZE,        /* True Size Normal MPEG Video Display */
+       OSD_VIDEOHSIZE,        /* MPEG Video Display Half Resolution */
+       OSD_VIDEOQSIZE,        /* MPEG Video Display Quarter Resolution */
+       OSD_VIDEODSIZE,        /* MPEG Video Display Double Resolution */
+       OSD_VIDEOTHSIZE,       /* True Size MPEG Video Display Half Resolution */
+       OSD_VIDEOTQSIZE,       /* True Size MPEG Video Display Quarter Resolution*/
+       OSD_VIDEOTDSIZE,       /* True Size MPEG Video Display Double Resolution */
+       OSD_VIDEONSIZE,        /* Full Size MPEG Video Display */
+       OSD_CURSOR             /* Cursor */
+} osd_raw_window_t;
+
+typedef struct osd_cap_s {
+       int  cmd;
+#define OSD_CAP_MEMSIZE         1  /* memory size */
+       long val;
+} osd_cap_t;
+
+
+#define OSD_SEND_CMD            _IOW('o', 160, osd_cmd_t)
+#define OSD_GET_CAPABILITY      _IOR('o', 161, osd_cap_t)
+
+#endif
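A hedged sketch of driving the deprecated OSD interface above: fill a rectangle with palette color 1 through OSD_SEND_CMD; the coordinates and the already-open file descriptor are assumptions:

#include <sys/ioctl.h>
#include <linux/dvb/osd.h>

/* Fill a 100x50 rectangle at the top-left corner with palette color 1. */
static int osd_fill_block(int fd)
{
	osd_cmd_t cmd = {
		.cmd = OSD_FillBlock,
		.x0 = 0, .y0 = 0,
		.x1 = 100, .y1 = 50,
		.color = 1,
	};

	return ioctl(fd, OSD_SEND_CMD, &cmd);
}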
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
new file mode 100644 (file)
index 0000000..179f1ec
--- /dev/null
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
+/*
+ * video.h - DEPRECATED MPEG-TS video decoder API
+ *
+ * NOTE: should not be used on future drivers
+ *
+ * Copyright (C) 2000 Marcus Metzler <marcus@convergence.de>
+ *                  & Ralph  Metzler <ralph@convergence.de>
+ *                    for convergence integrated media GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ */
+
+#ifndef _UAPI_DVBVIDEO_H_
+#define _UAPI_DVBVIDEO_H_
+
+#include <linux/types.h>
+#ifndef __KERNEL__
+#include <time.h>
+#endif
+
+typedef enum {
+       VIDEO_FORMAT_4_3,     /* Select 4:3 format */
+       VIDEO_FORMAT_16_9,    /* Select 16:9 format. */
+       VIDEO_FORMAT_221_1    /* 2.21:1 */
+} video_format_t;
+
+
+typedef enum {
+       VIDEO_PAN_SCAN,       /* use pan and scan format */
+       VIDEO_LETTER_BOX,     /* use letterbox format */
+       VIDEO_CENTER_CUT_OUT  /* use center cut out format */
+} video_displayformat_t;
+
+typedef struct {
+       int w;
+       int h;
+       video_format_t aspect_ratio;
+} video_size_t;
+
+typedef enum {
+       VIDEO_SOURCE_DEMUX, /* Select the demux as the main source */
+       VIDEO_SOURCE_MEMORY /* If this source is selected, the stream
+                              comes from the user through the write
+                              system call */
+} video_stream_source_t;
+
+
+typedef enum {
+       VIDEO_STOPPED, /* Video is stopped */
+       VIDEO_PLAYING, /* Video is currently playing */
+       VIDEO_FREEZED  /* Video is freezed */
+} video_play_state_t;
+
+
+/* Decoder commands */
+#define VIDEO_CMD_PLAY        (0)
+#define VIDEO_CMD_STOP        (1)
+#define VIDEO_CMD_FREEZE      (2)
+#define VIDEO_CMD_CONTINUE    (3)
+
+/* Flags for VIDEO_CMD_FREEZE */
+#define VIDEO_CMD_FREEZE_TO_BLACK      (1 << 0)
+
+/* Flags for VIDEO_CMD_STOP */
+#define VIDEO_CMD_STOP_TO_BLACK                (1 << 0)
+#define VIDEO_CMD_STOP_IMMEDIATELY     (1 << 1)
+
+/* Play input formats: */
+/* The decoder has no special format requirements */
+#define VIDEO_PLAY_FMT_NONE         (0)
+/* The decoder requires full GOPs */
+#define VIDEO_PLAY_FMT_GOP          (1)
+
+/* The structure must be zeroed before use by the application
+   This ensures it can be extended safely in the future. */
+struct video_command {
+       __u32 cmd;
+       __u32 flags;
+       union {
+               struct {
+                       __u64 pts;
+               } stop;
+
+               struct {
+                       /* 0 or 1000 specifies normal speed,
+                          1 specifies forward single stepping,
+                          -1 specifies backward single stepping,
+                          >1: playback at speed/1000 of the normal speed,
+                          <-1: reverse playback at (-speed/1000) of the normal speed. */
+                       __s32 speed;
+                       __u32 format;
+               } play;
+
+               struct {
+                       __u32 data[16];
+               } raw;
+       };
+};
+
+/* FIELD_UNKNOWN can be used if the hardware does not know whether
+   the Vsync is for an odd, even or progressive (i.e. non-interlaced)
+   field. */
+#define VIDEO_VSYNC_FIELD_UNKNOWN      (0)
+#define VIDEO_VSYNC_FIELD_ODD          (1)
+#define VIDEO_VSYNC_FIELD_EVEN         (2)
+#define VIDEO_VSYNC_FIELD_PROGRESSIVE  (3)
+
+struct video_event {
+       __s32 type;
+#define VIDEO_EVENT_SIZE_CHANGED       1
+#define VIDEO_EVENT_FRAME_RATE_CHANGED 2
+#define VIDEO_EVENT_DECODER_STOPPED    3
+#define VIDEO_EVENT_VSYNC              4
+       /* unused, make sure to use atomic time for y2038 if it ever gets used */
+       long timestamp;
+       union {
+               video_size_t size;
+               unsigned int frame_rate;        /* in frames per 1000sec */
+               unsigned char vsync_field;      /* unknown/odd/even/progressive */
+       } u;
+};
+
+
+struct video_status {
+       int                   video_blank;   /* blank video on freeze? */
+       video_play_state_t    play_state;    /* current state of playback */
+       video_stream_source_t stream_source; /* current source (demux/memory) */
+       video_format_t        video_format;  /* current aspect ratio of stream*/
+       video_displayformat_t display_format;/* selected cropping mode */
+};
+
+
+struct video_still_picture {
+       char __user *iFrame;        /* pointer to a single iframe in memory */
+       __s32 size;
+};
+
+
+typedef __u16 video_attributes_t;
+/*   bits: descr. */
+/*   15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
+/*   13-12 TV system (0=525/60, 1=625/50) */
+/*   11-10 Aspect ratio (0=4:3, 3=16:9) */
+/*    9- 8 permitted display mode on 4:3 monitor (0=both, 1=only pan-sca */
+/*    7    line 21-1 data present in GOP (1=yes, 0=no) */
+/*    6    line 21-2 data present in GOP (1=yes, 0=no) */
+/*    5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */
+/*    2    source letterboxed (1=yes, 0=no) */
+/*    0    film/camera mode (0=camera, 1=film (625/50 only)) */
+
+
+/* bit definitions for capabilities: */
+/* can the hardware decode MPEG1 and/or MPEG2? */
+#define VIDEO_CAP_MPEG1   1
+#define VIDEO_CAP_MPEG2   2
+/* can you send a system and/or program stream to video device?
+   (you still have to open the video and the audio device but only
+    send the stream to the video device) */
+#define VIDEO_CAP_SYS     4
+#define VIDEO_CAP_PROG    8
+/* can the driver also handle SPU, NAVI and CSS encoded data?
+   (CSS API is not present yet) */
+#define VIDEO_CAP_SPU    16
+#define VIDEO_CAP_NAVI   32
+#define VIDEO_CAP_CSS    64
+
+
+#define VIDEO_STOP                 _IO('o', 21)
+#define VIDEO_PLAY                 _IO('o', 22)
+#define VIDEO_FREEZE               _IO('o', 23)
+#define VIDEO_CONTINUE             _IO('o', 24)
+#define VIDEO_SELECT_SOURCE        _IO('o', 25)
+#define VIDEO_SET_BLANK            _IO('o', 26)
+#define VIDEO_GET_STATUS           _IOR('o', 27, struct video_status)
+#define VIDEO_GET_EVENT            _IOR('o', 28, struct video_event)
+#define VIDEO_SET_DISPLAY_FORMAT   _IO('o', 29)
+#define VIDEO_STILLPICTURE         _IOW('o', 30, struct video_still_picture)
+#define VIDEO_FAST_FORWARD         _IO('o', 31)
+#define VIDEO_SLOWMOTION           _IO('o', 32)
+#define VIDEO_GET_CAPABILITIES     _IOR('o', 33, unsigned int)
+#define VIDEO_CLEAR_BUFFER         _IO('o',  34)
+#define VIDEO_SET_STREAMTYPE       _IO('o', 36)
+#define VIDEO_SET_FORMAT           _IO('o', 37)
+#define VIDEO_GET_SIZE             _IOR('o', 55, video_size_t)
+
+/**
+ * VIDEO_GET_PTS
+ *
+ * Read the 33 bit presentation time stamp as defined
+ * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
+ *
+ * The PTS should belong to the currently played
+ * frame if possible, but may also be a value close to it
+ * like the PTS of the last decoded frame or the last PTS
+ * extracted by the PES parser.
+ */
+#define VIDEO_GET_PTS              _IOR('o', 57, __u64)
+
+/* Read the number of displayed frames since the decoder was started */
+#define VIDEO_GET_FRAME_COUNT     _IOR('o', 58, __u64)
+
+#define VIDEO_COMMAND             _IOWR('o', 59, struct video_command)
+#define VIDEO_TRY_COMMAND         _IOWR('o', 60, struct video_command)
+
+#endif /* _UAPI_DVBVIDEO_H_ */
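The header insists that struct video_command be zeroed before use; a hedged sketch of starting normal-speed playback on an already-open decoder fd:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/dvb/video.h>

static int video_play_normal(int fd)
{
	struct video_command cmd;

	memset(&cmd, 0, sizeof(cmd));	/* mandated by the API comment above */
	cmd.cmd = VIDEO_CMD_PLAY;
	cmd.play.speed = 1000;		/* 0 or 1000 == normal speed */
	cmd.play.format = VIDEO_PLAY_FMT_NONE;

	return ioctl(fd, VIDEO_COMMAND, &cmd);
}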
index fbf9c5c..64553df 100644 (file)
@@ -51,6 +51,7 @@
 #define FAN_ENABLE_AUDIT       0x00000040
 
 /* Flags to determine fanotify event format */
+#define FAN_REPORT_PIDFD       0x00000080      /* Report pidfd for event->pid */
 #define FAN_REPORT_TID         0x00000100      /* event->pid is thread id */
 #define FAN_REPORT_FID         0x00000200      /* Report unique file id */
 #define FAN_REPORT_DIR_FID     0x00000400      /* Report unique directory id */
@@ -123,6 +124,7 @@ struct fanotify_event_metadata {
 #define FAN_EVENT_INFO_TYPE_FID                1
 #define FAN_EVENT_INFO_TYPE_DFID_NAME  2
 #define FAN_EVENT_INFO_TYPE_DFID       3
+#define FAN_EVENT_INFO_TYPE_PIDFD      4
 
 /* Variable length info record following event metadata */
 struct fanotify_event_info_header {
@@ -148,6 +150,15 @@ struct fanotify_event_info_fid {
        unsigned char handle[0];
 };
 
+/*
+ * This structure is used for info records of type FAN_EVENT_INFO_TYPE_PIDFD.
+ * It holds a pidfd for the pid that was responsible for generating an event.
+ */
+struct fanotify_event_info_pidfd {
+       struct fanotify_event_info_header hdr;
+       __s32 pidfd;
+};
+
 struct fanotify_response {
        __s32 fd;
        __u32 response;
@@ -160,6 +171,8 @@ struct fanotify_response {
 
 /* No fd set in event */
 #define FAN_NOFD       -1
+#define FAN_NOPIDFD    FAN_NOFD
+#define FAN_EPIDFD     -2
 
 /* Helper functions to deal with fanotify_event_metadata buffers */
 #define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
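A hedged sketch of consuming the new pidfd info record: walk the variable-length records that follow one event's metadata and return the pidfd, assuming the group was initialized with FAN_REPORT_PIDFD and the uapi headers are new enough to define the record type:

#include <stdint.h>
#include <linux/fanotify.h>

static int event_pidfd(const struct fanotify_event_metadata *meta)
{
	const uint8_t *p = (const uint8_t *)meta + meta->metadata_len;
	const uint8_t *end = (const uint8_t *)meta + meta->event_len;

	while (p + sizeof(struct fanotify_event_info_header) <= end) {
		const struct fanotify_event_info_header *hdr = (const void *)p;

		if (hdr->info_type == FAN_EVENT_INFO_TYPE_PIDFD)
			return ((const struct fanotify_event_info_pidfd *)hdr)->pidfd;
		p += hdr->len;	/* each info record carries its own length */
	}
	return FAN_NOPIDFD;	/* no pidfd record attached to this event */
}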
index 4c32e97..bdf7b40 100644 (file)
@@ -184,6 +184,7 @@ struct fsxattr {
 #define BLKSECDISCARD _IO(0x12,125)
 #define BLKROTATIONAL _IO(0x12,126)
 #define BLKZEROOUT _IO(0x12,127)
+#define BLKGETDISKSEQ _IOR(0x12,128,__u64)
 /*
  * A jump here: 130-136 are reserved for zoned block devices
  * (see uapi/linux/blkzoned.h)
index 79126d5..59ef351 100644 (file)
@@ -44,6 +44,7 @@ struct io_uring_sqe {
                __u32           splice_flags;
                __u32           rename_flags;
                __u32           unlink_flags;
+               __u32           hardlink_flags;
        };
        __u64   user_data;      /* data to be passed back at completion time */
        /* pack this to avoid bogus arm OABI complaints */
@@ -55,7 +56,10 @@ struct io_uring_sqe {
        } __attribute__((packed));
        /* personality to use, if used */
        __u16   personality;
-       __s32   splice_fd_in;
+       union {
+               __s32   splice_fd_in;
+               __u32   file_index;
+       };
        __u64   __pad2[2];
 };
 
@@ -133,6 +137,9 @@ enum {
        IORING_OP_SHUTDOWN,
        IORING_OP_RENAMEAT,
        IORING_OP_UNLINKAT,
+       IORING_OP_MKDIRAT,
+       IORING_OP_SYMLINKAT,
+       IORING_OP_LINKAT,
 
        /* this goes last, obviously */
        IORING_OP_LAST,
@@ -146,9 +153,13 @@ enum {
 /*
  * sqe->timeout_flags
  */
-#define IORING_TIMEOUT_ABS     (1U << 0)
-#define IORING_TIMEOUT_UPDATE  (1U << 1)
-
+#define IORING_TIMEOUT_ABS             (1U << 0)
+#define IORING_TIMEOUT_UPDATE          (1U << 1)
+#define IORING_TIMEOUT_BOOTTIME                (1U << 2)
+#define IORING_TIMEOUT_REALTIME                (1U << 3)
+#define IORING_LINK_TIMEOUT_UPDATE     (1U << 4)
+#define IORING_TIMEOUT_CLOCK_MASK      (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
+#define IORING_TIMEOUT_UPDATE_MASK     (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
 /*
  * sqe->splice_flags
  * extends splice(2) flags
@@ -306,6 +317,9 @@ enum {
        IORING_REGISTER_IOWQ_AFF                = 17,
        IORING_UNREGISTER_IOWQ_AFF              = 18,
 
+       /* set/get max number of workers */
+       IORING_REGISTER_IOWQ_MAX_WORKERS        = 19,
+
        /* this goes last */
        IORING_REGISTER_LAST
 };
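A hedged liburing-based sketch of the new timeout clock selection: arm a timeout that follows CLOCK_REALTIME via IORING_TIMEOUT_REALTIME (assumes a liburing build and kernel headers that expose the flag):

#include <liburing.h>

static int queue_realtime_timeout(struct io_uring *ring,
				  struct __kernel_timespec *ts)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;	/* submission queue full */
	io_uring_prep_timeout(sqe, ts, 0, IORING_TIMEOUT_REALTIME);
	return 0;
}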
diff --git a/include/uapi/linux/ioprio.h b/include/uapi/linux/ioprio.h
new file mode 100644 (file)
index 0000000..f70f259
--- /dev/null
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_IOPRIO_H
+#define _UAPI_LINUX_IOPRIO_H
+
+/*
+ * Gives us 8 prio classes with 13-bits of data for each class
+ */
+#define IOPRIO_CLASS_SHIFT     13
+#define IOPRIO_CLASS_MASK      0x07
+#define IOPRIO_PRIO_MASK       ((1UL << IOPRIO_CLASS_SHIFT) - 1)
+
+#define IOPRIO_PRIO_CLASS(ioprio)      \
+       (((ioprio) >> IOPRIO_CLASS_SHIFT) & IOPRIO_CLASS_MASK)
+#define IOPRIO_PRIO_DATA(ioprio)       ((ioprio) & IOPRIO_PRIO_MASK)
+#define IOPRIO_PRIO_VALUE(class, data) \
+       ((((class) & IOPRIO_CLASS_MASK) << IOPRIO_CLASS_SHIFT) | \
+        ((data) & IOPRIO_PRIO_MASK))
+
+/*
+ * These are the io priority groups as implemented by the BFQ and mq-deadline
+ * schedulers. RT is the realtime class, it always gets premium service. For
+ * ATA disks supporting NCQ IO priority, RT class IOs will be processed using
+ * high priority NCQ commands. BE is the best-effort scheduling class, the
+ * default for any process. IDLE is the idle scheduling class, it is only
+ * served when no one else is using the disk.
+ */
+enum {
+       IOPRIO_CLASS_NONE,
+       IOPRIO_CLASS_RT,
+       IOPRIO_CLASS_BE,
+       IOPRIO_CLASS_IDLE,
+};
+
+/*
+ * The RT and BE priority classes both support up to 8 priority levels.
+ */
+#define IOPRIO_NR_LEVELS       8
+#define IOPRIO_BE_NR           IOPRIO_NR_LEVELS
+
+enum {
+       IOPRIO_WHO_PROCESS = 1,
+       IOPRIO_WHO_PGRP,
+       IOPRIO_WHO_USER,
+};
+
+/*
+ * Fallback BE priority level.
+ */
+#define IOPRIO_NORM    4
+#define IOPRIO_BE_NORM IOPRIO_NORM
+
+#endif /* _UAPI_LINUX_IOPRIO_H */
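A hedged sketch using the newly exported uapi header: compute a best-effort priority value with IOPRIO_PRIO_VALUE() and apply it to the calling process; there is no glibc wrapper for ioprio_set(2), hence syscall(2):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/ioprio.h>

static int set_be_norm_ioprio(void)
{
	int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

	return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio);
}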
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
deleted file mode 100644 (file)
index 2745afd..0000000
+++ /dev/null
@@ -1,224 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2015 CNEX Labs.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- */
-
-#ifndef _UAPI_LINUX_LIGHTNVM_H
-#define _UAPI_LINUX_LIGHTNVM_H
-
-#ifdef __KERNEL__
-#include <linux/const.h>
-#else /* __KERNEL__ */
-#include <stdio.h>
-#include <sys/ioctl.h>
-#define DISK_NAME_LEN 32
-#endif /* __KERNEL__ */
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define NVM_TTYPE_NAME_MAX 48
-#define NVM_TTYPE_MAX 63
-#define NVM_MMTYPE_LEN 8
-
-#define NVM_CTRL_FILE "/dev/lightnvm/control"
-
-struct nvm_ioctl_info_tgt {
-       __u32 version[3];
-       __u32 reserved;
-       char tgtname[NVM_TTYPE_NAME_MAX];
-};
-
-struct nvm_ioctl_info {
-       __u32 version[3];       /* in/out - major, minor, patch */
-       __u16 tgtsize;          /* number of targets */
-       __u16 reserved16;       /* pad to 4K page */
-       __u32 reserved[12];
-       struct nvm_ioctl_info_tgt tgts[NVM_TTYPE_MAX];
-};
-
-enum {
-       NVM_DEVICE_ACTIVE = 1 << 0,
-};
-
-struct nvm_ioctl_device_info {
-       char devname[DISK_NAME_LEN];
-       char bmname[NVM_TTYPE_NAME_MAX];
-       __u32 bmversion[3];
-       __u32 flags;
-       __u32 reserved[8];
-};
-
-struct nvm_ioctl_get_devices {
-       __u32 nr_devices;
-       __u32 reserved[31];
-       struct nvm_ioctl_device_info info[31];
-};
-
-struct nvm_ioctl_create_simple {
-       __u32 lun_begin;
-       __u32 lun_end;
-};
-
-struct nvm_ioctl_create_extended {
-       __u16 lun_begin;
-       __u16 lun_end;
-       __u16 op;
-       __u16 rsv;
-};
-
-enum {
-       NVM_CONFIG_TYPE_SIMPLE = 0,
-       NVM_CONFIG_TYPE_EXTENDED = 1,
-};
-
-struct nvm_ioctl_create_conf {
-       __u32 type;
-       union {
-               struct nvm_ioctl_create_simple s;
-               struct nvm_ioctl_create_extended e;
-       };
-};
-
-enum {
-       NVM_TARGET_FACTORY = 1 << 0,    /* Init target in factory mode */
-};
-
-struct nvm_ioctl_create {
-       char dev[DISK_NAME_LEN];                /* open-channel SSD device */
-       char tgttype[NVM_TTYPE_NAME_MAX];       /* target type name */
-       char tgtname[DISK_NAME_LEN];            /* dev to expose target as */
-
-       __u32 flags;
-
-       struct nvm_ioctl_create_conf conf;
-};
-
-struct nvm_ioctl_remove {
-       char tgtname[DISK_NAME_LEN];
-
-       __u32 flags;
-};
-
-struct nvm_ioctl_dev_init {
-       char dev[DISK_NAME_LEN];                /* open-channel SSD device */
-       char mmtype[NVM_MMTYPE_LEN];            /* register to media manager */
-
-       __u32 flags;
-};
-
-enum {
-       NVM_FACTORY_ERASE_ONLY_USER     = 1 << 0, /* erase only blocks used as
-                                                  * host blks or grown blks */
-       NVM_FACTORY_RESET_HOST_BLKS     = 1 << 1, /* remove host blk marks */
-       NVM_FACTORY_RESET_GRWN_BBLKS    = 1 << 2, /* remove grown blk marks */
-       NVM_FACTORY_NR_BITS             = 1 << 3, /* stops here */
-};
-
-struct nvm_ioctl_dev_factory {
-       char dev[DISK_NAME_LEN];
-
-       __u32 flags;
-};
-
-struct nvm_user_vio {
-       __u8 opcode;
-       __u8 flags;
-       __u16 control;
-       __u16 nppas;
-       __u16 rsvd;
-       __u64 metadata;
-       __u64 addr;
-       __u64 ppa_list;
-       __u32 metadata_len;
-       __u32 data_len;
-       __u64 status;
-       __u32 result;
-       __u32 rsvd3[3];
-};
-
-struct nvm_passthru_vio {
-       __u8 opcode;
-       __u8 flags;
-       __u8 rsvd[2];
-       __u32 nsid;
-       __u32 cdw2;
-       __u32 cdw3;
-       __u64 metadata;
-       __u64 addr;
-       __u32 metadata_len;
-       __u32 data_len;
-       __u64 ppa_list;
-       __u16 nppas;
-       __u16 control;
-       __u32 cdw13;
-       __u32 cdw14;
-       __u32 cdw15;
-       __u64 status;
-       __u32 result;
-       __u32 timeout_ms;
-};
-
-/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */
-enum {
-       /* top level cmds */
-       NVM_INFO_CMD = 0x20,
-       NVM_GET_DEVICES_CMD,
-
-       /* device level cmds */
-       NVM_DEV_CREATE_CMD,
-       NVM_DEV_REMOVE_CMD,
-
-       /* Init a device to support LightNVM media managers */
-       NVM_DEV_INIT_CMD,
-
-       /* Factory reset device */
-       NVM_DEV_FACTORY_CMD,
-
-       /* Vector user I/O */
-       NVM_DEV_VIO_ADMIN_CMD = 0x41,
-       NVM_DEV_VIO_CMD = 0x42,
-       NVM_DEV_VIO_USER_CMD = 0x43,
-};
-
-#define NVM_IOCTL 'L' /* 0x4c */
-
-#define NVM_INFO               _IOWR(NVM_IOCTL, NVM_INFO_CMD, \
-                                               struct nvm_ioctl_info)
-#define NVM_GET_DEVICES                _IOR(NVM_IOCTL, NVM_GET_DEVICES_CMD, \
-                                               struct nvm_ioctl_get_devices)
-#define NVM_DEV_CREATE         _IOW(NVM_IOCTL, NVM_DEV_CREATE_CMD, \
-                                               struct nvm_ioctl_create)
-#define NVM_DEV_REMOVE         _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
-                                               struct nvm_ioctl_remove)
-#define NVM_DEV_INIT           _IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \
-                                               struct nvm_ioctl_dev_init)
-#define NVM_DEV_FACTORY                _IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \
-                                               struct nvm_ioctl_dev_factory)
-
-#define NVME_NVM_IOCTL_IO_VIO          _IOWR(NVM_IOCTL, NVM_DEV_VIO_USER_CMD, \
-                                               struct nvm_passthru_vio)
-#define NVME_NVM_IOCTL_ADMIN_VIO       _IOWR(NVM_IOCTL, NVM_DEV_VIO_ADMIN_CMD,\
-                                               struct nvm_passthru_vio)
-#define NVME_NVM_IOCTL_SUBMIT_VIO      _IOWR(NVM_IOCTL, NVM_DEV_VIO_CMD,\
-                                               struct nvm_user_vio)
-
-#define NVM_VERSION_MAJOR      1
-#define NVM_VERSION_MINOR      0
-#define NVM_VERSION_PATCHLEVEL 0
-
-#endif
index dd7a166..4d93967 100644 (file)
@@ -73,7 +73,8 @@
 #define MOVE_MOUNT_T_SYMLINKS          0x00000010 /* Follow symlinks on to path */
 #define MOVE_MOUNT_T_AUTOMOUNTS                0x00000020 /* Follow automounts on to path */
 #define MOVE_MOUNT_T_EMPTY_PATH                0x00000040 /* Empty to path permitted */
-#define MOVE_MOUNT__MASK               0x00000077
+#define MOVE_MOUNT_SET_GROUP           0x00000100 /* Set sharing group instead */
+#define MOVE_MOUNT__MASK               0x00000177
 
 /*
  * fsopen() flags.
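A hedged sketch of the new MOVE_MOUNT_SET_GROUP flag: copy the sharing (peer) group of one mount onto another instead of moving it; the paths are placeholders and, lacking a glibc wrapper, the call goes through syscall(2):

#include <fcntl.h>	/* AT_FDCWD */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

static int set_mount_group(const char *from, const char *to)
{
	/* Set "to"'s sharing group from "from" rather than moving the mount. */
	return syscall(SYS_move_mount, AT_FDCWD, from, AT_FDCWD, to,
		       MOVE_MOUNT_SET_GROUP);
}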
index dc8b722..00a6069 100644 (file)
@@ -66,8 +66,11 @@ enum {
 #define NUD_NONE       0x00
 
 /* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change
-   and make no address resolution or NUD.
-   NUD_PERMANENT also cannot be deleted by garbage collectors.
+ * and make no address resolution or NUD.
+ * NUD_PERMANENT also cannot be deleted by garbage collectors.
+ * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry
+ * states don't make sense and thus are ignored. Such entries don't age and
+ * can roam.
  */
 
 struct nda_cacheinfo {
index 912ec60..bbcd285 100644 (file)
@@ -43,6 +43,15 @@ enum nfnl_hook_chain_info_attributes {
 };
 #define NFNLA_HOOK_INFO_MAX (__NFNLA_HOOK_INFO_MAX - 1)
 
+enum nfnl_hook_chain_desc_attributes {
+       NFNLA_CHAIN_UNSPEC,
+       NFNLA_CHAIN_TABLE,
+       NFNLA_CHAIN_FAMILY,
+       NFNLA_CHAIN_NAME,
+       __NFNLA_CHAIN_MAX,
+};
+#define NFNLA_CHAIN_MAX (__NFNLA_CHAIN_MAX - 1)
+
 /**
  * enum nfnl_hook_chaintype - chain type
  *
index 427294d..e29e8ac 100644 (file)
@@ -33,7 +33,6 @@ struct nfs_fhbase_old {
 
 /*
  * This is the new flexible, extensible style NFSv2/v3/v4 file handle.
- * by Neil Brown <neilb@cse.unsw.edu.au> - March 2000
  *
  * The file handle starts with a sequence of four-byte words.
  * The first word contains a version number (1) and three descriptor bytes
index 967d9c5..964c41e 100644 (file)
@@ -213,6 +213,7 @@ struct prctl_mm_map {
 /* Speculation control variants */
 # define PR_SPEC_STORE_BYPASS          0
 # define PR_SPEC_INDIRECT_BRANCH       1
+# define PR_SPEC_L1D_FLUSH             2
 /* Return and control values for PR_SET/GET_SPECULATION_CTRL */
 # define PR_SPEC_NOT_AFFECTED          0
 # define PR_SPEC_PRCTL                 (1UL << 0)
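A hedged sketch of the new speculation-control variant: a task opts in to having its L1D cache flushed on context switch (assumes a kernel built with the corresponding mitigation and the control enabled system-wide):

#include <sys/prctl.h>
#include <linux/prctl.h>

static int enable_l1d_flush(void)
{
	return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
		     PR_SPEC_ENABLE, 0, 0);
}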
diff --git a/include/uapi/linux/virtio_i2c.h b/include/uapi/linux/virtio_i2c.h
new file mode 100644 (file)
index 0000000..7c6a6fc
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
+/*
+ * Definitions for virtio I2C Adapter
+ *
+ * Copyright (c) 2021 Intel Corporation. All rights reserved.
+ */
+
+#ifndef _UAPI_LINUX_VIRTIO_I2C_H
+#define _UAPI_LINUX_VIRTIO_I2C_H
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+/* The bit 0 of the @virtio_i2c_out_hdr.@flags, used to group the requests */
+#define VIRTIO_I2C_FLAGS_FAIL_NEXT     _BITUL(0)
+
+/**
+ * struct virtio_i2c_out_hdr - the virtio I2C message OUT header
+ * @addr: the controlled device address
+ * @padding: used to pad to full dword
+ * @flags: used for feature extensibility
+ */
+struct virtio_i2c_out_hdr {
+       __le16 addr;
+       __le16 padding;
+       __le32 flags;
+};
+
+/**
+ * struct virtio_i2c_in_hdr - the virtio I2C message IN header
+ * @status: the processing result from the backend
+ */
+struct virtio_i2c_in_hdr {
+       __u8 status;
+};
+
+/* The final status written by the device */
+#define VIRTIO_I2C_MSG_OK      0
+#define VIRTIO_I2C_MSG_ERR     1
+
+#endif /* _UAPI_LINUX_VIRTIO_I2C_H */
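A hedged sketch of filling the OUT header defined above for a backend request; the exact encoding of the addr field is governed by the virtio-i2c spec, and treating it as a plain 16-bit value here is an assumption:

#include <stdint.h>
#include <string.h>
#include <endian.h>
#include <linux/virtio_i2c.h>

static void fill_out_hdr(struct virtio_i2c_out_hdr *hdr, uint16_t addr)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->addr = htole16(addr);	/* controlled-device address, little-endian */
	/* Group requests: a failure here also fails the next queued request. */
	hdr->flags = htole32(VIRTIO_I2C_FLAGS_FAIL_NEXT);
}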
index 70a8057..99aa27b 100644 (file)
@@ -55,6 +55,7 @@
 #define VIRTIO_ID_FS                   26 /* virtio filesystem */
 #define VIRTIO_ID_PMEM                 27 /* virtio pmem */
 #define VIRTIO_ID_MAC80211_HWSIM       29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_I2C_ADAPTER          34 /* virtio i2c adapter */
 #define VIRTIO_ID_BT                   40 /* virtio bluetooth */
 
 /*
index 74aede8..b691d68 100644 (file)
@@ -432,10 +432,6 @@ retry:
                printk("Please append a correct \"root=\" boot option; here are the available partitions:\n");
 
                printk_all_partitions();
-#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
-               printk("DEBUG_BLOCK_EXT_DEVT is enabled, you need to specify "
-                      "explicit textual name for \"root=\" boot option.\n");
-#endif
                panic("VFS: Unable to mount root fs on %s", b);
        }
        if (!(flags & SB_RDONLY)) {
index 562f2ef..2d02406 100644 (file)
@@ -80,6 +80,7 @@ struct task_struct init_task
        .normal_prio    = MAX_PRIO - 20,
        .policy         = SCHED_NORMAL,
        .cpus_ptr       = &init_task.cpus_mask,
+       .user_cpus_ptr  = NULL,
        .cpus_mask      = CPU_MASK_ALL,
        .nr_cpus_allowed= NR_CPUS,
        .mm             = NULL,
index f5b8246..8d97aba 100644 (file)
@@ -397,6 +397,12 @@ static int __init bootconfig_params(char *param, char *val,
        return 0;
 }
 
+static int __init warn_bootconfig(char *str)
+{
+       /* The 'bootconfig' has been handled by bootconfig_params(). */
+       return 0;
+}
+
 static void __init setup_boot_config(void)
 {
        static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
@@ -475,9 +481,8 @@ static int __init warn_bootconfig(char *str)
        pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n");
        return 0;
 }
-early_param("bootconfig", warn_bootconfig);
-
 #endif
+early_param("bootconfig", warn_bootconfig);
 
 /* Change NUL term back to "=", to make "param" the whole string. */
 static void __init repair_env_string(char *param, char *val)
index 3de8fd1..4198f02 100644 (file)
@@ -251,7 +251,7 @@ config ARCH_USE_QUEUED_RWLOCKS
 
 config QUEUED_RWLOCKS
        def_bool y if ARCH_USE_QUEUED_RWLOCKS
-       depends on SMP
+       depends on SMP && !PREEMPT_RT
 
 config ARCH_HAS_MMIOWB
        bool
index b565ea1..d6a2c89 100644 (file)
@@ -6,6 +6,9 @@
  * Copyright 2005 IBM Corporation
  */
 
+#ifndef _KERNEL_AUDIT_H_
+#define _KERNEL_AUDIT_H_
+
 #include <linux/fs.h>
 #include <linux/audit.h>
 #include <linux/skbuff.h>
@@ -331,3 +334,5 @@ extern int audit_filter(int msgtype, unsigned int listtype);
 
 extern void audit_ctl_lock(void);
 extern void audit_ctl_unlock(void);
+
+#endif
index b2be4e9..2cd7b56 100644 (file)
@@ -593,7 +593,6 @@ static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
                spin_lock(&hash_lock);
        }
        spin_unlock(&hash_lock);
-       put_tree(victim);
 }
 
 /*
@@ -602,6 +601,7 @@ static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
 static void prune_one(struct audit_tree *victim)
 {
        prune_tree_chunks(victim, false);
+       put_tree(victim);
 }
 
 /* trim the uncommitted chunks from tree */
index b1a5fc0..0a28a80 100644 (file)
@@ -1362,11 +1362,13 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
 }
 
 /**
- *     __bpf_prog_run - run eBPF program on a given context
+ *     ___bpf_prog_run - run eBPF program on a given context
  *     @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
  *     @insn: is the array of eBPF instructions
  *
  * Decode and execute eBPF instructions.
+ *
+ * Return: whatever value is in %BPF_R0 at program exit
  */
 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 {
@@ -1878,6 +1880,9 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
  *
  * Try to JIT eBPF program, if JIT is not available, use interpreter.
  * The BPF program will be executed via BPF_PROG_RUN() macro.
+ *
+ * Return: the &fp argument along with &err set to 0 for success or
+ * a negative errno code on failure
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
index 72c58cc..9c011f3 100644 (file)
@@ -1565,8 +1565,8 @@ alloc:
        /* We cannot do copy_from_user or copy_to_user inside
         * the rcu_read_lock. Allocate enough space here.
         */
-       keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN);
-       values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN);
+       keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
+       values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
        if (!keys || !values) {
                ret = -ENOMEM;
                goto after_loop;
index 62cf003..55f83ea 100644 (file)
@@ -353,9 +353,15 @@ const struct bpf_func_proto bpf_jiffies64_proto = {
 #ifdef CONFIG_CGROUPS
 BPF_CALL_0(bpf_get_current_cgroup_id)
 {
-       struct cgroup *cgrp = task_dfl_cgroup(current);
+       struct cgroup *cgrp;
+       u64 cgrp_id;
 
-       return cgroup_id(cgrp);
+       rcu_read_lock();
+       cgrp = task_dfl_cgroup(current);
+       cgrp_id = cgroup_id(cgrp);
+       rcu_read_unlock();
+
+       return cgrp_id;
 }
 
 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
@@ -366,13 +372,17 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
 
 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
 {
-       struct cgroup *cgrp = task_dfl_cgroup(current);
+       struct cgroup *cgrp;
        struct cgroup *ancestor;
+       u64 cgrp_id;
 
+       rcu_read_lock();
+       cgrp = task_dfl_cgroup(current);
        ancestor = cgroup_ancestor(cgrp, ancestor_level);
-       if (!ancestor)
-               return 0;
-       return cgroup_id(ancestor);
+       cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
+       rcu_read_unlock();
+
+       return cgrp_id;
 }
 
 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
@@ -397,8 +407,8 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
        void *ptr;
        int i;
 
-       for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-               if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+       for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+               if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
                        continue;
 
                storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
@@ -1070,12 +1080,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_str_proto;
        case BPF_FUNC_snprintf_btf:
                return &bpf_snprintf_btf_proto;
index f9bda54..49f07e2 100644 (file)
@@ -5150,8 +5150,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
        case BPF_MAP_TYPE_RINGBUF:
                if (func_id != BPF_FUNC_ringbuf_output &&
                    func_id != BPF_FUNC_ringbuf_reserve &&
-                   func_id != BPF_FUNC_ringbuf_submit &&
-                   func_id != BPF_FUNC_ringbuf_discard &&
                    func_id != BPF_FUNC_ringbuf_query)
                        goto error;
                break;
@@ -5260,6 +5258,12 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
                if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
                        goto error;
                break;
+       case BPF_FUNC_ringbuf_output:
+       case BPF_FUNC_ringbuf_reserve:
+       case BPF_FUNC_ringbuf_query:
+               if (map->map_type != BPF_MAP_TYPE_RINGBUF)
+                       goto error;
+               break;
        case BPF_FUNC_get_stackid:
                if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
                        goto error;
@@ -11663,6 +11667,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
                if (aux_data[i].seen)
                        continue;
                memcpy(insn + i, &trap, sizeof(trap));
+               aux_data[i].zext_dst = false;
        }
 }
 
index e17a566..9594cfd 100644 (file)
@@ -248,9 +248,9 @@ static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr)
 {
        cfi_check_fn fn;
 
-       rcu_read_lock_sched();
+       rcu_read_lock_sched_notrace();
        fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr);
-       rcu_read_unlock_sched();
+       rcu_read_unlock_sched_notrace();
 
        return fn;
 }
@@ -269,11 +269,11 @@ static inline cfi_check_fn find_module_check_fn(unsigned long ptr)
        cfi_check_fn fn = NULL;
        struct module *mod;
 
-       rcu_read_lock_sched();
+       rcu_read_lock_sched_notrace();
        mod = __module_address(ptr);
        if (mod)
                fn = mod->cfi_check;
-       rcu_read_unlock_sched();
+       rcu_read_unlock_sched_notrace();
 
        return fn;
 }
index adb5190..6500cbe 100644 (file)
@@ -372,18 +372,29 @@ static inline bool is_in_v2_mode(void)
 }
 
 /*
- * Return in pmask the portion of a cpusets's cpus_allowed that
- * are online.  If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online cpus.
+ * Return in pmask the portion of a task's cpusets's cpus_allowed that
+ * are online and are capable of running the task.  If none are found,
+ * walk up the cpuset hierarchy until we find one that does have some
+ * appropriate cpus.
  *
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_online_mask.
  *
  * Call with callback_lock or cpuset_mutex held.
  */
-static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
+static void guarantee_online_cpus(struct task_struct *tsk,
+                                 struct cpumask *pmask)
 {
-       while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
+       const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
+       struct cpuset *cs;
+
+       if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
+               cpumask_copy(pmask, cpu_online_mask);
+
+       rcu_read_lock();
+       cs = task_cs(tsk);
+
+       while (!cpumask_intersects(cs->effective_cpus, pmask)) {
                cs = parent_cs(cs);
                if (unlikely(!cs)) {
                        /*
@@ -393,11 +404,13 @@ static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
                         * cpuset's effective_cpus is on its way to be
                         * identical to cpu_online_mask.
                         */
-                       cpumask_copy(pmask, cpu_online_mask);
-                       return;
+                       goto out_unlock;
                }
        }
-       cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
+       cpumask_and(pmask, pmask, cs->effective_cpus);
+
+out_unlock:
+       rcu_read_unlock();
 }
 
 /*
@@ -2199,15 +2212,13 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 
        percpu_down_write(&cpuset_rwsem);
 
-       /* prepare for attach */
-       if (cs == &top_cpuset)
-               cpumask_copy(cpus_attach, cpu_possible_mask);
-       else
-               guarantee_online_cpus(cs, cpus_attach);
-
        guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 
        cgroup_taskset_for_each(task, css, tset) {
+               if (cs != &top_cpuset)
+                       guarantee_online_cpus(task, cpus_attach);
+               else
+                       cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
                /*
                 * can_attach beforehand should guarantee that this doesn't
                 * fail.  TODO: have a better way to handle failure here
@@ -3302,9 +3313,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
        unsigned long flags;
 
        spin_lock_irqsave(&callback_lock, flags);
-       rcu_read_lock();
-       guarantee_online_cpus(task_cs(tsk), pmask);
-       rcu_read_unlock();
+       guarantee_online_cpus(tsk, pmask);
        spin_unlock_irqrestore(&callback_lock, flags);
 }
 
@@ -3318,13 +3327,22 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
  * which will not contain a sane cpumask during cases such as cpu hotplugging.
  * This is the absolute last resort for the scheduler and it is only used if
  * _every_ other avenue has been traveled.
+ *
+ * Returns true if the affinity of @tsk was changed, false otherwise.
  **/
 
-void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
+       const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
+       const struct cpumask *cs_mask;
+       bool changed = false;
+
        rcu_read_lock();
-       do_set_cpus_allowed(tsk, is_in_v2_mode() ?
-               task_cs(tsk)->cpus_allowed : cpu_possible_mask);
+       cs_mask = task_cs(tsk)->cpus_allowed;
+       if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
+               do_set_cpus_allowed(tsk, cs_mask);
+               changed = true;
+       }
        rcu_read_unlock();
 
        /*
@@ -3344,6 +3362,7 @@ void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
         * select_fallback_rq() will fix things ups and set cpu_possible_mask
         * if required.
         */
+       return changed;
 }
 
 void __init cpuset_init_current_mems_allowed(void)
index 7f0e589..b264ab5 100644 (file)
@@ -347,19 +347,20 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
 }
 
 static struct cgroup_rstat_cpu *
-cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
+cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
 {
        struct cgroup_rstat_cpu *rstatc;
 
        rstatc = get_cpu_ptr(cgrp->rstat_cpu);
-       u64_stats_update_begin(&rstatc->bsync);
+       *flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
        return rstatc;
 }
 
 static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
-                                                struct cgroup_rstat_cpu *rstatc)
+                                                struct cgroup_rstat_cpu *rstatc,
+                                                unsigned long flags)
 {
-       u64_stats_update_end(&rstatc->bsync);
+       u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
        cgroup_rstat_updated(cgrp, smp_processor_id());
        put_cpu_ptr(rstatc);
 }
@@ -367,18 +368,20 @@ static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
 void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
 {
        struct cgroup_rstat_cpu *rstatc;
+       unsigned long flags;
 
-       rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+       rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
        rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
-       cgroup_base_stat_cputime_account_end(cgrp, rstatc);
+       cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
 }
 
 void __cgroup_account_cputime_field(struct cgroup *cgrp,
                                    enum cpu_usage_stat index, u64 delta_exec)
 {
        struct cgroup_rstat_cpu *rstatc;
+       unsigned long flags;
 
-       rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+       rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
 
        switch (index) {
        case CPUTIME_USER:
@@ -394,7 +397,7 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
                break;
        }
 
-       cgroup_base_stat_cputime_account_end(cgrp, rstatc);
+       cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
 }
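
The change above threads IRQ flags through the begin/end helpers so the accounting path can be entered from any context. A sketch of the same u64_stats irqsave pattern on a hypothetical per-cpu counter (my_stats and my_counter are made-up names):

#include <linux/u64_stats_sync.h>
#include <linux/percpu.h>

struct my_stats {
	u64 packets;
	struct u64_stats_sync syncp;	/* u64_stats_init() at probe time */
};

static DEFINE_PER_CPU(struct my_stats, my_counter);

/* Writer: safe from any context thanks to the irqsave variant. */
static void my_stats_add(u64 n)
{
	struct my_stats *s = get_cpu_ptr(&my_counter);
	unsigned long flags;

	flags = u64_stats_update_begin_irqsave(&s->syncp);
	s->packets += n;
	u64_stats_update_end_irqrestore(&s->syncp, flags);
	put_cpu_ptr(&my_counter);
}

/* Reader: retries if it raced with a writer (relevant on 32-bit). */
static u64 my_stats_read(int cpu)
{
	struct my_stats *s = per_cpu_ptr(&my_counter, cpu);
	unsigned int seq;
	u64 val;

	do {
		seq = u64_stats_fetch_begin(&s->syncp);
		val = s->packets;
	} while (u64_stats_fetch_retry(&s->syncp, seq));

	return val;
}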
 
 /*
index 804b847..192e43a 100644 (file)
 #include "smpboot.h"
 
 /**
- * cpuhp_cpu_state - Per cpu hotplug state storage
+ * struct cpuhp_cpu_state - Per cpu hotplug state storage
  * @state:     The current cpu state
  * @target:    The target state
+ * @fail:      Current CPU hotplug callback state
  * @thread:    Pointer to the hotplug thread
  * @should_run:        Thread should execute
  * @rollback:  Perform a rollback
  * @single:    Single callback invocation
  * @bringup:   Single callback bringup or teardown selector
+ * @cpu:       CPU number
+ * @node:      Remote CPU node; for multi-instance, do a
+ *             single entry callback for install/remove
+ * @last:      For multi-instance rollback, remember how far we got
  * @cb_state:  The state for a single callback (install/uninstall)
  * @result:    Result of the operation
  * @done_up:   Signal completion to the issuer of the task for cpu-up
@@ -106,11 +111,12 @@ static inline void cpuhp_lock_release(bool bringup) { }
 #endif
 
 /**
- * cpuhp_step - Hotplug state machine step
+ * struct cpuhp_step - Hotplug state machine step
  * @name:      Name of the step
  * @startup:   Startup function of the step
  * @teardown:  Teardown function of the step
  * @cant_stop: Bringup/teardown can't be stopped at this step
+ * @multi_instance:    State has multiple instances which get added afterwards
  */
 struct cpuhp_step {
        const char              *name;
@@ -124,7 +130,9 @@ struct cpuhp_step {
                int             (*multi)(unsigned int cpu,
                                         struct hlist_node *node);
        } teardown;
+       /* private: */
        struct hlist_head       list;
+       /* public: */
        bool                    cant_stop;
        bool                    multi_instance;
 };
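
The /* private: */ and /* public: */ comments added above are kernel-doc directives, as are the @member lines and the "Return:" sections in the following hunks. A sketch of the convention on a made-up structure and function:

#include <linux/list.h>
#include <linux/types.h>

/**
 * struct my_widget - Example device state (hypothetical)
 * @name:	Human readable identifier
 * @enabled:	True once the widget has been started
 */
struct my_widget {
	const char		*name;
	/* private: */
	struct list_head	node;	/* excluded from the generated docs */
	/* public: */
	bool			enabled;
};

/**
 * my_widget_start - Start a widget (hypothetical)
 * @w:	Widget to start
 *
 * Return: %0 on success or a negative errno code
 */
int my_widget_start(struct my_widget *w);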
@@ -143,7 +151,7 @@ static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
 }
 
 /**
- * cpuhp_invoke_callback _ Invoke the callbacks for a given state
+ * cpuhp_invoke_callback - Invoke the callbacks for a given state
  * @cpu:       The cpu for which the callback should be invoked
  * @state:     The state to do callbacks for
  * @bringup:   True if the bringup callback should be invoked
@@ -151,6 +159,8 @@ static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
  * @lastp:     For multi-instance rollback, remember how far we got
  *
  * Called from cpu hotplug and from the state register machinery.
+ *
+ * Return: %0 on success or a negative errno code
  */
 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
                                 bool bringup, struct hlist_node *node,
@@ -682,6 +692,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 
        ret = cpuhp_invoke_callback_range(true, cpu, st, target);
        if (ret) {
+               pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
+                        ret, cpu, cpuhp_get_step(st->state)->name,
+                        st->state);
+
                cpuhp_reset_state(st, prev_state);
                if (can_rollback_cpu(st))
                        WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
@@ -1081,6 +1095,9 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 
        ret = cpuhp_invoke_callback_range(false, cpu, st, target);
        if (ret) {
+               pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
+                        ret, cpu, cpuhp_get_step(st->state)->name,
+                        st->state);
 
                cpuhp_reset_state(st, prev_state);
 
@@ -1183,6 +1200,8 @@ static int cpu_down(unsigned int cpu, enum cpuhp_state target)
  * This function is meant to be used by device core cpu subsystem only.
  *
  * Other subsystems should use remove_cpu() instead.
+ *
+ * Return: %0 on success or a negative errno code
  */
 int cpu_device_down(struct device *dev)
 {
@@ -1395,6 +1414,8 @@ out:
  * This function is meant to be used by device core cpu subsystem only.
  *
  * Other subsystems should use add_cpu() instead.
+ *
+ * Return: %0 on success or a negative errno code
  */
 int cpu_device_up(struct device *dev)
 {
@@ -1420,6 +1441,8 @@ EXPORT_SYMBOL_GPL(add_cpu);
  * On some architectures like arm64, we can hibernate on any CPU, but on
  * wake up the CPU we hibernated on might be offline as a side effect of
  * using maxcpus= for example.
+ *
+ * Return: %0 on success or a negative errno code
  */
 int bringup_hibernate_cpu(unsigned int sleep_cpu)
 {
@@ -1976,6 +1999,7 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
 /**
  * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
  * @state:             The state to setup
+ * @name:              Name of the step
  * @invoke:            If true, the startup function is invoked for cpus where
  *                     cpu state >= @state
  * @startup:           startup callback function
@@ -1984,9 +2008,9 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
  *                     added afterwards.
  *
  * The caller needs to hold cpus read locked while calling this function.
- * Returns:
+ * Return:
  *   On success:
- *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
+ *      Positive state number if @state is CPUHP_AP_ONLINE_DYN;
  *      0 for all other states
  *   On failure: proper (negative) error code
  */
@@ -2232,18 +2256,17 @@ int cpuhp_smt_enable(void)
 #endif
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
-static ssize_t show_cpuhp_state(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t state_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
 
        return sprintf(buf, "%d\n", st->state);
 }
-static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
+static DEVICE_ATTR_RO(state);
 
-static ssize_t write_cpuhp_target(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
+static ssize_t target_store(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t count)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
        struct cpuhp_step *sp;
@@ -2281,19 +2304,17 @@ out:
        return ret ? ret : count;
 }
 
-static ssize_t show_cpuhp_target(struct device *dev,
-                                struct device_attribute *attr, char *buf)
+static ssize_t target_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
 
        return sprintf(buf, "%d\n", st->target);
 }
-static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
-
+static DEVICE_ATTR_RW(target);
 
-static ssize_t write_cpuhp_fail(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t count)
+static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
        struct cpuhp_step *sp;
@@ -2342,15 +2363,15 @@ static ssize_t write_cpuhp_fail(struct device *dev,
        return count;
 }
 
-static ssize_t show_cpuhp_fail(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static ssize_t fail_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
 
        return sprintf(buf, "%d\n", st->fail);
 }
 
-static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
+static DEVICE_ATTR_RW(fail);
 
 static struct attribute *cpuhp_cpu_attrs[] = {
        &dev_attr_state.attr,
@@ -2365,7 +2386,7 @@ static const struct attribute_group cpuhp_cpu_attr_group = {
        NULL
 };
 
-static ssize_t show_cpuhp_states(struct device *dev,
+static ssize_t states_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
 {
        ssize_t cur, res = 0;
@@ -2384,7 +2405,7 @@ static ssize_t show_cpuhp_states(struct device *dev,
        mutex_unlock(&cpuhp_state_mutex);
        return res;
 }
-static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
+static DEVICE_ATTR_RO(states);
 
 static struct attribute *cpuhp_cpu_root_attrs[] = {
        &dev_attr_states.attr,
@@ -2457,28 +2478,27 @@ static const char *smt_states[] = {
        [CPU_SMT_NOT_IMPLEMENTED]       = "notimplemented",
 };
 
-static ssize_t
-show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t control_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
 {
        const char *state = smt_states[cpu_smt_control];
 
        return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
 }
 
-static ssize_t
-store_smt_control(struct device *dev, struct device_attribute *attr,
-                 const char *buf, size_t count)
+static ssize_t control_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
 {
        return __store_smt_control(dev, attr, buf, count);
 }
-static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
+static DEVICE_ATTR_RW(control);
 
-static ssize_t
-show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t active_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
 {
        return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
 }
-static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
+static DEVICE_ATTR_RO(active);
 
 static struct attribute *cpuhp_smt_attrs[] = {
        &dev_attr_control.attr,
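
The DEVICE_ATTR_RO()/DEVICE_ATTR_RW() conversions above work because the macros derive the callback names from the attribute name: <attr>_show() and <attr>_store(). A sketch with a hypothetical "threshold" attribute:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static unsigned int threshold_value;

static ssize_t threshold_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", threshold_value);
}

static ssize_t threshold_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int ret = kstrtouint(buf, 0, &threshold_value);

	return ret ? ret : count;
}
/* Expands to struct device_attribute dev_attr_threshold, mode 0644. */
static DEVICE_ATTR_RW(threshold);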
index f7e1d0e..246efc7 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/syscore_ops.h>
 
-static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+/*
+ * atomic_notifiers use a spinlock_t, which can block under PREEMPT_RT.
+ * Notifications for cpu_pm will be issued by the idle task itself, which can
+ * never block, IOW it requires using a raw_spinlock_t.
+ */
+static struct {
+       struct raw_notifier_head chain;
+       raw_spinlock_t lock;
+} cpu_pm_notifier = {
+       .chain = RAW_NOTIFIER_INIT(cpu_pm_notifier.chain),
+       .lock  = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
+};
 
 static int cpu_pm_notify(enum cpu_pm_event event)
 {
        int ret;
 
        /*
-        * atomic_notifier_call_chain has a RCU read critical section, which
-        * could be disfunctional in cpu idle. Copy RCU_NONIDLE code to let
-        * RCU know this.
+        * This introduces a RCU read critical section, which could be
+        * dysfunctional in cpu idle. Copy RCU_NONIDLE code to let RCU know
+        * this.
         */
        rcu_irq_enter_irqson();
-       ret = atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL);
+       rcu_read_lock();
+       ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
+       rcu_read_unlock();
        rcu_irq_exit_irqson();
 
        return notifier_to_errno(ret);
@@ -33,10 +46,13 @@ static int cpu_pm_notify(enum cpu_pm_event event)
 
 static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
 {
+       unsigned long flags;
        int ret;
 
        rcu_irq_enter_irqson();
-       ret = atomic_notifier_call_chain_robust(&cpu_pm_notifier_chain, event_up, event_down, NULL);
+       raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
+       ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
+       raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
        rcu_irq_exit_irqson();
 
        return notifier_to_errno(ret);
@@ -49,12 +65,17 @@ static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event ev
  * Add a driver to a list of drivers that are notified about
  * CPU and CPU cluster low power entry and exit.
  *
- * This function may sleep, and has the same return conditions as
- * raw_notifier_chain_register.
+ * This function has the same return conditions as raw_notifier_chain_register.
  */
 int cpu_pm_register_notifier(struct notifier_block *nb)
 {
-       return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+       unsigned long flags;
+       int ret;
+
+       raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
+       ret = raw_notifier_chain_register(&cpu_pm_notifier.chain, nb);
+       raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
 
@@ -64,12 +85,17 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
  *
  * Remove a driver from the CPU PM notifier list.
  *
- * This function may sleep, and has the same return conditions as
- * raw_notifier_chain_unregister.
+ * This function has the same return conditions as raw_notifier_chain_unregister.
  */
 int cpu_pm_unregister_notifier(struct notifier_block *nb)
 {
-       return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+       unsigned long flags;
+       int ret;
+
+       raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
+       ret = raw_notifier_chain_unregister(&cpu_pm_notifier.chain, nb);
+       raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
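
Driver-side usage of this API is unchanged. A hypothetical registration, sketched to show why the callback must never block: it is called from the idle path with interrupts disabled, which is the reason for the raw chain above.

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int my_cpu_pm_notify(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* Save per-CPU hardware state before the low power state. */
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		/* Restore per-CPU hardware state. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_pm_nb = {
	.notifier_call = my_cpu_pm_notify,
};

static int __init my_driver_init(void)
{
	return cpu_pm_register_notifier(&my_cpu_pm_nb);
}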
 
index e6fd2b3..f784e08 100644 (file)
@@ -286,13 +286,13 @@ struct cred *prepare_creds(void)
        new->security = NULL;
 #endif
 
-       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
-               goto error;
-
        new->ucounts = get_ucounts(new->ucounts);
        if (!new->ucounts)
                goto error;
 
+       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
+               goto error;
+
        validate_creds(new);
        return new;
 
@@ -753,13 +753,13 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 #ifdef CONFIG_SECURITY
        new->security = NULL;
 #endif
-       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
-               goto error;
-
        new->ucounts = get_ucounts(new->ucounts);
        if (!new->ucounts)
                goto error;
 
+       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
+               goto error;
+
        put_cred(old);
        validate_creds(new);
        return new;

index 4649170..1cb1f9b 100644 (file)
@@ -11917,6 +11917,37 @@ again:
        return gctx;
 }
 
+static bool
+perf_check_permission(struct perf_event_attr *attr, struct task_struct *task)
+{
+       unsigned int ptrace_mode = PTRACE_MODE_READ_REALCREDS;
+       bool is_capable = perfmon_capable();
+
+       if (attr->sigtrap) {
+               /*
+                * perf_event_attr::sigtrap sends signals to the other task.
+                * Require the current task to also have CAP_KILL.
+                */
+               rcu_read_lock();
+               is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL);
+               rcu_read_unlock();
+
+               /*
+                * If the required capabilities aren't available, check for
+                * ptrace permissions: upgrade to ATTACH, since sending signals
+                * can effectively change the target task.
+                */
+               ptrace_mode = PTRACE_MODE_ATTACH_REALCREDS;
+       }
+
+       /*
+        * Preserve ptrace permission check for backwards compatibility. The
+        * ptrace check also includes checks that the current task and other
+        * task have matching uids, and is therefore not done here explicitly.
+        */
+       return is_capable || ptrace_may_access(task, ptrace_mode);
+}
+
 /**
  * sys_perf_event_open - open a performance event, associate it to a task/cpu
  *
@@ -12163,15 +12194,13 @@ SYSCALL_DEFINE5(perf_event_open,
                        goto err_file;
 
                /*
-                * Preserve ptrace permission check for backwards compatibility.
-                *
                 * We must hold exec_update_lock across this and any potential
                 * perf_install_in_context() call for this new event to
                 * serialize against exec() altering our credentials (and the
                 * perf_event_exit_task() that could imply).
                 */
                err = -EACCES;
-               if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+               if (!perf_check_permission(&attr, task))
                        goto err_cred;
        }
 
index 8359734..f32320a 100644 (file)
@@ -568,7 +568,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
        if (!cpu_events)
                return (void __percpu __force *)ERR_PTR(-ENOMEM);
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(cpu) {
                bp = perf_event_create_kernel_counter(attr, cpu, NULL,
                                                      triggered, context);
@@ -579,7 +579,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
 
                per_cpu(*cpu_events, cpu) = bp;
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (likely(!err))
                return cpu_events;
index 9a89e7f..91a43e5 100644 (file)
@@ -777,7 +777,7 @@ void __noreturn do_exit(long code)
                schedule();
        }
 
-       io_uring_files_cancel(tsk->files);
+       io_uring_files_cancel();
        exit_signals(tsk);  /* sets PF_EXITING */
 
        /* sync mm's RSS info before statistics gathering */
index bc94b2c..757301c 100644 (file)
@@ -446,6 +446,7 @@ void put_task_stack(struct task_struct *tsk)
 
 void free_task(struct task_struct *tsk)
 {
+       release_user_cpus_ptr(tsk);
        scs_release(tsk);
 
 #ifndef CONFIG_THREAD_INFO_IN_TASK
@@ -828,10 +829,10 @@ void __init fork_init(void)
        for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++)
                init_user_ns.ucount_max[i] = max_threads/2;
 
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, task_rlimit(&init_task, RLIMIT_NPROC));
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, task_rlimit(&init_task, RLIMIT_MSGQUEUE));
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, task_rlimit(&init_task, RLIMIT_SIGPENDING));
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, task_rlimit(&init_task, RLIMIT_MEMLOCK));
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC,      RLIM_INFINITY);
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE,   RLIM_INFINITY);
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK,    RLIM_INFINITY);
 
 #ifdef CONFIG_VMAP_STACK
        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
@@ -924,6 +925,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #endif
        if (orig->cpus_ptr == &orig->cpus_mask)
                tsk->cpus_ptr = &tsk->cpus_mask;
+       dup_user_cpus_ptr(tsk, orig, node);
 
        /*
         * One for the user space visible state that goes away when reaped.
index 2ecb075..e7b4c61 100644 (file)
@@ -179,7 +179,7 @@ struct futex_pi_state {
        /*
         * The PI object:
         */
-       struct rt_mutex pi_mutex;
+       struct rt_mutex_base pi_mutex;
 
        struct task_struct *owner;
        refcount_t refcount;
@@ -197,6 +197,8 @@ struct futex_pi_state {
  * @rt_waiter:         rt_waiter storage for use with requeue_pi
  * @requeue_pi_key:    the requeue_pi target futex key
  * @bitset:            bitset for the optional bitmasked wakeup
+ * @requeue_state:     State field for futex_requeue_pi()
+ * @requeue_wait:      RCU wait for futex_requeue_pi() (RT only)
  *
  * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
  * we can wake only the relevant ones (hashed queues may be shared).
@@ -219,12 +221,68 @@ struct futex_q {
        struct rt_mutex_waiter *rt_waiter;
        union futex_key *requeue_pi_key;
        u32 bitset;
+       atomic_t requeue_state;
+#ifdef CONFIG_PREEMPT_RT
+       struct rcuwait requeue_wait;
+#endif
 } __randomize_layout;
 
+/*
+ * On PREEMPT_RT, the hash bucket lock is a 'sleeping' spinlock with an
+ * underlying rtmutex. The task which is about to be requeued could have
+ * just woken up (timeout, signal). After the wake up the task has to
+ * acquire hash bucket lock, which is held by the requeue code.  As a task
+ * can only be blocked on _ONE_ rtmutex at a time, the proxy lock blocking
+ * and the hash bucket lock blocking would collide and corrupt state.
+ *
+ * On !PREEMPT_RT this is not a problem and everything could be serialized
+ * on hash bucket lock, but aside from having the benefit of common code,
+ * this allows us to avoid doing the requeue when the task is already on the
+ * way out and taking the hash bucket lock of the original uaddr1 when the
+ * requeue has been completed.
+ *
+ * The following state transitions are valid:
+ *
+ * On the waiter side:
+ *   Q_REQUEUE_PI_NONE         -> Q_REQUEUE_PI_IGNORE
+ *   Q_REQUEUE_PI_IN_PROGRESS  -> Q_REQUEUE_PI_WAIT
+ *
+ * On the requeue side:
+ *   Q_REQUEUE_PI_NONE         -> Q_REQUEUE_PI_IN_PROGRESS
+ *   Q_REQUEUE_PI_IN_PROGRESS  -> Q_REQUEUE_PI_DONE/LOCKED
+ *   Q_REQUEUE_PI_IN_PROGRESS  -> Q_REQUEUE_PI_NONE (requeue failed)
+ *   Q_REQUEUE_PI_WAIT         -> Q_REQUEUE_PI_DONE/LOCKED
+ *   Q_REQUEUE_PI_WAIT         -> Q_REQUEUE_PI_IGNORE (requeue failed)
+ *
+ * The requeue side ignores a waiter with state Q_REQUEUE_PI_IGNORE as this
+ * signals that the waiter is already on the way out. It also means that
+ * the waiter is still on the 'wait' futex, i.e. uaddr1.
+ *
+ * The waiter side signals early wakeup to the requeue side either through
+ * setting state to Q_REQUEUE_PI_IGNORE or to Q_REQUEUE_PI_WAIT depending
+ * on the current state. In case of Q_REQUEUE_PI_IGNORE it can immediately
+ * proceed to take the hash bucket lock of uaddr1. If it set state to WAIT,
+ * which means the wakeup is interleaving with a requeue in progress, it has
+ * to wait for the requeue side to change the state. Either to DONE/LOCKED
+ * or to IGNORE. DONE/LOCKED means the waiter q is now on the uaddr2 futex
+ * and either blocked (DONE) or has acquired it (LOCKED). IGNORE is set by
+ * the requeue side when the requeue attempt failed via deadlock detection
+ * and therefore the waiter q is still on the uaddr1 futex.
+ */
+enum {
+       Q_REQUEUE_PI_NONE               =  0,
+       Q_REQUEUE_PI_IGNORE,
+       Q_REQUEUE_PI_IN_PROGRESS,
+       Q_REQUEUE_PI_WAIT,
+       Q_REQUEUE_PI_DONE,
+       Q_REQUEUE_PI_LOCKED,
+};
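
A compact userspace model of the two sides of this handshake, using C11 atomics in place of atomic_try_cmpxchg(). Only the prepare and wakeup-sync transitions are modelled; the complete() side and the PREEMPT_RT rcuwait sleep are left out, and all names are made up.

#include <stdatomic.h>
#include <stdbool.h>

enum {
	RQ_NONE = 0, RQ_IGNORE, RQ_IN_PROGRESS, RQ_WAIT, RQ_DONE, RQ_LOCKED,
};

/* Requeue side: claim the waiter unless it already signalled an early wake. */
static bool requeue_prepare(atomic_int *st)
{
	int old = atomic_load_explicit(st, memory_order_acquire);

	do {
		if (old == RQ_IGNORE)
			return false;
		if (old != RQ_NONE)
			break;
	} while (!atomic_compare_exchange_weak(st, &old, RQ_IN_PROGRESS));

	return true;
}

/* Waiter side: either opt out of the requeue or wait for it to finish. */
static int waiter_wakeup_sync(atomic_int *st)
{
	int old = atomic_load_explicit(st, memory_order_acquire);
	int new;

	do {
		if (old >= RQ_DONE)
			return old;
		new = (old == RQ_NONE) ? RQ_IGNORE : RQ_WAIT;
	} while (!atomic_compare_exchange_weak(st, &old, new));

	/* The kernel sleeps here on PREEMPT_RT; the model simply spins. */
	while (atomic_load(st) == RQ_WAIT)
		;

	return atomic_load(st);
}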
+
 static const struct futex_q futex_q_init = {
        /* list gets initialized in queue_me()*/
-       .key = FUTEX_KEY_INIT,
-       .bitset = FUTEX_BITSET_MATCH_ANY
+       .key            = FUTEX_KEY_INIT,
+       .bitset         = FUTEX_BITSET_MATCH_ANY,
+       .requeue_state  = ATOMIC_INIT(Q_REQUEUE_PI_NONE),
 };
 
 /*
@@ -1299,27 +1357,6 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
        return 0;
 }
 
-static int lookup_pi_state(u32 __user *uaddr, u32 uval,
-                          struct futex_hash_bucket *hb,
-                          union futex_key *key, struct futex_pi_state **ps,
-                          struct task_struct **exiting)
-{
-       struct futex_q *top_waiter = futex_top_waiter(hb, key);
-
-       /*
-        * If there is a waiter on that futex, validate it and
-        * attach to the pi_state when the validation succeeds.
-        */
-       if (top_waiter)
-               return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
-
-       /*
-        * We are the first waiter - try to look up the owner based on
-        * @uval and attach to it.
-        */
-       return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
-}
-
 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
 {
        int err;
@@ -1354,7 +1391,7 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
  *  -  1 - acquired the lock;
  *  - <0 - error
  *
- * The hb->lock and futex_key refs shall be held by the caller.
+ * The hb->lock must be held by the caller.
  *
  * @exiting is only set when the return value is -EBUSY. If so, this holds
  * a refcount on the exiting task on return and the caller needs to drop it
@@ -1493,11 +1530,11 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
  */
 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
 {
-       u32 curval, newval;
        struct rt_mutex_waiter *top_waiter;
        struct task_struct *new_owner;
        bool postunlock = false;
-       DEFINE_WAKE_Q(wake_q);
+       DEFINE_RT_WAKE_Q(wqh);
+       u32 curval, newval;
        int ret = 0;
 
        top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
@@ -1549,14 +1586,14 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
                 * not fail.
                 */
                pi_state_update_owner(pi_state, new_owner);
-               postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+               postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wqh);
        }
 
 out_unlock:
        raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 
        if (postunlock)
-               rt_mutex_postunlock(&wake_q);
+               rt_mutex_postunlock(&wqh);
 
        return ret;
 }
@@ -1793,6 +1830,108 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
        q->key = *key2;
 }
 
+static inline bool futex_requeue_pi_prepare(struct futex_q *q,
+                                           struct futex_pi_state *pi_state)
+{
+       int old, new;
+
+       /*
+        * Set state to Q_REQUEUE_PI_IN_PROGRESS unless an early wakeup has
+        * already set Q_REQUEUE_PI_IGNORE to signal that requeue should
+        * ignore the waiter.
+        */
+       old = atomic_read_acquire(&q->requeue_state);
+       do {
+               if (old == Q_REQUEUE_PI_IGNORE)
+                       return false;
+
+               /*
+                * futex_proxy_trylock_atomic() might have set it to
+                * IN_PROGRESS and an interleaved early wake to WAIT.
+                *
+                * It was considered to have an extra state for that
+                * trylock, but that would just add more conditionals
+                * all over the place for a dubious value.
+                */
+               if (old != Q_REQUEUE_PI_NONE)
+                       break;
+
+               new = Q_REQUEUE_PI_IN_PROGRESS;
+       } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
+
+       q->pi_state = pi_state;
+       return true;
+}
+
+static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
+{
+       int old, new;
+
+       old = atomic_read_acquire(&q->requeue_state);
+       do {
+               if (old == Q_REQUEUE_PI_IGNORE)
+                       return;
+
+               if (locked >= 0) {
+                       /* Requeue succeeded. Set DONE or LOCKED */
+                       WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_PROGRESS &&
+                                    old != Q_REQUEUE_PI_WAIT);
+                       new = Q_REQUEUE_PI_DONE + locked;
+               } else if (old == Q_REQUEUE_PI_IN_PROGRESS) {
+                       /* Deadlock, no early wakeup interleave */
+                       new = Q_REQUEUE_PI_NONE;
+               } else {
+                       /* Deadlock, early wakeup interleave. */
+                       WARN_ON_ONCE(old != Q_REQUEUE_PI_WAIT);
+                       new = Q_REQUEUE_PI_IGNORE;
+               }
+       } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
+
+#ifdef CONFIG_PREEMPT_RT
+       /* If the waiter interleaved with the requeue let it know */
+       if (unlikely(old == Q_REQUEUE_PI_WAIT))
+               rcuwait_wake_up(&q->requeue_wait);
+#endif
+}
+
+static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
+{
+       int old, new;
+
+       old = atomic_read_acquire(&q->requeue_state);
+       do {
+               /* Is requeue done already? */
+               if (old >= Q_REQUEUE_PI_DONE)
+                       return old;
+
+               /*
+                * If not done, then tell the requeue code to either ignore
+                * the waiter or to wake it up once the requeue is done.
+                */
+               new = Q_REQUEUE_PI_WAIT;
+               if (old == Q_REQUEUE_PI_NONE)
+                       new = Q_REQUEUE_PI_IGNORE;
+       } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
+
+       /* If the requeue was in progress, wait for it to complete */
+       if (old == Q_REQUEUE_PI_IN_PROGRESS) {
+#ifdef CONFIG_PREEMPT_RT
+               rcuwait_wait_event(&q->requeue_wait,
+                                  atomic_read(&q->requeue_state) != Q_REQUEUE_PI_WAIT,
+                                  TASK_UNINTERRUPTIBLE);
+#else
+               (void)atomic_cond_read_relaxed(&q->requeue_state, VAL != Q_REQUEUE_PI_WAIT);
+#endif
+       }
+
+       /*
+        * Requeue is now either prohibited or complete. Reread state
+        * because during the wait above it might have changed. Nothing
+        * will modify q->requeue_state after this point.
+        */
+       return atomic_read(&q->requeue_state);
+}
+
 /**
  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
  * @q:         the futex_q
@@ -1820,6 +1959,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
 
        q->lock_ptr = &hb->lock;
 
+       /* Signal locked state to the waiter */
+       futex_requeue_pi_complete(q, 1);
        wake_up_state(q->task, TASK_NORMAL);
 }
 
@@ -1879,10 +2020,21 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
        if (!top_waiter)
                return 0;
 
+       /*
+        * Ensure that this is a waiter sitting in futex_wait_requeue_pi()
+        * and waiting on the 'waitqueue' futex which is always !PI.
+        */
+       if (!top_waiter->rt_waiter || top_waiter->pi_state)
+               ret = -EINVAL;
+
        /* Ensure we requeue to the expected futex. */
        if (!match_futex(top_waiter->requeue_pi_key, key2))
                return -EINVAL;
 
+       /* Ensure that this does not race against an early wakeup */
+       if (!futex_requeue_pi_prepare(top_waiter, NULL))
+               return -EAGAIN;
+
        /*
         * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
         * the contended case or if set_waiters is 1.  The pi_state is returned
@@ -1892,8 +2044,22 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
        ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
                                   exiting, set_waiters);
        if (ret == 1) {
+               /* Dequeue, wake up and update top_waiter::requeue_state */
                requeue_pi_wake_futex(top_waiter, key2, hb2);
                return vpid;
+       } else if (ret < 0) {
+               /* Rewind top_waiter::requeue_state */
+               futex_requeue_pi_complete(top_waiter, ret);
+       } else {
+               /*
+                * futex_lock_pi_atomic() did not acquire the user space
+                * futex, but managed to establish the proxy lock and pi
+                * state. top_waiter::requeue_state cannot be fixed up here
+                * because the waiter is not enqueued on the rtmutex
+                * yet. This is handled at the callsite depending on the
+                * result of rt_mutex_start_proxy_lock() which is
+                * guaranteed to be reached with this function returning 0.
+                */
        }
        return ret;
 }
@@ -1947,24 +2113,36 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
                if (uaddr1 == uaddr2)
                        return -EINVAL;
 
+               /*
+                * futex_requeue() allows the caller to define the number
+                * of waiters to wake up via the @nr_wake argument. With
+                * REQUEUE_PI, waking up more than one waiter is creating
+                * more problems than it solves. Waking up a waiter only
+                * makes sense if the PI futex @uaddr2 is uncontended as
+                * this allows the requeue code to acquire the futex
+                * @uaddr2 before waking the waiter. The waiter can then
+                * return to user space without further action. A secondary
+                * wakeup would just make the futex_wait_requeue_pi()
+                * handling more complex, because that code would have to
+                * look up pi_state and do more or less all the handling
+                * which the requeue code has to do for the to-be-requeued
+                * waiters. So restrict the number of waiters to wake to
+                * one, and only wake it up when the PI futex is
+                * uncontended. Otherwise requeue it and let the unlock of
+                * the PI futex handle the wakeup.
+                *
+                * All REQUEUE_PI users, e.g. pthread_cond_signal() and
+                * pthread_cond_broadcast() must use nr_wake=1.
+                */
+               if (nr_wake != 1)
+                       return -EINVAL;
+
                /*
                 * requeue_pi requires a pi_state, try to allocate it now
                 * without any locks in case it fails.
                 */
                if (refill_pi_state_cache())
                        return -ENOMEM;
-               /*
-                * requeue_pi must wake as many tasks as it can, up to nr_wake
-                * + nr_requeue, since it acquires the rt_mutex prior to
-                * returning to userspace, so as to not leave the rt_mutex with
-                * waiters and no owner.  However, second and third wake-ups
-                * cannot be predicted as they involve race conditions with the
-                * first wake and a fault while looking up the pi_state.  Both
-                * pthread_cond_signal() and pthread_cond_broadcast() should
-                * use nr_wake=1.
-                */
-               if (nr_wake != 1)
-                       return -EINVAL;
        }
 
 retry:
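
The nr_wake == 1 rule matches how this path is reached from userspace: a condition variable paired with a priority-inheritance mutex, signalled one waiter at a time. A minimal sketch with error handling trimmed; whether a given glibc version still issues FUTEX_CMP_REQUEUE_PI for this pattern is not guaranteed and is an assumption here.

#include <pthread.h>

static pthread_mutex_t lock;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

static void init_pi_lock(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&lock, &attr);
	pthread_mutexattr_destroy(&attr);
}

static void *waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!ready)
		pthread_cond_wait(&cond, &lock);  /* FUTEX_WAIT_REQUEUE_PI */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void signaller(void)
{
	pthread_mutex_lock(&lock);
	ready = 1;
	pthread_cond_signal(&cond);  /* FUTEX_CMP_REQUEUE_PI, nr_wake == 1 */
	pthread_mutex_unlock(&lock);
}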
@@ -2014,7 +2192,7 @@ retry_private:
                }
        }
 
-       if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
+       if (requeue_pi) {
                struct task_struct *exiting = NULL;
 
                /*
@@ -2022,6 +2200,8 @@ retry_private:
                 * intend to requeue waiters, force setting the FUTEX_WAITERS
                 * bit.  We force this here where we are able to easily handle
                 * faults rather in the requeue loop below.
+                *
+                * Updates topwaiter::requeue_state if a top waiter exists.
                 */
                ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
                                                 &key2, &pi_state,
@@ -2031,28 +2211,52 @@ retry_private:
                 * At this point the top_waiter has either taken uaddr2 or is
                 * waiting on it.  If the former, then the pi_state will not
                 * exist yet, look it up one more time to ensure we have a
-                * reference to it. If the lock was taken, ret contains the
-                * vpid of the top waiter task.
+                * reference to it. If the lock was taken, @ret contains the
+                * VPID of the top waiter task.
                 * If the lock was not taken, we have pi_state and an initial
                 * refcount on it. In case of an error we have nothing.
+                *
+                * The top waiter's requeue_state is up to date:
+                *
+                *  - If the lock was acquired atomically (ret > 0), then
+                *    the state is Q_REQUEUE_PI_LOCKED.
+                *
+                *  - If the trylock failed with an error (ret < 0) then
+                *    the state is either Q_REQUEUE_PI_NONE, i.e. "nothing
+                *    happened", or Q_REQUEUE_PI_IGNORE when there was an
+                *    interleaved early wakeup.
+                *
+                *  - If the trylock did not succeed (ret == 0) then the
+                *    state is either Q_REQUEUE_PI_IN_PROGRESS or
+                *    Q_REQUEUE_PI_WAIT if an early wakeup interleaved.
+                *    This will be cleaned up in the loop below, which
+                *    cannot fail because futex_proxy_trylock_atomic() did
+                *    the same sanity checks for requeue_pi as the loop
+                *    below does.
                 */
                if (ret > 0) {
                        WARN_ON(pi_state);
                        task_count++;
                        /*
-                        * If we acquired the lock, then the user space value
-                        * of uaddr2 should be vpid. It cannot be changed by
-                        * the top waiter as it is blocked on hb2 lock if it
-                        * tries to do so. If something fiddled with it behind
-                        * our back the pi state lookup might unearth it. So
-                        * we rather use the known value than rereading and
-                        * handing potential crap to lookup_pi_state.
+                        * If futex_proxy_trylock_atomic() acquired the
+                        * user space futex, then the user space value
+                        * @uaddr2 has been set to the @hb1's top waiter
+                        * task VPID. This task is guaranteed to be alive
+                        * and cannot be exiting because it is either
+                        * sleeping or blocked on @hb2 lock.
+                        *
+                        * The @uaddr2 futex cannot have waiters either as
+                        * otherwise futex_proxy_trylock_atomic() would not
+                        * have succeeded.
                         *
-                        * If that call succeeds then we have pi_state and an
-                        * initial refcount on it.
+                        * In order to requeue waiters to @hb2, pi state is
+                        * required. Hand in the VPID value (@ret) and
+                        * allocate PI state with an initial refcount on
+                        * it.
                         */
-                       ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
-                                             &pi_state, &exiting);
+                       ret = attach_to_pi_owner(uaddr2, ret, &key2, &pi_state,
+                                                &exiting);
+                       WARN_ON(ret);
                }
 
                switch (ret) {
@@ -2060,7 +2264,10 @@ retry_private:
                        /* We hold a reference on the pi state. */
                        break;
 
-                       /* If the above failed, then pi_state is NULL */
+               /*
+                * If the above failed, then pi_state is NULL and
+                * waiter::requeue_state is correct.
+                */
                case -EFAULT:
                        double_unlock_hb(hb1, hb2);
                        hb_waiters_dec(hb2);
@@ -2112,18 +2319,17 @@ retry_private:
                        break;
                }
 
-               /*
-                * Wake nr_wake waiters.  For requeue_pi, if we acquired the
-                * lock, we already woke the top_waiter.  If not, it will be
-                * woken by futex_unlock_pi().
-                */
-               if (++task_count <= nr_wake && !requeue_pi) {
-                       mark_wake_futex(&wake_q, this);
+               /* Plain futexes just wake or requeue and are done */
+               if (!requeue_pi) {
+                       if (++task_count <= nr_wake)
+                               mark_wake_futex(&wake_q, this);
+                       else
+                               requeue_futex(this, hb1, hb2, &key2);
                        continue;
                }
 
                /* Ensure we requeue to the expected futex for requeue_pi. */
-               if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
+               if (!match_futex(this->requeue_pi_key, &key2)) {
                        ret = -EINVAL;
                        break;
                }
@@ -2131,54 +2337,67 @@ retry_private:
                /*
                 * Requeue nr_requeue waiters and possibly one more in the case
                 * of requeue_pi if we couldn't acquire the lock atomically.
+                *
+                * Prepare the waiter to take the rt_mutex. Take a refcount
+                * on the pi_state and store the pointer in the futex_q
+                * object of the waiter.
                 */
-               if (requeue_pi) {
+               get_pi_state(pi_state);
+
+               /* Don't requeue when the waiter is already on the way out. */
+               if (!futex_requeue_pi_prepare(this, pi_state)) {
                        /*
-                        * Prepare the waiter to take the rt_mutex. Take a
-                        * refcount on the pi_state and store the pointer in
-                        * the futex_q object of the waiter.
+                        * Early woken waiter signaled that it is on the
+                        * way out. Drop the pi_state reference and try the
+                        * next waiter. @this->pi_state is still NULL.
                         */
-                       get_pi_state(pi_state);
-                       this->pi_state = pi_state;
-                       ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
-                                                       this->rt_waiter,
-                                                       this->task);
-                       if (ret == 1) {
-                               /*
-                                * We got the lock. We do neither drop the
-                                * refcount on pi_state nor clear
-                                * this->pi_state because the waiter needs the
-                                * pi_state for cleaning up the user space
-                                * value. It will drop the refcount after
-                                * doing so.
-                                */
-                               requeue_pi_wake_futex(this, &key2, hb2);
-                               continue;
-                       } else if (ret) {
-                               /*
-                                * rt_mutex_start_proxy_lock() detected a
-                                * potential deadlock when we tried to queue
-                                * that waiter. Drop the pi_state reference
-                                * which we took above and remove the pointer
-                                * to the state from the waiters futex_q
-                                * object.
-                                */
-                               this->pi_state = NULL;
-                               put_pi_state(pi_state);
-                               /*
-                                * We stop queueing more waiters and let user
-                                * space deal with the mess.
-                                */
-                               break;
-                       }
+                       put_pi_state(pi_state);
+                       continue;
+               }
+
+               ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
+                                               this->rt_waiter,
+                                               this->task);
+
+               if (ret == 1) {
+                       /*
+                        * We got the lock. We do neither drop the refcount
+                        * on pi_state nor clear this->pi_state because the
+                        * waiter needs the pi_state for cleaning up the
+                        * user space value. It will drop the refcount
+                        * after doing so. this::requeue_state is updated
+                        * in the wakeup as well.
+                        */
+                       requeue_pi_wake_futex(this, &key2, hb2);
+                       task_count++;
+               } else if (!ret) {
+                       /* Waiter is queued, move it to hb2 */
+                       requeue_futex(this, hb1, hb2, &key2);
+                       futex_requeue_pi_complete(this, 0);
+                       task_count++;
+               } else {
+                       /*
+                        * rt_mutex_start_proxy_lock() detected a potential
+                        * deadlock when we tried to queue that waiter.
+                        * Drop the pi_state reference which we took above
+                        * and remove the pointer to the state from the
+                        * waiters futex_q object.
+                        */
+                       this->pi_state = NULL;
+                       put_pi_state(pi_state);
+                       futex_requeue_pi_complete(this, ret);
+                       /*
+                        * We stop queueing more waiters and let user space
+                        * deal with the mess.
+                        */
+                       break;
                }
-               requeue_futex(this, hb1, hb2, &key2);
        }
 
        /*
-        * We took an extra initial reference to the pi_state either
-        * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
-        * need to drop it here again.
+        * We took an extra initial reference to the pi_state either in
+        * futex_proxy_trylock_atomic() or in attach_to_pi_owner(). We need
+        * to drop it here again.
         */
        put_pi_state(pi_state);
 
@@ -2357,7 +2576,7 @@ static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
         * Modifying pi_state _before_ the user space value would leave the
         * pi_state in an inconsistent state when we fault here, because we
         * need to drop the locks to handle the fault. This might be observed
-        * in the PID check in lookup_pi_state.
+        * in the PID checks when attaching to PI state.
         */
 retry:
        if (!argowner) {
@@ -2614,8 +2833,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
  *
  * Setup the futex_q and locate the hash_bucket.  Get the futex value and
  * compare it with the expected value.  Handle atomic faults internally.
- * Return with the hb lock held and a q.key reference on success, and unlocked
- * with no q.key reference on failure.
+ * Return with the hb lock held on success, and unlocked on failure.
  *
  * Return:
  *  -  0 - uaddr contains val and hb has been locked;
@@ -2693,8 +2911,8 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
                               current->timer_slack_ns);
 retry:
        /*
-        * Prepare to wait on uaddr. On success, holds hb lock and increments
-        * q.key refs.
+        * Prepare to wait on uaddr. On success, it holds hb->lock and q
+        * is initialized.
         */
        ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
        if (ret)
@@ -2705,7 +2923,6 @@ retry:
 
        /* If we were woken (and unqueued), we succeeded, whatever. */
        ret = 0;
-       /* unqueue_me() drops q.key ref */
        if (!unqueue_me(&q))
                goto out;
        ret = -ETIMEDOUT;
@@ -3072,27 +3289,22 @@ pi_faulted:
 }
 
 /**
- * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
+ * handle_early_requeue_pi_wakeup() - Handle early wakeup on the initial futex
  * @hb:                the hash_bucket futex_q was original enqueued on
  * @q:         the futex_q woken while waiting to be requeued
- * @key2:      the futex_key of the requeue target futex
  * @timeout:   the timeout associated with the wait (NULL if none)
  *
- * Detect if the task was woken on the initial futex as opposed to the requeue
- * target futex.  If so, determine if it was a timeout or a signal that caused
- * the wakeup and return the appropriate error code to the caller.  Must be
- * called with the hb lock held.
+ * Determine the cause for the early wakeup.
  *
  * Return:
- *  -  0 = no early wakeup detected;
- *  - <0 = -ETIMEDOUT or -ERESTARTNOINTR
+ *  -EWOULDBLOCK or -ETIMEDOUT or -ERESTARTNOINTR
  */
 static inline
 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
-                                  struct futex_q *q, union futex_key *key2,
+                                  struct futex_q *q,
                                   struct hrtimer_sleeper *timeout)
 {
-       int ret = 0;
+       int ret;
 
        /*
         * With the hb lock held, we avoid races while we process the wakeup.
@@ -3101,22 +3313,21 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
         * It can't be requeued from uaddr2 to something else since we don't
         * support a PI aware source futex for requeue.
         */
-       if (!match_futex(&q->key, key2)) {
-               WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
-               /*
-                * We were woken prior to requeue by a timeout or a signal.
-                * Unqueue the futex_q and determine which it was.
-                */
-               plist_del(&q->list, &hb->chain);
-               hb_waiters_dec(hb);
+       WARN_ON_ONCE(&hb->lock != q->lock_ptr);
 
-               /* Handle spurious wakeups gracefully */
-               ret = -EWOULDBLOCK;
-               if (timeout && !timeout->task)
-                       ret = -ETIMEDOUT;
-               else if (signal_pending(current))
-                       ret = -ERESTARTNOINTR;
-       }
+       /*
+        * We were woken prior to requeue by a timeout or a signal.
+        * Unqueue the futex_q and determine which it was.
+        */
+       plist_del(&q->list, &hb->chain);
+       hb_waiters_dec(hb);
+
+       /* Handle spurious wakeups gracefully */
+       ret = -EWOULDBLOCK;
+       if (timeout && !timeout->task)
+               ret = -ETIMEDOUT;
+       else if (signal_pending(current))
+               ret = -ERESTARTNOINTR;
        return ret;
 }
 
@@ -3169,6 +3380,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
        struct futex_hash_bucket *hb;
        union futex_key key2 = FUTEX_KEY_INIT;
        struct futex_q q = futex_q_init;
+       struct rt_mutex_base *pi_mutex;
        int res, ret;
 
        if (!IS_ENABLED(CONFIG_FUTEX_PI))
@@ -3198,8 +3410,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
        q.requeue_pi_key = &key2;
 
        /*
-        * Prepare to wait on uaddr. On success, increments q.key (key1) ref
-        * count.
+        * Prepare to wait on uaddr. On success, it holds hb->lock and q
+        * is initialized.
         */
        ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
        if (ret)
@@ -3218,32 +3430,22 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
        /* Queue the futex_q, drop the hb lock, wait for wakeup. */
        futex_wait_queue_me(hb, &q, to);
 
-       spin_lock(&hb->lock);
-       ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-       spin_unlock(&hb->lock);
-       if (ret)
-               goto out;
-
-       /*
-        * In order for us to be here, we know our q.key == key2, and since
-        * we took the hb->lock above, we also know that futex_requeue() has
-        * completed and we no longer have to concern ourselves with a wakeup
-        * race with the atomic proxy lock acquisition by the requeue code. The
-        * futex_requeue dropped our key1 reference and incremented our key2
-        * reference count.
-        */
+       switch (futex_requeue_pi_wakeup_sync(&q)) {
+       case Q_REQUEUE_PI_IGNORE:
+               /* The waiter is still on uaddr1 */
+               spin_lock(&hb->lock);
+               ret = handle_early_requeue_pi_wakeup(hb, &q, to);
+               spin_unlock(&hb->lock);
+               break;
 
-       /*
-        * Check if the requeue code acquired the second futex for us and do
-        * any pertinent fixup.
-        */
-       if (!q.rt_waiter) {
+       case Q_REQUEUE_PI_LOCKED:
+               /* The requeue acquired the lock */
                if (q.pi_state && (q.pi_state->owner != current)) {
                        spin_lock(q.lock_ptr);
                        ret = fixup_owner(uaddr2, &q, true);
                        /*
-                        * Drop the reference to the pi state which
-                        * the requeue_pi() code acquired for us.
+                        * Drop the reference to the pi state which the
+                        * requeue_pi() code acquired for us.
                         */
                        put_pi_state(q.pi_state);
                        spin_unlock(q.lock_ptr);
@@ -3253,18 +3455,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                         */
                        ret = ret < 0 ? ret : 0;
                }
-       } else {
-               struct rt_mutex *pi_mutex;
+               break;
 
-               /*
-                * We have been woken up by futex_unlock_pi(), a timeout, or a
-                * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
-                * the pi_state.
-                */
-               WARN_ON(!q.pi_state);
+       case Q_REQUEUE_PI_DONE:
+               /* Requeue completed. Current is 'pi_blocked_on' the rtmutex */
                pi_mutex = &q.pi_state->pi_mutex;
                ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
 
+               /* Current is no longer pi_blocked_on */
                spin_lock(q.lock_ptr);
                if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
                        ret = 0;
@@ -3284,17 +3482,21 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 
                unqueue_me_pi(&q);
                spin_unlock(q.lock_ptr);
-       }
 
-       if (ret == -EINTR) {
-               /*
-                * We've already been requeued, but cannot restart by calling
-                * futex_lock_pi() directly. We could restart this syscall, but
-                * it would detect that the user space "val" changed and return
-                * -EWOULDBLOCK.  Save the overhead of the restart and return
-                * -EWOULDBLOCK directly.
-                */
-               ret = -EWOULDBLOCK;
+               if (ret == -EINTR) {
+                       /*
+                        * We've already been requeued, but cannot restart
+                        * by calling futex_lock_pi() directly. We could
+                        * restart this syscall, but it would detect that
+                        * the user space "val" changed and return
+                        * -EWOULDBLOCK.  Save the overhead of the restart
+                        * and return -EWOULDBLOCK directly.
+                        */
+                       ret = -EWOULDBLOCK;
+               }
+               break;
+       default:
+               BUG();
        }
 
 out:
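
The switch above drives the waiter side of the reworked requeue_pi handling: futex_requeue_pi_wakeup_sync() reports whether the requeue side left the waiter untouched (IGNORE), already acquired the lock on its behalf (LOCKED), or completed the requeue so the task is now blocked on the rtmutex (DONE). A minimal, illustrative sketch of settling such a wakeup/requeue race with a single try_cmpxchg loop follows; the enum values and example_* names are assumptions for illustration and do not reflect the kernel's actual state encoding.

#include <linux/atomic.h>

/* Illustrative states, mirroring the three cases handled above. */
enum example_requeue_state {
        EXAMPLE_REQUEUE_NONE,           /* requeue has not touched this waiter */
        EXAMPLE_REQUEUE_IGNORE,         /* waiter claimed the wakeup itself */
        EXAMPLE_REQUEUE_DONE,           /* requeued; now blocked on the rtmutex */
        EXAMPLE_REQUEUE_LOCKED,         /* requeue acquired the lock for us */
};

/*
 * Waiter side: either take ownership of the wakeup (IGNORE) or accept
 * whatever the requeue side already decided (DONE/LOCKED).
 */
static int example_requeue_wakeup_sync(atomic_t *state)
{
        int old = atomic_read(state);

        do {
                if (old != EXAMPLE_REQUEUE_NONE)
                        return old;     /* requeue side already settled it */
        } while (!atomic_try_cmpxchg(state, &old, EXAMPLE_REQUEUE_IGNORE));

        return EXAMPLE_REQUEUE_IGNORE;
}
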
index 4d89ad4..f7ff891 100644 (file)
@@ -355,7 +355,7 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
                goto fail_npresmsk;
 
        /* Stabilize the cpumasks */
-       get_online_cpus();
+       cpus_read_lock();
        build_node_to_cpumask(node_to_cpumask);
 
        /* Spread on present CPUs starting from affd->pre_vectors */
@@ -384,7 +384,7 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
                nr_others = ret;
 
  fail_build_affinity:
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (ret >= 0)
                WARN_ON(nr_present + nr_others < numvecs);
@@ -505,9 +505,9 @@ unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
        if (affd->calc_sets) {
                set_vecs = maxvec - resv;
        } else {
-               get_online_cpus();
+               cpus_read_lock();
                set_vecs = cpumask_weight(cpu_possible_mask);
-               put_online_cpus();
+               cpus_read_unlock();
        }
 
        return resv + min(set_vecs, maxvec - resv);
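
These hunks only rename the CPU hotplug protection: cpus_read_lock()/cpus_read_unlock() are the current names for what get_online_cpus()/put_online_cpus() used to provide, i.e. holding the hotplug lock for reading so the CPU masks cannot change underneath. A minimal usage sketch, with a made-up helper name:

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Hold the hotplug read lock while the online mask must stay stable. */
static unsigned int example_count_online_cpus(void)
{
        unsigned int nr;

        cpus_read_lock();               /* was get_online_cpus() */
        nr = cpumask_weight(cpu_online_mask);
        cpus_read_unlock();             /* was put_online_cpus() */

        return nr;
}
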
index 7f04c7d..a98bcfc 100644 (file)
@@ -265,8 +265,11 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
        } else {
                switch (__irq_startup_managed(desc, aff, force)) {
                case IRQ_STARTUP_NORMAL:
+                       if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
+                               irq_setup_affinity(desc);
                        ret = __irq_startup(desc);
-                       irq_setup_affinity(desc);
+                       if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
+                               irq_setup_affinity(desc);
                        break;
                case IRQ_STARTUP_MANAGED:
                        irq_do_set_affinity(d, aff, false);
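
The hunk above lets an irq_chip request that its affinity be programmed before the interrupt is started up by setting IRQCHIP_AFFINITY_PRE_STARTUP; chips without the flag keep the old order and get their affinity set after __irq_startup(). A sketch of a chip opting in; all example_* names are made up:

#include <linux/irq.h>

static void example_mask(struct irq_data *d) { /* mask in hardware */ }
static void example_unmask(struct irq_data *d) { /* unmask in hardware */ }

static int example_set_affinity(struct irq_data *d,
                                const struct cpumask *mask, bool force)
{
        /* Program the routing registers for @mask here. */
        return IRQ_SET_MASK_OK;
}

static struct irq_chip example_chip = {
        .name                   = "example",
        .irq_mask               = example_mask,
        .irq_unmask             = example_unmask,
        .irq_set_affinity       = example_set_affinity,
        /* Affinity must be valid before the first startup of the irq. */
        .flags                  = IRQCHIP_AFFINITY_PRE_STARTUP,
};
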
index 02236b1..39a41c5 100644 (file)
@@ -166,7 +166,7 @@ void irq_migrate_all_off_this_cpu(void)
                raw_spin_unlock(&desc->lock);
 
                if (affinity_broken) {
-                       pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
+                       pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
                                            irq, smp_processor_id());
                }
        }
index f8f23af..cc7cdd2 100644 (file)
@@ -240,9 +240,8 @@ irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
                       void __iomem *reg_base, irq_flow_handler_t handler)
 {
        struct irq_chip_generic *gc;
-       unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
 
-       gc = kzalloc(sz, GFP_KERNEL);
+       gc = kzalloc(struct_size(gc, chip_types, num_ct), GFP_KERNEL);
        if (gc) {
                irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
                                      handler);
@@ -288,8 +287,11 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
 {
        struct irq_domain_chip_generic *dgc;
        struct irq_chip_generic *gc;
-       int numchips, sz, i;
        unsigned long flags;
+       int numchips, i;
+       size_t dgc_sz;
+       size_t gc_sz;
+       size_t sz;
        void *tmp;
 
        if (d->gc)
@@ -300,8 +302,9 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
                return -EINVAL;
 
        /* Allocate a pointer, generic chip and chiptypes for each chip */
-       sz = sizeof(*dgc) + numchips * sizeof(gc);
-       sz += numchips * (sizeof(*gc) + num_ct * sizeof(struct irq_chip_type));
+       gc_sz = struct_size(gc, chip_types, num_ct);
+       dgc_sz = struct_size(dgc, gc, numchips);
+       sz = dgc_sz + numchips * gc_sz;
 
        tmp = dgc = kzalloc(sz, GFP_KERNEL);
        if (!dgc)
@@ -314,7 +317,7 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
        d->gc = dgc;
 
        /* Calc pointer to the first generic chip */
-       tmp += sizeof(*dgc) + numchips * sizeof(gc);
+       tmp += dgc_sz;
        for (i = 0; i < numchips; i++) {
                /* Store the pointer to the generic chip */
                dgc->gc[i] = gc = tmp;
@@ -331,7 +334,7 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
                list_add_tail(&gc->list, &gc_list);
                raw_spin_unlock_irqrestore(&gc_lock, flags);
                /* Calc pointer to the next generic chip */
-               tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
+               tmp += gc_sz;
        }
        return 0;
 }
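
Both allocations above switch from open-coded 'sizeof(*p) + n * sizeof(elem)' arithmetic to struct_size(), which sizes a structure with a trailing flexible array and saturates instead of wrapping on overflow. A self-contained sketch of the pattern (the example_* type is made up):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_table {
        unsigned int    nr_slots;
        unsigned int    slots[];        /* flexible array member */
};

static struct example_table *example_alloc(unsigned int nr_slots)
{
        struct example_table *t;

        /* struct_size() == sizeof(*t) + nr_slots * sizeof(t->slots[0]) */
        t = kzalloc(struct_size(t, slots, nr_slots), GFP_KERNEL);
        if (t)
                t->nr_slots = nr_slots;

        return t;
}
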
index 52f11c7..08ce7da 100644 (file)
 /**
  * irq_reserve_ipi() - Setup an IPI to destination cpumask
  * @domain:    IPI domain
- * @dest:      cpumask of cpus which can receive the IPI
+ * @dest:      cpumask of CPUs which can receive the IPI
  *
  * Allocate a virq that can be used to send IPI to any CPU in dest mask.
  *
- * On success it'll return linux irq number and error code on failure
+ * Return: Linux IRQ number on success or error code on failure
  */
 int irq_reserve_ipi(struct irq_domain *domain,
                             const struct cpumask *dest)
@@ -104,13 +104,13 @@ free_descs:
 
 /**
  * irq_destroy_ipi() - unreserve an IPI that was previously allocated
- * @irq:       linux irq number to be destroyed
- * @dest:      cpumask of cpus which should have the IPI removed
+ * @irq:       Linux IRQ number to be destroyed
+ * @dest:      cpumask of CPUs which should have the IPI removed
  *
 * The IPIs allocated with irq_reserve_ipi() are returned to the system,
  * destroying all virqs associated with them.
  *
- * Return 0 on success or error code on failure.
+ * Return: %0 on success or error code on failure.
  */
 int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 {
@@ -150,14 +150,14 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 }
 
 /**
- * ipi_get_hwirq - Get the hwirq associated with an IPI to a cpu
- * @irq:       linux irq number
- * @cpu:       the target cpu
+ * ipi_get_hwirq - Get the hwirq associated with an IPI to a CPU
+ * @irq:       Linux IRQ number
+ * @cpu:       the target CPU
  *
 * When dealing with coprocessor IPIs, we need to inform the coprocessor of
  * the hwirq it needs to use to receive and send IPIs.
  *
- * Returns hwirq value on success and INVALID_HWIRQ on failure.
+ * Return: hwirq value on success or INVALID_HWIRQ on failure.
  */
 irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
 {
@@ -216,7 +216,7 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
  * This function is for architecture or core code to speed up IPI sending. Not
  * usable from driver code.
  *
- * Returns zero on success and negative error number on failure.
+ * Return: %0 on success or negative error number on failure.
  */
 int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
 {
@@ -250,7 +250,7 @@ int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
 }
 
 /**
- * ipi_send_mask - send an IPI to target Linux SMP CPU(s)
+ * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
  * @desc:      pointer to irq_desc of the IRQ
  * @dest:      dest CPU(s), must be a subset of the mask passed to
  *             irq_reserve_ipi()
@@ -258,7 +258,7 @@ int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
  * This function is for architecture or core code to speed up IPI sending. Not
  * usable from driver code.
  *
- * Returns zero on success and negative error number on failure.
+ * Return: %0 on success or negative error number on failure.
  */
 int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
 {
@@ -298,11 +298,11 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
 
 /**
  * ipi_send_single - Send an IPI to a single CPU
- * @virq:      linux irq number from irq_reserve_ipi()
+ * @virq:      Linux IRQ number from irq_reserve_ipi()
 * @cpu:       destination CPU, must be in the destination mask passed to
  *             irq_reserve_ipi()
  *
- * Returns zero on success and negative error number on failure.
+ * Return: %0 on success or negative error number on failure.
  */
 int ipi_send_single(unsigned int virq, unsigned int cpu)
 {
@@ -319,11 +319,11 @@ EXPORT_SYMBOL_GPL(ipi_send_single);
 
 /**
  * ipi_send_mask - Send an IPI to target CPU(s)
- * @virq:      linux irq number from irq_reserve_ipi()
+ * @virq:      Linux IRQ number from irq_reserve_ipi()
  * @dest:      dest CPU(s), must be a subset of the mask passed to
  *             irq_reserve_ipi()
  *
- * Returns zero on success and negative error number on failure.
+ * Return: %0 on success or negative error number on failure.
  */
 int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
 {
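
The changes in this file are documentation only: the comments are converted to kernel-doc's "Return:" section and %CONSTANT markup, and the "linux irq"/"cpu" spellings are normalized. A minimal example of the resulting style, using a made-up function:

/**
 * example_send_ipi - Send an IPI to a single CPU
 * @virq:      Linux IRQ number from irq_reserve_ipi()
 * @cpu:       destination CPU, must be in the mask passed to irq_reserve_ipi()
 *
 * Return: %0 on success or a negative error number on failure.
 */
int example_send_ipi(unsigned int virq, unsigned int cpu);
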
index fadb937..4e3c29b 100644 (file)
@@ -188,7 +188,7 @@ static ssize_t hwirq_show(struct kobject *kobj,
 
        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.domain)
-               ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
+               ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
        raw_spin_unlock_irq(&desc->lock);
 
        return ret;
index 51c483c..62be161 100644 (file)
@@ -1215,6 +1215,7 @@ int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
        irqd->chip = ERR_PTR(-ENOTCONN);
        return 0;
 }
+EXPORT_SYMBOL_GPL(irq_domain_disconnect_hierarchy);
 
 static int irq_domain_trim_hierarchy(unsigned int virq)
 {
index ef30b47..27667e8 100644 (file)
 #include "internals.h"
 
 #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
-__read_mostly bool force_irqthreads;
-EXPORT_SYMBOL_GPL(force_irqthreads);
+DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);
 
 static int __init setup_forced_irqthreads(char *arg)
 {
-       force_irqthreads = true;
+       static_branch_enable(&force_irqthreads_key);
        return 0;
 }
 early_param("threadirqs", setup_forced_irqthreads);
@@ -1260,8 +1259,8 @@ static int irq_thread(void *data)
        irqreturn_t (*handler_fn)(struct irq_desc *desc,
                        struct irqaction *action);
 
-       if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
-                                       &action->thread_flags))
+       if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
+                                          &action->thread_flags))
                handler_fn = irq_forced_thread_fn;
        else
                handler_fn = irq_thread_fn;
@@ -1322,7 +1321,7 @@ EXPORT_SYMBOL_GPL(irq_wake_thread);
 
 static int irq_setup_forced_threading(struct irqaction *new)
 {
-       if (!force_irqthreads)
+       if (!force_irqthreads())
                return 0;
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return 0;
@@ -2072,9 +2071,9 @@ const void *free_nmi(unsigned int irq, void *dev_id)
  *     request_threaded_irq - allocate an interrupt line
  *     @irq: Interrupt line to allocate
  *     @handler: Function to be called when the IRQ occurs.
- *               Primary handler for threaded interrupts
- *               If NULL and thread_fn != NULL the default
- *               primary handler is installed
+ *               Primary handler for threaded interrupts.
+ *               If handler is NULL and thread_fn != NULL
+ *               the default primary handler is installed.
  *     @thread_fn: Function called from the irq handler thread
  *                 If NULL, no irq thread is created
  *     @irqflags: Interrupt type flags
@@ -2108,7 +2107,7 @@ const void *free_nmi(unsigned int irq, void *dev_id)
  *
  *     IRQF_SHARED             Interrupt is shared
  *     IRQF_TRIGGER_*          Specify active edge(s) or level
- *
+ *     IRQF_ONESHOT            Run thread_fn with interrupt line masked
  */
 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
                         irq_handler_t thread_fn, unsigned long irqflags,
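
The force_irqthreads flag above becomes a static key: instead of loading a global bool on every interrupt-handling path, the branch is patched in place once "threadirqs" is set on the command line, and force_irqthreads() presumably wraps the static_branch test. A generic sketch of the pattern with made-up names:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(example_key);

static void example_enable(void)
{
        static_branch_enable(&example_key);     /* patch the branch once */
}

static bool example_fast_path(void)
{
        /* Compiles to a NOP (or jump) that is rewritten when enabled. */
        return static_branch_unlikely(&example_key);
}
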
index 578596e..bbfb264 100644 (file)
@@ -280,7 +280,8 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
 /**
  * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
  * @m:         Matrix pointer
- * @cpu:       On which CPU the interrupt should be allocated
+ * @msk:       Which CPUs to search in
+ * @mapped_cpu:        Pointer to store the CPU for which the irq was allocated
  */
 int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
                             unsigned int *mapped_cpu)
index c41965e..6a5ecee 100644 (file)
 #include <linux/irqdomain.h>
 #include <linux/msi.h>
 #include <linux/slab.h>
+#include <linux/pci.h>
 
 #include "internals.h"
 
 /**
- * alloc_msi_entry - Allocate an initialize msi_entry
+ * alloc_msi_entry - Allocate an initialized msi_desc
  * @dev:       Pointer to the device for which this is allocated
  * @nvec:      The number of vectors used in this entry
 * @affinity:  Optional pointer to an affinity mask array of size @nvec
  *
- * If @affinity is not NULL then an affinity array[@nvec] is allocated
+ * If @affinity is not %NULL then an affinity array[@nvec] is allocated
  * and the affinity masks and flags from @affinity are copied.
+ *
+ * Return: pointer to allocated &msi_desc on success or %NULL on failure
  */
 struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
                                 const struct irq_affinity_desc *affinity)
@@ -69,6 +72,139 @@ void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
 }
 EXPORT_SYMBOL_GPL(get_cached_msi_msg);
 
+static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct msi_desc *entry;
+       bool is_msix = false;
+       unsigned long irq;
+       int retval;
+
+       retval = kstrtoul(attr->attr.name, 10, &irq);
+       if (retval)
+               return retval;
+
+       entry = irq_get_msi_desc(irq);
+       if (!entry)
+               return -ENODEV;
+
+       if (dev_is_pci(dev))
+               is_msix = entry->msi_attrib.is_msix;
+
+       return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
+}
+
+/**
+ * msi_populate_sysfs - Populate msi_irqs sysfs entries for devices
+ * @dev:       The device (PCI, platform, etc.) that will get sysfs entries
+ *
+ * Return: attribute_group ** so that bus-specific MSI code can save it
+ * somewhere while initializing MSI IRQs. If the device has no MSI IRQs,
+ * return %NULL; if populating sysfs fails, return an ERR_PTR().
+ */
+const struct attribute_group **msi_populate_sysfs(struct device *dev)
+{
+       const struct attribute_group **msi_irq_groups;
+       struct attribute **msi_attrs, *msi_attr;
+       struct device_attribute *msi_dev_attr;
+       struct attribute_group *msi_irq_group;
+       struct msi_desc *entry;
+       int ret = -ENOMEM;
+       int num_msi = 0;
+       int count = 0;
+       int i;
+
+       /* Determine how many msi entries we have */
+       for_each_msi_entry(entry, dev)
+               num_msi += entry->nvec_used;
+       if (!num_msi)
+               return NULL;
+
+       /* Dynamically create the MSI attributes for the device */
+       msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL);
+       if (!msi_attrs)
+               return ERR_PTR(-ENOMEM);
+
+       for_each_msi_entry(entry, dev) {
+               for (i = 0; i < entry->nvec_used; i++) {
+                       msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
+                       if (!msi_dev_attr)
+                               goto error_attrs;
+                       msi_attrs[count] = &msi_dev_attr->attr;
+
+                       sysfs_attr_init(&msi_dev_attr->attr);
+                       msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
+                                                           entry->irq + i);
+                       if (!msi_dev_attr->attr.name)
+                               goto error_attrs;
+                       msi_dev_attr->attr.mode = 0444;
+                       msi_dev_attr->show = msi_mode_show;
+                       ++count;
+               }
+       }
+
+       msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
+       if (!msi_irq_group)
+               goto error_attrs;
+       msi_irq_group->name = "msi_irqs";
+       msi_irq_group->attrs = msi_attrs;
+
+       msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL);
+       if (!msi_irq_groups)
+               goto error_irq_group;
+       msi_irq_groups[0] = msi_irq_group;
+
+       ret = sysfs_create_groups(&dev->kobj, msi_irq_groups);
+       if (ret)
+               goto error_irq_groups;
+
+       return msi_irq_groups;
+
+error_irq_groups:
+       kfree(msi_irq_groups);
+error_irq_group:
+       kfree(msi_irq_group);
+error_attrs:
+       count = 0;
+       msi_attr = msi_attrs[count];
+       while (msi_attr) {
+               msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
+               kfree(msi_attr->name);
+               kfree(msi_dev_attr);
+               ++count;
+               msi_attr = msi_attrs[count];
+       }
+       kfree(msi_attrs);
+       return ERR_PTR(ret);
+}
+
+/**
+ * msi_destroy_sysfs - Destroy msi_irqs sysfs entries for devices
+ * @dev:               The device (PCI, platform, etc.) whose sysfs entries will be removed
+ * @msi_irq_groups:    attribute_group for device msi_irqs entries
+ */
+void msi_destroy_sysfs(struct device *dev, const struct attribute_group **msi_irq_groups)
+{
+       struct device_attribute *dev_attr;
+       struct attribute **msi_attrs;
+       int count = 0;
+
+       if (msi_irq_groups) {
+               sysfs_remove_groups(&dev->kobj, msi_irq_groups);
+               msi_attrs = msi_irq_groups[0]->attrs;
+               while (msi_attrs[count]) {
+                       dev_attr = container_of(msi_attrs[count],
+                                       struct device_attribute, attr);
+                       kfree(dev_attr->attr.name);
+                       kfree(dev_attr);
+                       ++count;
+               }
+               kfree(msi_attrs);
+               kfree(msi_irq_groups[0]);
+               kfree(msi_irq_groups);
+       }
+}
+
 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
 static inline void irq_chip_write_msi_msg(struct irq_data *data,
                                          struct msi_msg *msg)
@@ -97,6 +233,8 @@ static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
  *
  * Intended to be used by MSI interrupt controllers which are
  * implemented with hierarchical domains.
+ *
+ * Return: IRQ_SET_MASK_* result code
  */
 int msi_domain_set_affinity(struct irq_data *irq_data,
                            const struct cpumask *mask, bool force)
@@ -277,10 +415,12 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info)
 }
 
 /**
- * msi_create_irq_domain - Create a MSI interrupt domain
+ * msi_create_irq_domain - Create an MSI interrupt domain
  * @fwnode:    Optional fwnode of the interrupt controller
  * @info:      MSI domain info
  * @parent:    Parent irq domain
+ *
+ * Return: pointer to the created &struct irq_domain or %NULL on failure
  */
 struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
                                         struct msi_domain_info *info,
@@ -476,11 +616,6 @@ skip_activate:
        return 0;
 
 cleanup:
-       for_each_msi_vector(desc, i, dev) {
-               irq_data = irq_domain_get_irq_data(domain, i);
-               if (irqd_is_activated(irq_data))
-                       irq_domain_deactivate_irq(irq_data);
-       }
        msi_domain_free_irqs(domain, dev);
        return ret;
 }
@@ -492,7 +627,7 @@ cleanup:
  *             are allocated
  * @nvec:      The number of interrupts to allocate
  *
- * Return0 on success or an error code.
+ * Return: %0 on success or an error code.
  */
 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                          int nvec)
@@ -505,7 +640,15 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 
 void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
 {
+       struct irq_data *irq_data;
        struct msi_desc *desc;
+       int i;
+
+       for_each_msi_vector(desc, i, dev) {
+               irq_data = irq_domain_get_irq_data(domain, i);
+               if (irqd_is_activated(irq_data))
+                       irq_domain_deactivate_irq(irq_data);
+       }
 
        for_each_msi_entry(desc, dev) {
                /*
@@ -521,7 +664,7 @@ void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
 }
 
 /**
- * __msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated tp @dev
+ * msi_domain_free_irqs - Free interrupts from an MSI interrupt @domain associated with @dev
 * @domain:    The domain managing the interrupts
 * @dev:       Pointer to device struct of the device for which the interrupts
 *             are freed
@@ -538,8 +681,7 @@ void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
  * msi_get_domain_info - Get the MSI interrupt domain info for @domain
  * @domain:    The interrupt domain to retrieve data from
  *
- * Returns the pointer to the msi_domain_info stored in
- * @domain->host_data.
+ * Return: the pointer to the msi_domain_info stored in @domain->host_data.
  */
 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
 {
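
msi_populate_sysfs() above creates one read-only attribute per allocated vector, named after the Linux IRQ number, whose show() callback (msi_mode_show()) parses that name back and reports "msi" or "msix". A reduced sketch of the same show() pattern, illustrative only:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/*
 * The attribute's name carries the IRQ number, so show() recovers it from
 * attr->attr.name and emits it with the PAGE_SIZE-safe sysfs_emit().
 */
static ssize_t example_irq_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        unsigned long irq;
        int ret;

        ret = kstrtoul(attr->attr.name, 10, &irq);
        if (ret)
                return ret;

        return sysfs_emit(buf, "%lu\n", irq);
}
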
index ce0adb2..ca71123 100644 (file)
@@ -227,7 +227,7 @@ unlock:
 }
 
 /**
- * irq_pm_syscore_ops - enable interrupt lines early
+ * irq_pm_syscore_resume - enable interrupt lines early
  *
  * Enable all interrupt lines with %IRQF_EARLY_RESUME set.
  */
index 7c5cd42..ee595ec 100644 (file)
@@ -513,7 +513,7 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_printf(p, " %8s", "None");
        }
        if (desc->irq_data.domain)
-               seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
+               seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
        else
                seq_printf(p, " %*s", prec, "");
 #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
index d309d6f..c43e2ac 100644 (file)
@@ -453,6 +453,11 @@ static __always_inline void __irq_timings_store(int irq, struct irqt_stat *irqs,
         */
        index = irq_timings_interval_index(interval);
 
+       if (index > PREDICTION_BUFFER_SIZE - 1) {
+               irqs->count = 0;
+               return;
+       }
+
        /*
         * Store the index as an element of the pattern in another
         * circular array.
@@ -794,12 +799,14 @@ static int __init irq_timings_test_irqs(struct timings_intervals *ti)
 
                __irq_timings_store(irq, irqs, ti->intervals[i]);
                if (irqs->circ_timings[i & IRQ_TIMINGS_MASK] != index) {
+                       ret = -EBADSLT;
                        pr_err("Failed to store in the circular buffer\n");
                        goto out;
                }
        }
 
        if (irqs->count != ti->count) {
+               ret = -ERANGE;
                pr_err("Count differs\n");
                goto out;
        }
index e65de17..1d1d1b0 100644 (file)
@@ -64,7 +64,7 @@ static noinline void microbenchmark(unsigned long iters)
 {
        const struct kcsan_ctx ctx_save = current->kcsan_ctx;
        const bool was_enabled = READ_ONCE(kcsan_enabled);
-       cycles_t cycles;
+       u64 cycles;
 
        /* We may have been called from an atomic region; reset context. */
        memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
index 3572808..d51cabf 100644 (file)
@@ -24,7 +24,8 @@ obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
-obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+obj-$(CONFIG_RT_MUTEXES) += rtmutex_api.o
+obj-$(CONFIG_PREEMPT_RT) += spinlock_rt.o ww_rt_mutex.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
index b3adb40..7c5a4a0 100644 (file)
@@ -59,7 +59,7 @@ static struct task_struct **writer_tasks;
 static struct task_struct **reader_tasks;
 
 static bool lock_is_write_held;
-static bool lock_is_read_held;
+static atomic_t lock_is_read_held;
 static unsigned long last_lock_release;
 
 struct lock_stress_stats {
@@ -682,7 +682,7 @@ static int lock_torture_writer(void *arg)
                if (WARN_ON_ONCE(lock_is_write_held))
                        lwsp->n_lock_fail++;
                lock_is_write_held = true;
-               if (WARN_ON_ONCE(lock_is_read_held))
+               if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
                        lwsp->n_lock_fail++; /* rare, but... */
 
                lwsp->n_lock_acquired++;
@@ -717,13 +717,13 @@ static int lock_torture_reader(void *arg)
                        schedule_timeout_uninterruptible(1);
 
                cxt.cur_ops->readlock(tid);
-               lock_is_read_held = true;
+               atomic_inc(&lock_is_read_held);
                if (WARN_ON_ONCE(lock_is_write_held))
                        lrsp->n_lock_fail++; /* rare, but... */
 
                lrsp->n_lock_acquired++;
                cxt.cur_ops->read_delay(&rand);
-               lock_is_read_held = false;
+               atomic_dec(&lock_is_read_held);
                cxt.cur_ops->readunlock(tid);
 
                stutter_wait("lock_torture_reader");
@@ -738,20 +738,22 @@ static int lock_torture_reader(void *arg)
 static void __torture_print_stats(char *page,
                                  struct lock_stress_stats *statp, bool write)
 {
+       long cur;
        bool fail = false;
        int i, n_stress;
-       long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
+       long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
        long long sum = 0;
 
        n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
        for (i = 0; i < n_stress; i++) {
-               if (statp[i].n_lock_fail)
+               if (data_race(statp[i].n_lock_fail))
                        fail = true;
-               sum += statp[i].n_lock_acquired;
-               if (max < statp[i].n_lock_acquired)
-                       max = statp[i].n_lock_acquired;
-               if (min > statp[i].n_lock_acquired)
-                       min = statp[i].n_lock_acquired;
+               cur = data_race(statp[i].n_lock_acquired);
+               sum += cur;
+               if (max < cur)
+                       max = cur;
+               if (min > cur)
+                       min = cur;
        }
        page += sprintf(page,
                        "%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
@@ -996,7 +998,6 @@ static int __init lock_torture_init(void)
                }
 
                if (nreaders_stress) {
-                       lock_is_read_held = false;
                        cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
                                                 sizeof(*cxt.lrsa),
                                                 GFP_KERNEL);
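
lock_is_read_held becomes an atomic_t above because several reader threads update it concurrently, and the statistics are read locklessly, so those reads are wrapped in data_race() to mark the access as an intentional race for KCSAN. A small sketch of both idioms with made-up names:

#include <linux/atomic.h>
#include <linux/compiler.h>

static atomic_t example_readers;        /* how many readers are inside */
static long example_acquired;           /* updated by the owner only */

static void example_read_section(void)
{
        atomic_inc(&example_readers);
        /* ... read-side critical section ... */
        atomic_dec(&example_readers);
}

static long example_report(void)
{
        /* Lockless snapshot for reporting; the race is intentional. */
        return data_race(example_acquired);
}
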
index db93015..bc8abb8 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * kernel/mutex-debug.c
- *
  * Debugging code for mutexes
  *
  * Started by Ingo Molnar:
@@ -22,7 +20,7 @@
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 
-#include "mutex-debug.h"
+#include "mutex.h"
 
 /*
  * Must be called with lock->wait_lock held.
@@ -32,6 +30,7 @@ void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
        memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
        waiter->magic = waiter;
        INIT_LIST_HEAD(&waiter->list);
+       waiter->ww_ctx = MUTEX_POISON_WW_CTX;
 }
 
 void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
deleted file mode 100644 (file)
index 53e631e..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Mutexes: blocking mutual exclusion locks
- *
- * started by Ingo Molnar:
- *
- *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *
- * This file contains mutex debugging related internal declarations,
- * prototypes and inline functions, for the CONFIG_DEBUG_MUTEXES case.
- * More details are in kernel/mutex-debug.c.
- */
-
-/*
- * This must be called with lock->wait_lock held.
- */
-extern void debug_mutex_lock_common(struct mutex *lock,
-                                   struct mutex_waiter *waiter);
-extern void debug_mutex_wake_waiter(struct mutex *lock,
-                                   struct mutex_waiter *waiter);
-extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
-extern void debug_mutex_add_waiter(struct mutex *lock,
-                                  struct mutex_waiter *waiter,
-                                  struct task_struct *task);
-extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                               struct task_struct *task);
-extern void debug_mutex_unlock(struct mutex *lock);
-extern void debug_mutex_init(struct mutex *lock, const char *name,
-                            struct lock_class_key *key);
index d2df5e6..d456579 100644 (file)
 #include <linux/debug_locks.h>
 #include <linux/osq_lock.h>
 
+#ifndef CONFIG_PREEMPT_RT
+#include "mutex.h"
+
 #ifdef CONFIG_DEBUG_MUTEXES
-# include "mutex-debug.h"
+# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
 #else
-# include "mutex.h"
+# define MUTEX_WARN_ON(cond)
 #endif
 
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
        atomic_long_set(&lock->owner, 0);
-       spin_lock_init(&lock->wait_lock);
+       raw_spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        osq_lock_init(&lock->osq);
@@ -91,55 +94,56 @@ static inline unsigned long __owner_flags(unsigned long owner)
        return owner & MUTEX_FLAGS;
 }
 
-/*
- * Trylock variant that returns the owning task on failure.
- */
-static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
+static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
 {
        unsigned long owner, curr = (unsigned long)current;
 
        owner = atomic_long_read(&lock->owner);
        for (;;) { /* must loop, can race against a flag */
-               unsigned long old, flags = __owner_flags(owner);
+               unsigned long flags = __owner_flags(owner);
                unsigned long task = owner & ~MUTEX_FLAGS;
 
                if (task) {
-                       if (likely(task != curr))
-                               break;
-
-                       if (likely(!(flags & MUTEX_FLAG_PICKUP)))
+                       if (flags & MUTEX_FLAG_PICKUP) {
+                               if (task != curr)
+                                       break;
+                               flags &= ~MUTEX_FLAG_PICKUP;
+                       } else if (handoff) {
+                               if (flags & MUTEX_FLAG_HANDOFF)
+                                       break;
+                               flags |= MUTEX_FLAG_HANDOFF;
+                       } else {
                                break;
-
-                       flags &= ~MUTEX_FLAG_PICKUP;
+                       }
                } else {
-#ifdef CONFIG_DEBUG_MUTEXES
-                       DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
-#endif
+                       MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
+                       task = curr;
                }
 
-               /*
-                * We set the HANDOFF bit, we must make sure it doesn't live
-                * past the point where we acquire it. This would be possible
-                * if we (accidentally) set the bit on an unlocked mutex.
-                */
-               flags &= ~MUTEX_FLAG_HANDOFF;
-
-               old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
-               if (old == owner)
-                       return NULL;
-
-               owner = old;
+               if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
+                       if (task == curr)
+                               return NULL;
+                       break;
+               }
        }
 
        return __owner_task(owner);
 }
 
+/*
+ * Trylock or set HANDOFF
+ */
+static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
+{
+       return !__mutex_trylock_common(lock, handoff);
+}
+
 /*
  * Actual trylock that will work on any unlocked state.
  */
 static inline bool __mutex_trylock(struct mutex *lock)
 {
-       return !__mutex_trylock_or_owner(lock);
+       return !__mutex_trylock_common(lock, false);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
@@ -168,10 +172,7 @@ static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
 {
        unsigned long curr = (unsigned long)current;
 
-       if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
-               return true;
-
-       return false;
+       return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
 }
 #endif
 
@@ -226,23 +227,18 @@ static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
        unsigned long owner = atomic_long_read(&lock->owner);
 
        for (;;) {
-               unsigned long old, new;
+               unsigned long new;
 
-#ifdef CONFIG_DEBUG_MUTEXES
-               DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
-               DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
-#endif
+               MUTEX_WARN_ON(__owner_task(owner) != current);
+               MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 
                new = (owner & MUTEX_FLAG_WAITERS);
                new |= (unsigned long)task;
                if (task)
                        new |= MUTEX_FLAG_PICKUP;
 
-               old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
-               if (old == owner)
+               if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
                        break;
-
-               owner = old;
        }
 }
 
@@ -286,218 +282,18 @@ void __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-/*
- * Wait-Die:
- *   The newer transactions are killed when:
- *     It (the new transaction) makes a request for a lock being held
- *     by an older transaction.
- *
- * Wound-Wait:
- *   The newer transactions are wounded when:
- *     An older transaction makes a request for a lock being held by
- *     the newer transaction.
- */
-
-/*
- * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
- * it.
- */
-static __always_inline void
-ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
-       /*
-        * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
-        * but released with a normal mutex_unlock in this call.
-        *
-        * This should never happen, always use ww_mutex_unlock.
-        */
-       DEBUG_LOCKS_WARN_ON(ww->ctx);
-
-       /*
-        * Not quite done after calling ww_acquire_done() ?
-        */
-       DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+#include "ww_mutex.h"
 
-       if (ww_ctx->contending_lock) {
-               /*
-                * After -EDEADLK you tried to
-                * acquire a different ww_mutex? Bad!
-                */
-               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
-
-               /*
-                * You called ww_mutex_lock after receiving -EDEADLK,
-                * but 'forgot' to unlock everything else first?
-                */
-               DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
-               ww_ctx->contending_lock = NULL;
-       }
-
-       /*
-        * Naughty, using a different class will lead to undefined behavior!
-        */
-       DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
-#endif
-       ww_ctx->acquired++;
-       ww->ctx = ww_ctx;
-}
-
-/*
- * Determine if context @a is 'after' context @b. IOW, @a is a younger
- * transaction than @b and depending on algorithm either needs to wait for
- * @b or die.
- */
-static inline bool __sched
-__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
-{
-
-       return (signed long)(a->stamp - b->stamp) > 0;
-}
-
-/*
- * Wait-Die; wake a younger waiter context (when locks held) such that it can
- * die.
- *
- * Among waiters with context, only the first one can have other locks acquired
- * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
- * __ww_mutex_check_kill() wake any but the earliest context.
- */
-static bool __sched
-__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
-              struct ww_acquire_ctx *ww_ctx)
-{
-       if (!ww_ctx->is_wait_die)
-               return false;
-
-       if (waiter->ww_ctx->acquired > 0 &&
-                       __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
-               debug_mutex_wake_waiter(lock, waiter);
-               wake_up_process(waiter->task);
-       }
-
-       return true;
-}
-
-/*
- * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
- *
- * Wound the lock holder if there are waiters with older transactions than
- * the lock holders. Even if multiple waiters may wound the lock holder,
- * it's sufficient that only one does.
- */
-static bool __ww_mutex_wound(struct mutex *lock,
-                            struct ww_acquire_ctx *ww_ctx,
-                            struct ww_acquire_ctx *hold_ctx)
-{
-       struct task_struct *owner = __mutex_owner(lock);
-
-       lockdep_assert_held(&lock->wait_lock);
-
-       /*
-        * Possible through __ww_mutex_add_waiter() when we race with
-        * ww_mutex_set_context_fastpath(). In that case we'll get here again
-        * through __ww_mutex_check_waiters().
-        */
-       if (!hold_ctx)
-               return false;
-
-       /*
-        * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
-        * it cannot go away because we'll have FLAG_WAITERS set and hold
-        * wait_lock.
-        */
-       if (!owner)
-               return false;
-
-       if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
-               hold_ctx->wounded = 1;
-
-               /*
-                * wake_up_process() paired with set_current_state()
-                * inserts sufficient barriers to make sure @owner either sees
-                * it's wounded in __ww_mutex_check_kill() or has a
-                * wakeup pending to re-read the wounded state.
-                */
-               if (owner != current)
-                       wake_up_process(owner);
-
-               return true;
-       }
-
-       return false;
-}
-
-/*
- * We just acquired @lock under @ww_ctx, if there are later contexts waiting
- * behind us on the wait-list, check if they need to die, or wound us.
- *
- * See __ww_mutex_add_waiter() for the list-order construction; basically the
- * list is ordered by stamp, smallest (oldest) first.
- *
- * This relies on never mixing wait-die/wound-wait on the same wait-list;
- * which is currently ensured by that being a ww_class property.
- *
- * The current task must not be on the wait list.
- */
-static void __sched
-__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
-{
-       struct mutex_waiter *cur;
-
-       lockdep_assert_held(&lock->wait_lock);
-
-       list_for_each_entry(cur, &lock->wait_list, list) {
-               if (!cur->ww_ctx)
-                       continue;
-
-               if (__ww_mutex_die(lock, cur, ww_ctx) ||
-                   __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
-                       break;
-       }
-}
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
 /*
- * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
- * and wake up any waiters so they can recheck.
+ * Trylock variant that returns the owning task on failure.
  */
-static __always_inline void
-ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 {
-       ww_mutex_lock_acquired(lock, ctx);
-
-       /*
-        * The lock->ctx update should be visible on all cores before
-        * the WAITERS check is done, otherwise contended waiters might be
-        * missed. The contended waiters will either see ww_ctx == NULL
-        * and keep spinning, or it will acquire wait_lock, add itself
-        * to waiter list and sleep.
-        */
-       smp_mb(); /* See comments above and below. */
-
-       /*
-        * [W] ww->ctx = ctx        [W] MUTEX_FLAG_WAITERS
-        *     MB                       MB
-        * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
-        *
-        * The memory barrier above pairs with the memory barrier in
-        * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
-        * and/or !empty list.
-        */
-       if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
-               return;
-
-       /*
-        * Uh oh, we raced in fastpath, check if any of the waiters need to
-        * die or wound us.
-        */
-       spin_lock(&lock->base.wait_lock);
-       __ww_mutex_check_waiters(&lock->base, ctx);
-       spin_unlock(&lock->base.wait_lock);
+       return __mutex_trylock_common(lock, false);
 }
 
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-
 static inline
 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
                            struct mutex_waiter *waiter)
@@ -754,171 +550,11 @@ EXPORT_SYMBOL(mutex_unlock);
  */
 void __sched ww_mutex_unlock(struct ww_mutex *lock)
 {
-       /*
-        * The unlocking fastpath is the 0->1 transition from 'locked'
-        * into 'unlocked' state:
-        */
-       if (lock->ctx) {
-#ifdef CONFIG_DEBUG_MUTEXES
-               DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
-#endif
-               if (lock->ctx->acquired > 0)
-                       lock->ctx->acquired--;
-               lock->ctx = NULL;
-       }
-
+       __ww_mutex_unlock(lock);
        mutex_unlock(&lock->base);
 }
 EXPORT_SYMBOL(ww_mutex_unlock);
 
-
-static __always_inline int __sched
-__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
-{
-       if (ww_ctx->acquired > 0) {
-#ifdef CONFIG_DEBUG_MUTEXES
-               struct ww_mutex *ww;
-
-               ww = container_of(lock, struct ww_mutex, base);
-               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
-               ww_ctx->contending_lock = ww;
-#endif
-               return -EDEADLK;
-       }
-
-       return 0;
-}
-
-
-/*
- * Check the wound condition for the current lock acquire.
- *
- * Wound-Wait: If we're wounded, kill ourself.
- *
- * Wait-Die: If we're trying to acquire a lock already held by an older
- *           context, kill ourselves.
- *
- * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
- * look at waiters before us in the wait-list.
- */
-static inline int __sched
-__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
-                     struct ww_acquire_ctx *ctx)
-{
-       struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-       struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
-       struct mutex_waiter *cur;
-
-       if (ctx->acquired == 0)
-               return 0;
-
-       if (!ctx->is_wait_die) {
-               if (ctx->wounded)
-                       return __ww_mutex_kill(lock, ctx);
-
-               return 0;
-       }
-
-       if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
-               return __ww_mutex_kill(lock, ctx);
-
-       /*
-        * If there is a waiter in front of us that has a context, then its
-        * stamp is earlier than ours and we must kill ourself.
-        */
-       cur = waiter;
-       list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
-               if (!cur->ww_ctx)
-                       continue;
-
-               return __ww_mutex_kill(lock, ctx);
-       }
-
-       return 0;
-}
-
-/*
- * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
- * first. Such that older contexts are preferred to acquire the lock over
- * younger contexts.
- *
- * Waiters without context are interspersed in FIFO order.
- *
- * Furthermore, for Wait-Die kill ourself immediately when possible (there are
- * older contexts already waiting) to avoid unnecessary waiting and for
- * Wound-Wait ensure we wound the owning context when it is younger.
- */
-static inline int __sched
-__ww_mutex_add_waiter(struct mutex_waiter *waiter,
-                     struct mutex *lock,
-                     struct ww_acquire_ctx *ww_ctx)
-{
-       struct mutex_waiter *cur;
-       struct list_head *pos;
-       bool is_wait_die;
-
-       if (!ww_ctx) {
-               __mutex_add_waiter(lock, waiter, &lock->wait_list);
-               return 0;
-       }
-
-       is_wait_die = ww_ctx->is_wait_die;
-
-       /*
-        * Add the waiter before the first waiter with a higher stamp.
-        * Waiters without a context are skipped to avoid starving
-        * them. Wait-Die waiters may die here. Wound-Wait waiters
-        * never die here, but they are sorted in stamp order and
-        * may wound the lock holder.
-        */
-       pos = &lock->wait_list;
-       list_for_each_entry_reverse(cur, &lock->wait_list, list) {
-               if (!cur->ww_ctx)
-                       continue;
-
-               if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
-                       /*
-                        * Wait-Die: if we find an older context waiting, there
-                        * is no point in queueing behind it, as we'd have to
-                        * die the moment it would acquire the lock.
-                        */
-                       if (is_wait_die) {
-                               int ret = __ww_mutex_kill(lock, ww_ctx);
-
-                               if (ret)
-                                       return ret;
-                       }
-
-                       break;
-               }
-
-               pos = &cur->list;
-
-               /* Wait-Die: ensure younger waiters die. */
-               __ww_mutex_die(lock, cur, ww_ctx);
-       }
-
-       __mutex_add_waiter(lock, waiter, pos);
-
-       /*
-        * Wound-Wait: if we're blocking on a mutex owned by a younger context,
-        * wound that such that we might proceed.
-        */
-       if (!is_wait_die) {
-               struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-
-               /*
-                * See ww_mutex_set_context_fastpath(). Orders setting
-                * MUTEX_FLAG_WAITERS vs the ww->ctx load,
-                * such that either we or the fastpath will wound @ww->ctx.
-                */
-               smp_mb();
-               __ww_mutex_wound(lock, ww_ctx, ww->ctx);
-       }
-
-       return 0;
-}
-
 /*
  * Lock a mutex (possibly interruptible), slowpath:
  */
@@ -928,7 +564,6 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
        struct mutex_waiter waiter;
-       bool first = false;
        struct ww_mutex *ww;
        int ret;
 
@@ -937,9 +572,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 
        might_sleep();
 
-#ifdef CONFIG_DEBUG_MUTEXES
-       DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-#endif
+       MUTEX_WARN_ON(lock->magic != lock);
 
        ww = container_of(lock, struct ww_mutex, base);
        if (ww_ctx) {
@@ -953,6 +586,10 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                 */
                if (ww_ctx->acquired == 0)
                        ww_ctx->wounded = 0;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+               nest_lock = &ww_ctx->dep_map;
+#endif
        }
 
        preempt_disable();
@@ -968,7 +605,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                return 0;
        }
 
-       spin_lock(&lock->wait_lock);
+       raw_spin_lock(&lock->wait_lock);
        /*
         * After waiting to acquire the wait_lock, try again.
         */
@@ -980,17 +617,15 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
        }
 
        debug_mutex_lock_common(lock, &waiter);
+       waiter.task = current;
+       if (use_ww_ctx)
+               waiter.ww_ctx = ww_ctx;
 
        lock_contended(&lock->dep_map, ip);
 
        if (!use_ww_ctx) {
                /* add waiting tasks to the end of the waitqueue (FIFO): */
                __mutex_add_waiter(lock, &waiter, &lock->wait_list);
-
-
-#ifdef CONFIG_DEBUG_MUTEXES
-               waiter.ww_ctx = MUTEX_POISON_WW_CTX;
-#endif
        } else {
                /*
                 * Add in stamp order, waking up waiters that must kill
@@ -999,14 +634,12 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
                if (ret)
                        goto err_early_kill;
-
-               waiter.ww_ctx = ww_ctx;
        }
 
-       waiter.task = current;
-
        set_current_state(state);
        for (;;) {
+               bool first;
+
                /*
                 * Once we hold wait_lock, we're serialized against
                 * mutex_unlock() handing the lock off to us, do a trylock
@@ -1032,18 +665,10 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                                goto err;
                }
 
-               spin_unlock(&lock->wait_lock);
+               raw_spin_unlock(&lock->wait_lock);
                schedule_preempt_disabled();
 
-               /*
-                * ww_mutex needs to always recheck its position since its waiter
-                * list is not FIFO ordered.
-                */
-               if (ww_ctx || !first) {
-                       first = __mutex_waiter_is_first(lock, &waiter);
-                       if (first)
-                               __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
-               }
+               first = __mutex_waiter_is_first(lock, &waiter);
 
                set_current_state(state);
                /*
@@ -1051,13 +676,13 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                 * state back to RUNNING and fall through the next schedule(),
                 * or we must see its unlock and acquire.
                 */
-               if (__mutex_trylock(lock) ||
+               if (__mutex_trylock_or_handoff(lock, first) ||
                    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
                        break;
 
-               spin_lock(&lock->wait_lock);
+               raw_spin_lock(&lock->wait_lock);
        }
-       spin_lock(&lock->wait_lock);
+       raw_spin_lock(&lock->wait_lock);
 acquired:
        __set_current_state(TASK_RUNNING);
 
@@ -1082,7 +707,7 @@ skip_wait:
        if (ww_ctx)
                ww_mutex_lock_acquired(ww, ww_ctx);
 
-       spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&lock->wait_lock);
        preempt_enable();
        return 0;
 
@@ -1090,7 +715,7 @@ err:
        __set_current_state(TASK_RUNNING);
        __mutex_remove_waiter(lock, &waiter);
 err_early_kill:
-       spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, ip);
        preempt_enable();
@@ -1106,10 +731,9 @@ __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
 
 static int __sched
 __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
-               struct lockdep_map *nest_lock, unsigned long ip,
-               struct ww_acquire_ctx *ww_ctx)
+               unsigned long ip, struct ww_acquire_ctx *ww_ctx)
 {
-       return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
+       return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1189,8 +813,7 @@ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
        might_sleep();
        ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
-                              0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
-                              ctx);
+                              0, _RET_IP_, ctx);
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -1205,8 +828,7 @@ ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
        might_sleep();
        ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
-                             0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
-                             ctx);
+                             0, _RET_IP_, ctx);
 
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
@@ -1237,29 +859,21 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
         */
        owner = atomic_long_read(&lock->owner);
        for (;;) {
-               unsigned long old;
-
-#ifdef CONFIG_DEBUG_MUTEXES
-               DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
-               DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
-#endif
+               MUTEX_WARN_ON(__owner_task(owner) != current);
+               MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 
                if (owner & MUTEX_FLAG_HANDOFF)
                        break;
 
-               old = atomic_long_cmpxchg_release(&lock->owner, owner,
-                                                 __owner_flags(owner));
-               if (old == owner) {
+               if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
                        if (owner & MUTEX_FLAG_WAITERS)
                                break;
 
                        return;
                }
-
-               owner = old;
        }
 
-       spin_lock(&lock->wait_lock);
+       raw_spin_lock(&lock->wait_lock);
        debug_mutex_unlock(lock);
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
@@ -1276,7 +890,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);
 
-       spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&lock->wait_lock);
 
        wake_up_q(&wake_q);
 }
@@ -1380,7 +994,7 @@ __mutex_lock_interruptible_slowpath(struct mutex *lock)
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-       return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
+       return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
                               _RET_IP_, ctx);
 }
 
@@ -1388,7 +1002,7 @@ static noinline int __sched
 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                            struct ww_acquire_ctx *ctx)
 {
-       return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
+       return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
                               _RET_IP_, ctx);
 }
 
@@ -1412,9 +1026,7 @@ int __sched mutex_trylock(struct mutex *lock)
 {
        bool locked;
 
-#ifdef CONFIG_DEBUG_MUTEXES
-       DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-#endif
+       MUTEX_WARN_ON(lock->magic != lock);
 
        locked = __mutex_trylock(lock);
        if (locked)
@@ -1455,7 +1067,8 @@ ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 }
 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
 
-#endif
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* !CONFIG_PREEMPT_RT */
 
 /**
  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
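
The MUTEX_WARN_ON() conversions in the hunks above fold the CONFIG_DEBUG_MUTEXES
guard into the macro itself. A minimal sketch of such a wrapper, assuming it sits
next to the other helpers in kernel/locking/mutex.h (its definition is not part
of this hunk):

	/* Collapses to a no-op check when CONFIG_DEBUG_MUTEXES is not set. */
	#define MUTEX_WARN_ON(cond) \
		DEBUG_LOCKS_WARN_ON(IS_ENABLED(CONFIG_DEBUG_MUTEXES) && (cond))
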
index f0c710b..0b2a79c 100644 (file)
@@ -5,19 +5,41 @@
  * started by Ingo Molnar:
  *
  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *
- * This file contains mutex debugging related internal prototypes, for the
- * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
  */
 
-#define debug_mutex_wake_waiter(lock, waiter)          do { } while (0)
-#define debug_mutex_free_waiter(waiter)                        do { } while (0)
-#define debug_mutex_add_waiter(lock, waiter, ti)       do { } while (0)
-#define debug_mutex_remove_waiter(lock, waiter, ti)     do { } while (0)
-#define debug_mutex_unlock(lock)                       do { } while (0)
-#define debug_mutex_init(lock, name, key)              do { } while (0)
+/*
+ * This is the control structure for tasks blocked on mutex, which resides
+ * on the blocked task's kernel stack:
+ */
+struct mutex_waiter {
+       struct list_head        list;
+       struct task_struct      *task;
+       struct ww_acquire_ctx   *ww_ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+       void                    *magic;
+#endif
+};
 
-static inline void
-debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
-{
-}
+#ifdef CONFIG_DEBUG_MUTEXES
+extern void debug_mutex_lock_common(struct mutex *lock,
+                                   struct mutex_waiter *waiter);
+extern void debug_mutex_wake_waiter(struct mutex *lock,
+                                   struct mutex_waiter *waiter);
+extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
+extern void debug_mutex_add_waiter(struct mutex *lock,
+                                  struct mutex_waiter *waiter,
+                                  struct task_struct *task);
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+                                     struct task_struct *task);
+extern void debug_mutex_unlock(struct mutex *lock);
+extern void debug_mutex_init(struct mutex *lock, const char *name,
+                            struct lock_class_key *key);
+#else /* CONFIG_DEBUG_MUTEXES */
+# define debug_mutex_lock_common(lock, waiter)         do { } while (0)
+# define debug_mutex_wake_waiter(lock, waiter)         do { } while (0)
+# define debug_mutex_free_waiter(waiter)               do { } while (0)
+# define debug_mutex_add_waiter(lock, waiter, ti)      do { } while (0)
+# define debug_mutex_remove_waiter(lock, waiter, ti)   do { } while (0)
+# define debug_mutex_unlock(lock)                      do { } while (0)
+# define debug_mutex_init(lock, name, key)             do { } while (0)
+#endif /* !CONFIG_DEBUG_MUTEXES */
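
The extern debug hooks above pair with the ->magic field added to struct
mutex_waiter. A hedged sketch of what debug_mutex_lock_common() typically does
with it (the body lives in mutex-debug.c and is assumed here, not shown):

	void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
	{
		memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter)); /* poison the on-stack waiter */
		waiter->magic = waiter;                            /* validated when freed       */
		INIT_LIST_HEAD(&waiter->list);
	}
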
index b5d9bb5..8eabdc7 100644 (file)
@@ -8,20 +8,58 @@
  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
  *  Copyright (C) 2006 Esben Nielsen
+ * Adaptive Spinlocks:
+ *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
+ *                                  and Peter Morreale,
+ * Adaptive Spinlocks simplification:
+ *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
  *
  *  See Documentation/locking/rt-mutex-design.rst for details.
  */
-#include <linux/spinlock.h>
-#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/deadline.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/rt.h>
-#include <linux/sched/deadline.h>
 #include <linux/sched/wake_q.h>
-#include <linux/sched/debug.h>
-#include <linux/timer.h>
+#include <linux/ww_mutex.h>
 
 #include "rtmutex_common.h"
 
+#ifndef WW_RT
+# define build_ww_mutex()      (false)
+# define ww_container_of(rtm)  NULL
+
+static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
+                                       struct rt_mutex *lock,
+                                       struct ww_acquire_ctx *ww_ctx)
+{
+       return 0;
+}
+
+static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
+                                           struct ww_acquire_ctx *ww_ctx)
+{
+}
+
+static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
+                                         struct ww_acquire_ctx *ww_ctx)
+{
+}
+
+static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
+                                       struct rt_mutex_waiter *waiter,
+                                       struct ww_acquire_ctx *ww_ctx)
+{
+       return 0;
+}
+
+#else
+# define build_ww_mutex()      (true)
+# define ww_container_of(rtm)  container_of(rtm, struct ww_mutex, base)
+# include "ww_mutex.h"
+#endif
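
build_ww_mutex() is a compile-time constant, so the ww_mutex-specific branches
in the slow paths below are dead code in the !WW_RT build and only the stubs
above are ever referenced. The callers use the pattern:

	if (build_ww_mutex() && ww_ctx) {
		/* folded away entirely when WW_RT is not defined */
		ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
		if (ret)
			break;
	}
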
+
 /*
  * lock->owner state tracking:
  *
@@ -50,7 +88,7 @@
  */
 
 static __always_inline void
-rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
+rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
 {
        unsigned long val = (unsigned long)owner;
 
@@ -60,13 +98,13 @@ rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
        WRITE_ONCE(lock->owner, (struct task_struct *)val);
 }
 
-static __always_inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
+static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
 {
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
 {
        unsigned long owner, *p = (unsigned long *) &lock->owner;
 
@@ -141,15 +179,26 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex *lock)
  * set up.
  */
 #ifndef CONFIG_DEBUG_RT_MUTEXES
-# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
-# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
+static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
+                                                    struct task_struct *old,
+                                                    struct task_struct *new)
+{
+       return try_cmpxchg_acquire(&lock->owner, &old, new);
+}
+
+static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
+                                                    struct task_struct *old,
+                                                    struct task_struct *new)
+{
+       return try_cmpxchg_release(&lock->owner, &old, new);
+}
 
 /*
  * Callers must hold the ->wait_lock -- which is the whole purpose as we force
  * all future threads that attempt to [Rmw] the lock to the slowpath. As such
  * relaxed semantics suffice.
  */
-static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 {
        unsigned long owner, *p = (unsigned long *) &lock->owner;
 
@@ -165,7 +214,7 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  * 2) Drop lock->wait_lock
  * 3) Try to unlock the lock with cmpxchg
  */
-static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
                                                 unsigned long flags)
        __releases(lock->wait_lock)
 {
@@ -201,10 +250,22 @@ static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
 }
 
 #else
-# define rt_mutex_cmpxchg_acquire(l,c,n)       (0)
-# define rt_mutex_cmpxchg_release(l,c,n)       (0)
+static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
+                                                    struct task_struct *old,
+                                                    struct task_struct *new)
+{
+       return false;
+
+}
+
+static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
+                                                    struct task_struct *old,
+                                                    struct task_struct *new)
+{
+       return false;
+}
 
-static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 {
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
@@ -213,7 +274,7 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 /*
  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
  */
-static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
                                                 unsigned long flags)
        __releases(lock->wait_lock)
 {
@@ -223,11 +284,28 @@ static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
 }
 #endif
 
+static __always_inline int __waiter_prio(struct task_struct *task)
+{
+       int prio = task->prio;
+
+       if (!rt_prio(prio))
+               return DEFAULT_PRIO;
+
+       return prio;
+}
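
A worked example of the clamping above, using the usual kernel priority mapping
(values assumed for illustration):

	/*
	 * SCHED_OTHER, nice -10  -> p->prio == 110, !rt_prio() -> 120 (DEFAULT_PRIO)
	 * SCHED_OTHER, nice  19  -> p->prio == 139, !rt_prio() -> 120 (DEFAULT_PRIO)
	 * SCHED_FIFO,  rtprio 50 -> p->prio ==  49,  rt_prio() ->  49 (kept, sorts first)
	 *
	 * Nice levels therefore never reorder waiters; only RT/DL priorities do.
	 */
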
+
+static __always_inline void
+waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+{
+       waiter->prio = __waiter_prio(task);
+       waiter->deadline = task->dl.deadline;
+}
+
 /*
  * Only use with rt_mutex_waiter_{less,equal}()
  */
 #define task_to_waiter(p)      \
-       &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
+       &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
 
 static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
                                                struct rt_mutex_waiter *right)
@@ -265,22 +343,63 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
        return 1;
 }
 
+static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
+                                 struct rt_mutex_waiter *top_waiter)
+{
+       if (rt_mutex_waiter_less(waiter, top_waiter))
+               return true;
+
+#ifdef RT_MUTEX_BUILD_SPINLOCKS
+       /*
+        * Note that RT tasks are excluded from same priority (lateral)
+        * steals to prevent the introduction of an unbounded latency.
+        */
+       if (rt_prio(waiter->prio) || dl_prio(waiter->prio))
+               return false;
+
+       return rt_mutex_waiter_equal(waiter, top_waiter);
+#else
+       return false;
+#endif
+}
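
Combined with the DEFAULT_PRIO clamping above, the steal rules work out as
follows (a hedged summary, not code from the patch):

	/*
	 * FIFO waiter (prio 49) vs SCHED_OTHER top waiter (120) -> less()  -> steal
	 * SCHED_OTHER (120)     vs SCHED_OTHER top waiter (120) -> equal() -> lateral
	 *     steal allowed, but only on the spinlock build (RT_MUTEX_BUILD_SPINLOCKS)
	 * FIFO waiter           vs equal-priority FIFO waiter   -> never stolen, which
	 *     keeps RT wakeup latency bounded
	 * mutex/rwsem build                                      -> only a strictly
	 *     higher-priority waiter steals
	 */
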
+
 #define __node_2_waiter(node) \
        rb_entry((node), struct rt_mutex_waiter, tree_entry)
 
 static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
 {
-       return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b));
+       struct rt_mutex_waiter *aw = __node_2_waiter(a);
+       struct rt_mutex_waiter *bw = __node_2_waiter(b);
+
+       if (rt_mutex_waiter_less(aw, bw))
+               return 1;
+
+       if (!build_ww_mutex())
+               return 0;
+
+       if (rt_mutex_waiter_less(bw, aw))
+               return 0;
+
+       /* NOTE: relies on waiter->ww_ctx being set before insertion */
+       if (aw->ww_ctx) {
+               if (!bw->ww_ctx)
+                       return 1;
+
+               return (signed long)(aw->ww_ctx->stamp -
+                                    bw->ww_ctx->stamp) < 0;
+       }
+
+       return 0;
 }
 
 static __always_inline void
-rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
 {
        rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
 }
 
 static __always_inline void
-rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
 {
        if (RB_EMPTY_NODE(&waiter->tree_entry))
                return;
@@ -326,6 +445,35 @@ static __always_inline void rt_mutex_adjust_prio(struct task_struct *p)
        rt_mutex_setprio(p, pi_task);
 }
 
+/* RT mutex specific wake_q wrappers */
+static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
+                                               struct rt_mutex_waiter *w)
+{
+       if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state != TASK_NORMAL) {
+               if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+                       WARN_ON_ONCE(wqh->rtlock_task);
+               get_task_struct(w->task);
+               wqh->rtlock_task = w->task;
+       } else {
+               wake_q_add(&wqh->head, w->task);
+       }
+}
+
+static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
+{
+       if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
+               wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
+               put_task_struct(wqh->rtlock_task);
+               wqh->rtlock_task = NULL;
+       }
+
+       if (!wake_q_empty(&wqh->head))
+               wake_up_q(&wqh->head);
+
+       /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
+       preempt_enable();
+}
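
A condensed usage sketch of the wrappers above, mirroring what
rt_mutex_slowunlock() further down in this hunk does:

	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	mark_wakeup_next_waiter(&wqh, lock);          /* queue waiter, preemption stays off */
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	rt_mutex_wake_up_q(&wqh);                     /* wake it, re-enable preemption */
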
+
 /*
  * Deadlock detection is conditional:
  *
@@ -343,17 +491,12 @@ static __always_inline bool
 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
                              enum rtmutex_chainwalk chwalk)
 {
-       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEX))
+       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
                return waiter != NULL;
        return chwalk == RT_MUTEX_FULL_CHAINWALK;
 }
 
-/*
- * Max number of times we'll walk the boosting chain:
- */
-int max_lock_depth = 1024;
-
-static __always_inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p)
 {
        return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
 }
@@ -423,15 +566,15 @@ static __always_inline struct rt_mutex *task_blocked_on_lock(struct task_struct
  */
 static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
                                              enum rtmutex_chainwalk chwalk,
-                                             struct rt_mutex *orig_lock,
-                                             struct rt_mutex *next_lock,
+                                             struct rt_mutex_base *orig_lock,
+                                             struct rt_mutex_base *next_lock,
                                              struct rt_mutex_waiter *orig_waiter,
                                              struct task_struct *top_task)
 {
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        struct rt_mutex_waiter *prerequeue_top_waiter;
        int ret = 0, depth = 0;
-       struct rt_mutex *lock;
+       struct rt_mutex_base *lock;
        bool detect_deadlock;
        bool requeue = true;
 
@@ -513,6 +656,31 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
        if (next_lock != waiter->lock)
                goto out_unlock_pi;
 
+       /*
+        * There could be 'spurious' loops in the lock graph due to ww_mutex,
+        * consider:
+        *
+        *   P1: A, ww_A, ww_B
+        *   P2: ww_B, ww_A
+        *   P3: A
+        *
+        * P3 should not return -EDEADLK because it gets trapped in the cycle
+        * created by P1 and P2 (which will resolve -- and runs into
+        * max_lock_depth above). Therefore disable detect_deadlock such that
+        * the below termination condition can trigger once all relevant tasks
+        * are boosted.
+        *
+        * Even when we start with ww_mutex we can disable deadlock detection,
+        * since we would suppress a ww_mutex induced deadlock at [6] anyway.
+        * Suppressing it here however is not sufficient since we might still
+        * hit [6] due to adjustment driven iteration.
+        *
+        * NOTE: if someone were to create a deadlock between 2 ww_classes we'd
+        * utterly fail to report it; lockdep should.
+        */
+       if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock)
+               detect_deadlock = false;
+
        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
@@ -574,8 +742,21 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-               raw_spin_unlock(&lock->wait_lock);
                ret = -EDEADLK;
+
+               /*
+                * When the deadlock is due to ww_mutex; also see above. Don't
+                * report the deadlock and instead let the ww_mutex wound/die
+                * logic pick which of the contending threads gets -EDEADLK.
+                *
+                * NOTE: assumes the cycle only contains a single ww_class; any
+                * other configuration and we fail to report; also, see
+                * lockdep.
+                */
+               if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter->ww_ctx)
+                       ret = 0;
+
+               raw_spin_unlock(&lock->wait_lock);
                goto out_unlock_pi;
        }
 
@@ -653,8 +834,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
         * serializes all pi_waiters access and rb_erase() does not care about
         * the values of the node being removed.
         */
-       waiter->prio = task->prio;
-       waiter->deadline = task->dl.deadline;
+       waiter_update_prio(waiter, task);
 
        rt_mutex_enqueue(lock, waiter);
 
@@ -676,7 +856,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
                 * to get the lock.
                 */
                if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
-                       wake_up_process(rt_mutex_top_waiter(lock)->task);
+                       wake_up_state(waiter->task, waiter->wake_state);
                raw_spin_unlock_irq(&lock->wait_lock);
                return 0;
        }
@@ -779,7 +959,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
  *         callsite called task_blocked_on_lock(), otherwise NULL
  */
 static int __sched
-try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
                     struct rt_mutex_waiter *waiter)
 {
        lockdep_assert_held(&lock->wait_lock);
@@ -815,19 +995,21 @@ try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
         * trylock attempt.
         */
        if (waiter) {
-               /*
-                * If waiter is not the highest priority waiter of
-                * @lock, give up.
-                */
-               if (waiter != rt_mutex_top_waiter(lock))
-                       return 0;
+               struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
 
                /*
-                * We can acquire the lock. Remove the waiter from the
-                * lock waiters tree.
+                * If waiter is the highest priority waiter of @lock,
+                * or allowed to steal it, take it over.
                 */
-               rt_mutex_dequeue(lock, waiter);
-
+               if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) {
+                       /*
+                        * We can acquire the lock. Remove the waiter from the
+                        * lock waiters tree.
+                        */
+                       rt_mutex_dequeue(lock, waiter);
+               } else {
+                       return 0;
+               }
        } else {
                /*
                 * If the lock has waiters already we check whether @task is
@@ -838,13 +1020,9 @@ try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                 * not need to be dequeued.
                 */
                if (rt_mutex_has_waiters(lock)) {
-                       /*
-                        * If @task->prio is greater than or equal to
-                        * the top waiter priority (kernel view),
-                        * @task lost.
-                        */
-                       if (!rt_mutex_waiter_less(task_to_waiter(task),
-                                                 rt_mutex_top_waiter(lock)))
+                       /* Check whether the trylock can steal it. */
+                       if (!rt_mutex_steal(task_to_waiter(task),
+                                           rt_mutex_top_waiter(lock)))
                                return 0;
 
                        /*
@@ -897,14 +1075,15 @@ takeit:
  *
  * This must be called with lock->wait_lock held and interrupts disabled
  */
-static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock,
+static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
                                           struct rt_mutex_waiter *waiter,
                                           struct task_struct *task,
+                                          struct ww_acquire_ctx *ww_ctx,
                                           enum rtmutex_chainwalk chwalk)
 {
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
-       struct rt_mutex *next_lock;
+       struct rt_mutex_base *next_lock;
        int chain_walk = 0, res;
 
        lockdep_assert_held(&lock->wait_lock);
@@ -924,8 +1103,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock,
        raw_spin_lock(&task->pi_lock);
        waiter->task = task;
        waiter->lock = lock;
-       waiter->prio = task->prio;
-       waiter->deadline = task->dl.deadline;
+       waiter_update_prio(waiter, task);
 
        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
@@ -936,6 +1114,21 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
        raw_spin_unlock(&task->pi_lock);
 
+       if (build_ww_mutex() && ww_ctx) {
+               struct rt_mutex *rtm;
+
+               /* Check whether the waiter should back out immediately */
+               rtm = container_of(lock, struct rt_mutex, rtmutex);
+               res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
+               if (res) {
+                       raw_spin_lock(&task->pi_lock);
+                       rt_mutex_dequeue(lock, waiter);
+                       task->pi_blocked_on = NULL;
+                       raw_spin_unlock(&task->pi_lock);
+                       return res;
+               }
+       }
+
        if (!owner)
                return 0;
 
@@ -986,8 +1179,8 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock,
  *
  * Called with lock->wait_lock held and interrupts disabled.
  */
-static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q,
-                                           struct rt_mutex *lock)
+static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
+                                           struct rt_mutex_base *lock)
 {
        struct rt_mutex_waiter *waiter;
 
@@ -1023,25 +1216,201 @@ static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q,
         * deboost but before waking our donor task, hence the preempt_disable()
         * before unlock.
         *
-        * Pairs with preempt_enable() in rt_mutex_postunlock();
+        * Pairs with preempt_enable() in rt_mutex_wake_up_q();
         */
        preempt_disable();
-       wake_q_add(wake_q, waiter->task);
+       rt_mutex_wake_q_add(wqh, waiter);
        raw_spin_unlock(&current->pi_lock);
 }
 
+static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
+{
+       int ret = try_to_take_rt_mutex(lock, current, NULL);
+
+       /*
+        * try_to_take_rt_mutex() sets the lock waiters bit
+        * unconditionally. Clean this up.
+        */
+       fixup_rt_mutex_waiters(lock);
+
+       return ret;
+}
+
+/*
+ * Slow path try-lock function:
+ */
+static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
+{
+       unsigned long flags;
+       int ret;
+
+       /*
+        * If the lock already has an owner we fail to get the lock.
+        * This can be done without taking the @lock->wait_lock as
+        * it is only being read, and this is a trylock anyway.
+        */
+       if (rt_mutex_owner(lock))
+               return 0;
+
+       /*
+        * The mutex has currently no owner. Lock the wait lock and try to
+        * acquire the lock. We use irqsave here to support early boot calls.
+        */
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+       ret = __rt_mutex_slowtrylock(lock);
+
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+       return ret;
+}
+
+static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
+{
+       if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+               return 1;
+
+       return rt_mutex_slowtrylock(lock);
+}
+
+/*
+ * Slow path to release a rt-mutex.
+ */
+static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
+{
+       DEFINE_RT_WAKE_Q(wqh);
+       unsigned long flags;
+
+       /* irqsave required to support early boot calls */
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+       debug_rt_mutex_unlock(lock);
+
+       /*
+        * We must be careful here if the fast path is enabled. If we
+        * have no waiters queued we cannot set owner to NULL here
+        * because of:
+        *
+        * foo->lock->owner = NULL;
+        *                      rtmutex_lock(foo->lock);   <- fast path
+        *                      free = atomic_dec_and_test(foo->refcnt);
+        *                      rtmutex_unlock(foo->lock); <- fast path
+        *                      if (free)
+        *                              kfree(foo);
+        * raw_spin_unlock(foo->lock->wait_lock);
+        *
+        * So for the fastpath enabled kernel:
+        *
+        * Nothing can set the waiters bit as long as we hold
+        * lock->wait_lock. So we do the following sequence:
+        *
+        *      owner = rt_mutex_owner(lock);
+        *      clear_rt_mutex_waiters(lock);
+        *      raw_spin_unlock(&lock->wait_lock);
+        *      if (cmpxchg(&lock->owner, owner, 0) == owner)
+        *              return;
+        *      goto retry;
+        *
+        * The fastpath disabled variant is simple as all access to
+        * lock->owner is serialized by lock->wait_lock:
+        *
+        *      lock->owner = NULL;
+        *      raw_spin_unlock(&lock->wait_lock);
+        */
+       while (!rt_mutex_has_waiters(lock)) {
+               /* Drops lock->wait_lock ! */
+               if (unlock_rt_mutex_safe(lock, flags) == true)
+                       return;
+               /* Relock the rtmutex and try again */
+               raw_spin_lock_irqsave(&lock->wait_lock, flags);
+       }
+
+       /*
+        * The wakeup next waiter path does not suffer from the above
+        * race. See the comments there.
+        *
+        * Queue the next waiter for wakeup once we release the wait_lock.
+        */
+       mark_wakeup_next_waiter(&wqh, lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+       rt_mutex_wake_up_q(&wqh);
+}
+
+static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
+{
+       if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+               return;
+
+       rt_mutex_slowunlock(lock);
+}
+
+#ifdef CONFIG_SMP
+static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
+                                 struct rt_mutex_waiter *waiter,
+                                 struct task_struct *owner)
+{
+       bool res = true;
+
+       rcu_read_lock();
+       for (;;) {
+               /* If owner changed, trylock again. */
+               if (owner != rt_mutex_owner(lock))
+                       break;
+               /*
+                * Ensure that @owner is dereferenced after checking that
+                * the lock owner still matches @owner. If that fails,
+                * @owner might point to freed memory. If it still matches,
+                * the rcu_read_lock() ensures the memory stays valid.
+                */
+               barrier();
+               /*
+                * Stop spinning when:
+                *  - the lock owner has been scheduled out
+                *  - current is no longer the top waiter
+                *  - current is requested to reschedule (redundant
+                *    for CONFIG_PREEMPT_RCU=y)
+                *  - the VCPU on which owner runs is preempted
+                */
+               if (!owner->on_cpu || need_resched() ||
+                   rt_mutex_waiter_is_top_waiter(lock, waiter) ||
+                   vcpu_is_preempted(task_cpu(owner))) {
+                       res = false;
+                       break;
+               }
+               cpu_relax();
+       }
+       rcu_read_unlock();
+       return res;
+}
+#else
+static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
+                                 struct rt_mutex_waiter *waiter,
+                                 struct task_struct *owner)
+{
+       return false;
+}
+#endif
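
The wait loops below feed this helper as follows (condensed from
rt_mutex_slowlock_block() and rtlock_slowlock_locked() further down):

	if (waiter == rt_mutex_top_waiter(lock))
		owner = rt_mutex_owner(lock);   /* only the top waiter is allowed to spin */
	else
		owner = NULL;
	raw_spin_unlock_irq(&lock->wait_lock);

	if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
		schedule();                     /* owner not on a CPU: block instead */
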
+
+#ifdef RT_MUTEX_BUILD_MUTEX
+/*
+ * Functions required for:
+ *     - rtmutex, futex on all kernels
+ *     - mutex and rwsem substitutions on RT kernels
+ */
+
 /*
  * Remove a waiter from a lock and give up
  *
- * Must be called with lock->wait_lock held and interrupts disabled. I must
+ * Must be called with lock->wait_lock held and interrupts disabled. It must
  * have just failed to try_to_take_rt_mutex().
  */
-static void __sched remove_waiter(struct rt_mutex *lock,
+static void __sched remove_waiter(struct rt_mutex_base *lock,
                                  struct rt_mutex_waiter *waiter)
 {
        bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
-       struct rt_mutex *next_lock;
+       struct rt_mutex_base *next_lock;
 
        lockdep_assert_held(&lock->wait_lock);
 
@@ -1089,56 +1458,25 @@ static void __sched remove_waiter(struct rt_mutex *lock,
        raw_spin_lock_irq(&lock->wait_lock);
 }
 
-/*
- * Recheck the pi chain, in case we got a priority setting
+/**
+ * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
+ * @lock:               the rt_mutex to take
+ * @ww_ctx:             WW mutex context pointer
+ * @state:              the state the task should block in (TASK_INTERRUPTIBLE
+ *                      or TASK_UNINTERRUPTIBLE)
+ * @timeout:            the pre-initialized and started timer, or NULL for none
+ * @waiter:             the pre-initialized rt_mutex_waiter
  *
- * Called from sched_setscheduler
+ * Must be called with lock->wait_lock held and interrupts disabled
  */
-void __sched rt_mutex_adjust_pi(struct task_struct *task)
-{
-       struct rt_mutex_waiter *waiter;
-       struct rt_mutex *next_lock;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
-
-       waiter = task->pi_blocked_on;
-       if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-               return;
-       }
-       next_lock = waiter->lock;
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
-       /* gets dropped in rt_mutex_adjust_prio_chain()! */
-       get_task_struct(task);
-
-       rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
-                                  next_lock, NULL, task);
-}
-
-void __sched rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
-{
-       debug_rt_mutex_init_waiter(waiter);
-       RB_CLEAR_NODE(&waiter->pi_tree_entry);
-       RB_CLEAR_NODE(&waiter->tree_entry);
-       waiter->task = NULL;
-}
-
-/**
- * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
- * @lock:               the rt_mutex to take
- * @state:              the state the task should block in (TASK_INTERRUPTIBLE
- *                      or TASK_UNINTERRUPTIBLE)
- * @timeout:            the pre-initialized and started timer, or NULL for none
- * @waiter:             the pre-initialized rt_mutex_waiter
- *
- * Must be called with lock->wait_lock held and interrupts disabled
- */
-static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state,
-                                      struct hrtimer_sleeper *timeout,
-                                      struct rt_mutex_waiter *waiter)
+static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+                                          struct ww_acquire_ctx *ww_ctx,
+                                          unsigned int state,
+                                          struct hrtimer_sleeper *timeout,
+                                          struct rt_mutex_waiter *waiter)
 {
+       struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
+       struct task_struct *owner;
        int ret = 0;
 
        for (;;) {
@@ -1155,9 +1493,20 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state
                        break;
                }
 
+               if (build_ww_mutex() && ww_ctx) {
+                       ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
+                       if (ret)
+                               break;
+               }
+
+               if (waiter == rt_mutex_top_waiter(lock))
+                       owner = rt_mutex_owner(lock);
+               else
+                       owner = NULL;
                raw_spin_unlock_irq(&lock->wait_lock);
 
-               schedule();
+               if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
+                       schedule();
 
                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(state);
@@ -1177,6 +1526,9 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
        if (res != -EDEADLOCK || detect_deadlock)
                return;
 
+       if (build_ww_mutex() && w->ww_ctx)
+               return;
+
        /*
         * Yell loudly and stop the task right here.
         */
@@ -1187,51 +1539,52 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
        }
 }
 
-/*
- * Slow path lock function:
+/**
+ * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
+ * @lock:      The rtmutex to block lock
+ * @ww_ctx:    WW mutex context pointer
+ * @state:     The task state for sleeping
+ * @chwalk:    Indicator whether full or partial chainwalk is requested
+ * @waiter:    Initialized waiter for blocking
  */
-static int __sched rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state,
-                                    struct hrtimer_sleeper *timeout,
-                                    enum rtmutex_chainwalk chwalk)
+static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+                                      struct ww_acquire_ctx *ww_ctx,
+                                      unsigned int state,
+                                      enum rtmutex_chainwalk chwalk,
+                                      struct rt_mutex_waiter *waiter)
 {
-       struct rt_mutex_waiter waiter;
-       unsigned long flags;
-       int ret = 0;
-
-       rt_mutex_init_waiter(&waiter);
+       struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
+       struct ww_mutex *ww = ww_container_of(rtm);
+       int ret;
 
-       /*
-        * Technically we could use raw_spin_[un]lock_irq() here, but this can
-        * be called in early boot if the cmpxchg() fast path is disabled
-        * (debug, no architecture support). In this case we will acquire the
-        * rtmutex with lock->wait_lock held. But we cannot unconditionally
-        * enable interrupts in that early boot case. So we need to use the
-        * irqsave/restore variants.
-        */
-       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+       lockdep_assert_held(&lock->wait_lock);
 
        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
-               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+               if (build_ww_mutex() && ww_ctx) {
+                       __ww_mutex_check_waiters(rtm, ww_ctx);
+                       ww_mutex_lock_acquired(ww, ww_ctx);
+               }
                return 0;
        }
 
        set_current_state(state);
 
-       /* Setup the timer, when timeout != NULL */
-       if (unlikely(timeout))
-               hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
-       ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
-
+       ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
        if (likely(!ret))
-               /* sleep on the mutex */
-               ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
-
-       if (unlikely(ret)) {
+               ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
+
+       if (likely(!ret)) {
+               /* acquired the lock */
+               if (build_ww_mutex() && ww_ctx) {
+                       if (!ww_ctx->is_wait_die)
+                               __ww_mutex_check_waiters(rtm, ww_ctx);
+                       ww_mutex_lock_acquired(ww, ww_ctx);
+               }
+       } else {
                __set_current_state(TASK_RUNNING);
-               remove_waiter(lock, &waiter);
-               rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+               remove_waiter(lock, waiter);
+               rt_mutex_handle_deadlock(ret, chwalk, waiter);
        }
 
        /*
@@ -1239,547 +1592,126 @@ static int __sched rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state,
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);
-
-       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
-       /* Remove pending timer: */
-       if (unlikely(timeout))
-               hrtimer_cancel(&timeout->timer);
-
-       debug_rt_mutex_free_waiter(&waiter);
-
        return ret;
 }
 
-static int __sched __rt_mutex_slowtrylock(struct rt_mutex *lock)
+static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
+                                            struct ww_acquire_ctx *ww_ctx,
+                                            unsigned int state)
 {
-       int ret = try_to_take_rt_mutex(lock, current, NULL);
+       struct rt_mutex_waiter waiter;
+       int ret;
 
-       /*
-        * try_to_take_rt_mutex() sets the lock waiters bit
-        * unconditionally. Clean this up.
-        */
-       fixup_rt_mutex_waiters(lock);
+       rt_mutex_init_waiter(&waiter);
+       waiter.ww_ctx = ww_ctx;
+
+       ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
+                                 &waiter);
 
+       debug_rt_mutex_free_waiter(&waiter);
        return ret;
 }
 
 /*
- * Slow path try-lock function:
+ * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
+ * @lock:      The rtmutex to block lock
+ * @ww_ctx:    WW mutex context pointer
+ * @state:     The task state for sleeping
  */
-static int __sched rt_mutex_slowtrylock(struct rt_mutex *lock)
+static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+                                    struct ww_acquire_ctx *ww_ctx,
+                                    unsigned int state)
 {
        unsigned long flags;
        int ret;
 
        /*
-        * If the lock already has an owner we fail to get the lock.
-        * This can be done without taking the @lock->wait_lock as
-        * it is only being read, and this is a trylock anyway.
-        */
-       if (rt_mutex_owner(lock))
-               return 0;
-
-       /*
-        * The mutex has currently no owner. Lock the wait lock and try to
-        * acquire the lock. We use irqsave here to support early boot calls.
+        * Technically we could use raw_spin_[un]lock_irq() here, but this can
+        * be called in early boot if the cmpxchg() fast path is disabled
+        * (debug, no architecture support). In this case we will acquire the
+        * rtmutex with lock->wait_lock held. But we cannot unconditionally
+        * enable interrupts in that early boot case. So we need to use the
+        * irqsave/restore variants.
         */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
-       ret = __rt_mutex_slowtrylock(lock);
-
+       ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        return ret;
 }
 
-/*
- * Performs the wakeup of the top-waiter and re-enables preemption.
- */
-void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
-{
-       wake_up_q(wake_q);
-
-       /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
-       preempt_enable();
-}
-
-/*
- * Slow path to release a rt-mutex.
- *
- * Return whether the current task needs to call rt_mutex_postunlock().
- */
-static void __sched rt_mutex_slowunlock(struct rt_mutex *lock)
-{
-       DEFINE_WAKE_Q(wake_q);
-       unsigned long flags;
-
-       /* irqsave required to support early boot calls */
-       raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
-       debug_rt_mutex_unlock(lock);
-
-       /*
-        * We must be careful here if the fast path is enabled. If we
-        * have no waiters queued we cannot set owner to NULL here
-        * because of:
-        *
-        * foo->lock->owner = NULL;
-        *                      rtmutex_lock(foo->lock);   <- fast path
-        *                      free = atomic_dec_and_test(foo->refcnt);
-        *                      rtmutex_unlock(foo->lock); <- fast path
-        *                      if (free)
-        *                              kfree(foo);
-        * raw_spin_unlock(foo->lock->wait_lock);
-        *
-        * So for the fastpath enabled kernel:
-        *
-        * Nothing can set the waiters bit as long as we hold
-        * lock->wait_lock. So we do the following sequence:
-        *
-        *      owner = rt_mutex_owner(lock);
-        *      clear_rt_mutex_waiters(lock);
-        *      raw_spin_unlock(&lock->wait_lock);
-        *      if (cmpxchg(&lock->owner, owner, 0) == owner)
-        *              return;
-        *      goto retry;
-        *
-        * The fastpath disabled variant is simple as all access to
-        * lock->owner is serialized by lock->wait_lock:
-        *
-        *      lock->owner = NULL;
-        *      raw_spin_unlock(&lock->wait_lock);
-        */
-       while (!rt_mutex_has_waiters(lock)) {
-               /* Drops lock->wait_lock ! */
-               if (unlock_rt_mutex_safe(lock, flags) == true)
-                       return;
-               /* Relock the rtmutex and try again */
-               raw_spin_lock_irqsave(&lock->wait_lock, flags);
-       }
-
-       /*
-        * The wakeup next waiter path does not suffer from the above
-        * race. See the comments there.
-        *
-        * Queue the next waiter for wakeup once we release the wait_lock.
-        */
-       mark_wakeup_next_waiter(&wake_q, lock);
-       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
-       rt_mutex_postunlock(&wake_q);
-}
-
-/*
- * debug aware fast / slowpath lock,trylock,unlock
- *
- * The atomic acquire/release ops are compiled away, when either the
- * architecture does not support cmpxchg or when debugging is enabled.
- */
-static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, long state,
-                                          unsigned int subclass)
+static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
+                                          unsigned int state)
 {
-       int ret;
-
-       might_sleep();
-       mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-
        if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
                return 0;
 
-       ret = rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
-       if (ret)
-               mutex_release(&lock->dep_map, _RET_IP_);
-       return ret;
+       return rt_mutex_slowlock(lock, NULL, state);
 }
+#endif /* RT_MUTEX_BUILD_MUTEX */
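
The lockdep-annotated rt_mutex_lock*() entry points removed below sit on top of
this fast/slow split; a hedged sketch of such a wrapper (the helper name and its
new location outside this file are assumptions, not shown in this hunk):

	static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
							  unsigned int state,
							  unsigned int subclass)
	{
		int ret;

		might_sleep();
		mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
		ret = __rt_mutex_lock(&lock->rtmutex, state);
		if (ret)
			mutex_release(&lock->dep_map, _RET_IP_);
		return ret;
	}
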
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-/**
- * rt_mutex_lock_nested - lock a rt_mutex
- *
- * @lock: the rt_mutex to be locked
- * @subclass: the lockdep subclass
- */
-void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
-{
-       __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
-
-#else /* !CONFIG_DEBUG_LOCK_ALLOC */
-
-/**
- * rt_mutex_lock - lock a rt_mutex
- *
- * @lock: the rt_mutex to be locked
- */
-void __sched rt_mutex_lock(struct rt_mutex *lock)
-{
-       __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_lock);
-#endif
-
-/**
- * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
- *
- * @lock:              the rt_mutex to be locked
- *
- * Returns:
- *  0          on success
- * -EINTR      when interrupted by a signal
- */
-int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
-{
-       return __rt_mutex_lock(lock, TASK_INTERRUPTIBLE, 0);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-
-/**
- * rt_mutex_trylock - try to lock a rt_mutex
- *
- * @lock:      the rt_mutex to be locked
- *
- * This function can only be called in thread context. It's safe to call it
- * from atomic regions, but not from hard or soft interrupt context.
- *
- * Returns:
- *  1 on success
- *  0 on contention
- */
-int __sched rt_mutex_trylock(struct rt_mutex *lock)
-{
-       int ret;
-
-       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
-               return 0;
-
-       /*
-        * No lockdep annotation required because lockdep disables the fast
-        * path.
-        */
-       if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-               return 1;
-
-       ret = rt_mutex_slowtrylock(lock);
-       if (ret)
-               mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(rt_mutex_trylock);
-
-/**
- * rt_mutex_unlock - unlock a rt_mutex
- *
- * @lock: the rt_mutex to be unlocked
- */
-void __sched rt_mutex_unlock(struct rt_mutex *lock)
-{
-       mutex_release(&lock->dep_map, _RET_IP_);
-       if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
-               return;
-
-       rt_mutex_slowunlock(lock);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_unlock);
-
+#ifdef RT_MUTEX_BUILD_SPINLOCKS
 /*
- * Futex variants, must not use fastpath.
+ * Functions required for spin/rw_lock substitution on RT kernels
  */
-int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-       return rt_mutex_slowtrylock(lock);
-}
-
-int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-       return __rt_mutex_slowtrylock(lock);
-}
 
 /**
- * __rt_mutex_futex_unlock - Futex variant, that since futex variants
- * do not use the fast-path, can be simple and will not need to retry.
- *
- * @lock:      The rt_mutex to be unlocked
- * @wake_q:    The wake queue head from which to get the next lock waiter
+ * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
+ * @lock:      The underlying RT mutex
  */
-bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
-                                    struct wake_q_head *wake_q)
+static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 {
-       lockdep_assert_held(&lock->wait_lock);
-
-       debug_rt_mutex_unlock(lock);
-
-       if (!rt_mutex_has_waiters(lock)) {
-               lock->owner = NULL;
-               return false; /* done */
-       }
-
-       /*
-        * We've already deboosted, mark_wakeup_next_waiter() will
-        * retain preempt_disabled when we drop the wait_lock, to
-        * avoid inversion prior to the wakeup.  preempt_disable()
-        * therein pairs with rt_mutex_postunlock().
-        */
-       mark_wakeup_next_waiter(wake_q, lock);
-
-       return true; /* call postunlock() */
-}
-
-void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
-{
-       DEFINE_WAKE_Q(wake_q);
-       unsigned long flags;
-       bool postunlock;
-
-       raw_spin_lock_irqsave(&lock->wait_lock, flags);
-       postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
-       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
-       if (postunlock)
-               rt_mutex_postunlock(&wake_q);
-}
+       struct rt_mutex_waiter waiter;
+       struct task_struct *owner;
 
-/**
- * __rt_mutex_init - initialize the rt_mutex
- *
- * @lock:      The rt_mutex to be initialized
- * @name:      The lock name used for debugging
- * @key:       The lock class key used for debugging
- *
- * Initialize the rt_mutex to unlocked state.
- *
- * Initializing of a locked rt_mutex is not allowed
- */
-void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
-                    struct lock_class_key *key)
-{
-       debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-       lockdep_init_map(&lock->dep_map, name, key, 0);
+       lockdep_assert_held(&lock->wait_lock);
 
-       __rt_mutex_basic_init(lock);
-}
-EXPORT_SYMBOL_GPL(__rt_mutex_init);
+       if (try_to_take_rt_mutex(lock, current, NULL))
+               return;
 
-/**
- * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
- *                             proxy owner
- *
- * @lock:      the rt_mutex to be locked
- * @proxy_owner:the task to set as owner
- *
- * No locking. Caller has to do serializing itself
- *
- * Special API call for PI-futex support. This initializes the rtmutex and
- * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
- * possible at this point because the pi_state which contains the rtmutex
- * is not yet visible to other tasks.
- */
-void __sched rt_mutex_init_proxy_locked(struct rt_mutex *lock,
-                                       struct task_struct *proxy_owner)
-{
-       __rt_mutex_basic_init(lock);
-       rt_mutex_set_owner(lock, proxy_owner);
-}
+       rt_mutex_init_rtlock_waiter(&waiter);
 
-/**
- * rt_mutex_proxy_unlock - release a lock on behalf of owner
- *
- * @lock:      the rt_mutex to be locked
- *
- * No locking. Caller has to do serializing itself
- *
- * Special API call for PI-futex support. This merrily cleans up the rtmutex
- * (debugging) state. Concurrent operations on this rt_mutex are not
- * possible because it belongs to the pi_state which is about to be freed
- * and it is not longer visible to other tasks.
- */
-void __sched rt_mutex_proxy_unlock(struct rt_mutex *lock)
-{
-       debug_rt_mutex_proxy_unlock(lock);
-       rt_mutex_set_owner(lock, NULL);
-}
+       /* Save current state and set state to TASK_RTLOCK_WAIT */
+       current_save_and_set_rtlock_wait_state();
 
-/**
- * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
- * @lock:              the rt_mutex to take
- * @waiter:            the pre-initialized rt_mutex_waiter
- * @task:              the task to prepare
- *
- * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
- * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
- *
- * NOTE: does _NOT_ remove the @waiter on failure; must either call
- * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
- *
- * Returns:
- *  0 - task blocked on lock
- *  1 - acquired the lock for task, caller should wake it up
- * <0 - error
- *
- * Special API call for PI-futex support.
- */
-int __sched __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
-                                       struct rt_mutex_waiter *waiter,
-                                       struct task_struct *task)
-{
-       int ret;
+       task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
 
-       lockdep_assert_held(&lock->wait_lock);
+       for (;;) {
+               /* Try to acquire the lock again */
+               if (try_to_take_rt_mutex(lock, current, &waiter))
+                       break;
 
-       if (try_to_take_rt_mutex(lock, task, NULL))
-               return 1;
+               if (&waiter == rt_mutex_top_waiter(lock))
+                       owner = rt_mutex_owner(lock);
+               else
+                       owner = NULL;
+               raw_spin_unlock_irq(&lock->wait_lock);
 
-       /* We enforce deadlock detection for futexes */
-       ret = task_blocks_on_rt_mutex(lock, waiter, task,
-                                     RT_MUTEX_FULL_CHAINWALK);
+               if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
+                       schedule_rtlock();
 
-       if (ret && !rt_mutex_owner(lock)) {
-               /*
-                * Reset the return value. We might have
-                * returned with -EDEADLK and the owner
-                * released the lock while we were walking the
-                * pi chain.  Let the waiter sort it out.
-                */
-               ret = 0;
+               raw_spin_lock_irq(&lock->wait_lock);
+               set_current_state(TASK_RTLOCK_WAIT);
        }
 
-       return ret;
-}
-
-/**
- * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
- * @lock:              the rt_mutex to take
- * @waiter:            the pre-initialized rt_mutex_waiter
- * @task:              the task to prepare
- *
- * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
- * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
- *
- * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
- * on failure.
- *
- * Returns:
- *  0 - task blocked on lock
- *  1 - acquired the lock for task, caller should wake it up
- * <0 - error
- *
- * Special API call for PI-futex support.
- */
-int __sched rt_mutex_start_proxy_lock(struct rt_mutex *lock,
-                                     struct rt_mutex_waiter *waiter,
-                                     struct task_struct *task)
-{
-       int ret;
-
-       raw_spin_lock_irq(&lock->wait_lock);
-       ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
-       if (unlikely(ret))
-               remove_waiter(lock, waiter);
-       raw_spin_unlock_irq(&lock->wait_lock);
-
-       return ret;
-}
-
-/**
- * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
- * @lock:              the rt_mutex we were woken on
- * @to:                        the timeout, null if none. hrtimer should already have
- *                     been started.
- * @waiter:            the pre-initialized rt_mutex_waiter
- *
- * Wait for the lock acquisition started on our behalf by
- * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
- * rt_mutex_cleanup_proxy_lock().
- *
- * Returns:
- *  0 - success
- * <0 - error, one of -EINTR, -ETIMEDOUT
- *
- * Special API call for PI-futex support
- */
-int __sched rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
-                                    struct hrtimer_sleeper *to,
-                                    struct rt_mutex_waiter *waiter)
-{
-       int ret;
+       /* Restore the task state */
+       current_restore_rtlock_saved_state();
 
-       raw_spin_lock_irq(&lock->wait_lock);
-       /* sleep on the mutex */
-       set_current_state(TASK_INTERRUPTIBLE);
-       ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
        /*
-        * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
-        * have to fix that up.
+        * try_to_take_rt_mutex() sets the waiter bit unconditionally.
+        * We might have to fix that up:
         */
        fixup_rt_mutex_waiters(lock);
-       raw_spin_unlock_irq(&lock->wait_lock);
-
-       return ret;
+       debug_rt_mutex_free_waiter(&waiter);
 }
 
-/**
- * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
- * @lock:              the rt_mutex we were woken on
- * @waiter:            the pre-initialized rt_mutex_waiter
- *
- * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
- * rt_mutex_wait_proxy_lock().
- *
- * Unless we acquired the lock; we're still enqueued on the wait-list and can
- * in fact still be granted ownership until we're removed. Therefore we can
- * find we are in fact the owner and must disregard the
- * rt_mutex_wait_proxy_lock() failure.
- *
- * Returns:
- *  true  - did the cleanup, we done.
- *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
- *          caller should disregards its return value.
- *
- * Special API call for PI-futex support
- */
-bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
-                                        struct rt_mutex_waiter *waiter)
+static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
 {
-       bool cleanup = false;
-
-       raw_spin_lock_irq(&lock->wait_lock);
-       /*
-        * Do an unconditional try-lock, this deals with the lock stealing
-        * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
-        * sets a NULL owner.
-        *
-        * We're not interested in the return value, because the subsequent
-        * test on rt_mutex_owner() will infer that. If the trylock succeeded,
-        * we will own the lock and it will have removed the waiter. If we
-        * failed the trylock, we're still not owner and we need to remove
-        * ourselves.
-        */
-       try_to_take_rt_mutex(lock, current, waiter);
-       /*
-        * Unless we're the owner; we're still enqueued on the wait_list.
-        * So check if we became owner, if not, take us off the wait_list.
-        */
-       if (rt_mutex_owner(lock) != current) {
-               remove_waiter(lock, waiter);
-               cleanup = true;
-       }
-       /*
-        * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
-        * have to fix that up.
-        */
-       fixup_rt_mutex_waiters(lock);
-
-       raw_spin_unlock_irq(&lock->wait_lock);
+       unsigned long flags;
 
-       return cleanup;
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+       rtlock_slowlock_locked(lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 }
 
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-void rt_mutex_debug_task_free(struct task_struct *task)
-{
-       DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
-       DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
-}
-#endif
+#endif /* RT_MUTEX_BUILD_SPINLOCKS */
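As a reading aid for the rework above: the rtmutex core file is included by its consumers, each selecting a build flavor with a define, as the new files later in this series do. A minimal sketch of that composition:

    /* kernel/locking/rtmutex_api.c (and kernel/locking/rwsem.c on PREEMPT_RT) */
    #define RT_MUTEX_BUILD_MUTEX
    #include "rtmutex.c"

    /* kernel/locking/spinlock_rt.c */
    #define RT_MUTEX_BUILD_SPINLOCKS
    #include "rtmutex.c"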
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
new file mode 100644 (file)
index 0000000..5c9299a
--- /dev/null
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * rtmutex API
+ */
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#define RT_MUTEX_BUILD_MUTEX
+#include "rtmutex.c"
+
+/*
+ * Max number of times we'll walk the boosting chain:
+ */
+int max_lock_depth = 1024;
+
+/*
+ * Debug aware fast / slowpath lock, trylock and unlock
+ *
+ * The atomic acquire/release ops are compiled away when either the
+ * architecture does not support cmpxchg or debugging is enabled.
+ */
+static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
+                                                 unsigned int state,
+                                                 unsigned int subclass)
+{
+       int ret;
+
+       might_sleep();
+       mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+       ret = __rt_mutex_lock(&lock->rtmutex, state);
+       if (ret)
+               mutex_release(&lock->dep_map, _RET_IP_);
+       return ret;
+}
+
+void rt_mutex_base_init(struct rt_mutex_base *rtb)
+{
+       __rt_mutex_base_init(rtb);
+}
+EXPORT_SYMBOL(rt_mutex_base_init);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/**
+ * rt_mutex_lock_nested - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ * @subclass: the lockdep subclass
+ */
+void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+{
+       __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+
+#else /* !CONFIG_DEBUG_LOCK_ALLOC */
+
+/**
+ * rt_mutex_lock - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ */
+void __sched rt_mutex_lock(struct rt_mutex *lock)
+{
+       __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock);
+#endif
+
+/**
+ * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
+ *
+ * @lock:              the rt_mutex to be locked
+ *
+ * Returns:
+ *  0          on success
+ * -EINTR      when interrupted by a signal
+ */
+int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
+{
+       return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+/**
+ * rt_mutex_trylock - try to lock a rt_mutex
+ *
+ * @lock:      the rt_mutex to be locked
+ *
+ * This function can only be called in thread context. It's safe to call it
+ * from atomic regions, but not from hard or soft interrupt context.
+ *
+ * Returns:
+ *  1 on success
+ *  0 on contention
+ */
+int __sched rt_mutex_trylock(struct rt_mutex *lock)
+{
+       int ret;
+
+       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
+               return 0;
+
+       ret = __rt_mutex_trylock(&lock->rtmutex);
+       if (ret)
+               mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rt_mutex_trylock);
+
+/**
+ * rt_mutex_unlock - unlock a rt_mutex
+ *
+ * @lock: the rt_mutex to be unlocked
+ */
+void __sched rt_mutex_unlock(struct rt_mutex *lock)
+{
+       mutex_release(&lock->dep_map, _RET_IP_);
+       __rt_mutex_unlock(&lock->rtmutex);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_unlock);
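For context, a minimal in-kernel caller of this sleeping-lock API could look like the sketch below; the example_* names are hypothetical and not part of this series:

    static DEFINE_RT_MUTEX(example_lock);

    static int example_update(void)
    {
            /* Sleeps with priority inheritance until the lock is free */
            if (rt_mutex_lock_interruptible(&example_lock))
                    return -EINTR;

            /* ... modify state protected by example_lock ... */

            rt_mutex_unlock(&example_lock);
            return 0;
    }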
+
+/*
+ * Futex variants, must not use fastpath.
+ */
+int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
+{
+       return rt_mutex_slowtrylock(lock);
+}
+
+int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
+{
+       return __rt_mutex_slowtrylock(lock);
+}
+
+/**
+ * __rt_mutex_futex_unlock - Futex variant of unlock. Since futex variants
+ * do not use the fast path, this can be simple and does not need to retry.
+ *
+ * @lock:      The rt_mutex to be unlocked
+ * @wqh:       The wake queue head from which to get the next lock waiter
+ */
+bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
+                                    struct rt_wake_q_head *wqh)
+{
+       lockdep_assert_held(&lock->wait_lock);
+
+       debug_rt_mutex_unlock(lock);
+
+       if (!rt_mutex_has_waiters(lock)) {
+               lock->owner = NULL;
+               return false; /* done */
+       }
+
+       /*
+        * We've already deboosted; mark_wakeup_next_waiter() keeps
+        * preemption disabled when we drop the wait_lock, to
+        * avoid inversion prior to the wakeup.  preempt_disable()
+        * therein pairs with rt_mutex_postunlock().
+        */
+       mark_wakeup_next_waiter(wqh, lock);
+
+       return true; /* call postunlock() */
+}
+
+void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
+{
+       DEFINE_RT_WAKE_Q(wqh);
+       unsigned long flags;
+       bool postunlock;
+
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+       postunlock = __rt_mutex_futex_unlock(lock, &wqh);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+       if (postunlock)
+               rt_mutex_postunlock(&wqh);
+}
+
+/**
+ * __rt_mutex_init - initialize the rt_mutex
+ *
+ * @lock:      The rt_mutex to be initialized
+ * @name:      The lock name used for debugging
+ * @key:       The lock class key used for debugging
+ *
+ * Initialize the rt_mutex to unlocked state.
+ *
+ * Initializing a locked rt_mutex is not allowed
+ */
+void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
+                            struct lock_class_key *key)
+{
+       debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+       __rt_mutex_base_init(&lock->rtmutex);
+       lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
+}
+EXPORT_SYMBOL_GPL(__rt_mutex_init);
+
+/**
+ * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
+ *                             proxy owner
+ *
+ * @lock:      the rt_mutex to be locked
+ * @proxy_owner:       the task to set as owner
+ *
+ * No locking. Caller has to do serializing itself
+ *
+ * Special API call for PI-futex support. This initializes the rtmutex and
+ * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
+ * possible at this point because the pi_state which contains the rtmutex
+ * is not yet visible to other tasks.
+ */
+void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
+                                       struct task_struct *proxy_owner)
+{
+       static struct lock_class_key pi_futex_key;
+
+       __rt_mutex_base_init(lock);
+       /*
+        * On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
+        * and rtmutex based. That causes a lockdep false positive, because
+        * some of the futex functions invoke spin_unlock(&hb->lock) with
+        * the wait_lock of the rtmutex associated to the pi_futex held.
+        * spin_unlock() in turn takes wait_lock of the rtmutex on which
+        * the spinlock is based, which makes lockdep notice a lock
+        * recursion. Give the futex/rtmutex wait_lock a separate key.
+        */
+       lockdep_set_class(&lock->wait_lock, &pi_futex_key);
+       rt_mutex_set_owner(lock, proxy_owner);
+}
+
+/**
+ * rt_mutex_proxy_unlock - release a lock on behalf of owner
+ *
+ * @lock:      the rt_mutex to be locked
+ *
+ * No locking. Caller has to do serializing itself
+ *
+ * Special API call for PI-futex support. This just cleans up the rtmutex
+ * (debugging) state. Concurrent operations on this rt_mutex are not
+ * possible because it belongs to the pi_state which is about to be freed
+ * and it is no longer visible to other tasks.
+ */
+void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
+{
+       debug_rt_mutex_proxy_unlock(lock);
+       rt_mutex_set_owner(lock, NULL);
+}
+
+/**
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock:              the rt_mutex to take
+ * @waiter:            the pre-initialized rt_mutex_waiter
+ * @task:              the task to prepare
+ *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: does _NOT_ remove the @waiter on failure; must either call
+ * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
+ *
+ * Returns:
+ *  0 - task blocked on lock
+ *  1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for PI-futex support.
+ */
+int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
+                                       struct rt_mutex_waiter *waiter,
+                                       struct task_struct *task)
+{
+       int ret;
+
+       lockdep_assert_held(&lock->wait_lock);
+
+       if (try_to_take_rt_mutex(lock, task, NULL))
+               return 1;
+
+       /* We enforce deadlock detection for futexes */
+       ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
+                                     RT_MUTEX_FULL_CHAINWALK);
+
+       if (ret && !rt_mutex_owner(lock)) {
+               /*
+                * Reset the return value. We might have
+                * returned with -EDEADLK and the owner
+                * released the lock while we were walking the
+                * pi chain.  Let the waiter sort it out.
+                */
+               ret = 0;
+       }
+
+       return ret;
+}
+
+/**
+ * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock:              the rt_mutex to take
+ * @waiter:            the pre-initialized rt_mutex_waiter
+ * @task:              the task to prepare
+ *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
+ * on failure.
+ *
+ * Returns:
+ *  0 - task blocked on lock
+ *  1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for PI-futex support.
+ */
+int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
+                                     struct rt_mutex_waiter *waiter,
+                                     struct task_struct *task)
+{
+       int ret;
+
+       raw_spin_lock_irq(&lock->wait_lock);
+       ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
+       if (unlikely(ret))
+               remove_waiter(lock, waiter);
+       raw_spin_unlock_irq(&lock->wait_lock);
+
+       return ret;
+}
+
+/**
+ * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
+ * @lock:              the rt_mutex we were woken on
+ * @to:                        the timeout, NULL if none; the hrtimer should already
+ *                     have been started.
+ * @waiter:            the pre-initialized rt_mutex_waiter
+ *
+ * Wait for the lock acquisition started on our behalf by
+ * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
+ * rt_mutex_cleanup_proxy_lock().
+ *
+ * Returns:
+ *  0 - success
+ * <0 - error, one of -EINTR, -ETIMEDOUT
+ *
+ * Special API call for PI-futex support
+ */
+int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
+                                    struct hrtimer_sleeper *to,
+                                    struct rt_mutex_waiter *waiter)
+{
+       int ret;
+
+       raw_spin_lock_irq(&lock->wait_lock);
+       /* sleep on the mutex */
+       set_current_state(TASK_INTERRUPTIBLE);
+       ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
+       /*
+        * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+        * have to fix that up.
+        */
+       fixup_rt_mutex_waiters(lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
+
+       return ret;
+}
+
+/**
+ * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
+ * @lock:              the rt_mutex we were woken on
+ * @waiter:            the pre-initialized rt_mutex_waiter
+ *
+ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
+ * rt_mutex_wait_proxy_lock().
+ *
+ * Unless we acquired the lock, we're still enqueued on the wait-list and can
+ * in fact still be granted ownership until we're removed. Therefore we can
+ * find we are in fact the owner and must disregard the
+ * rt_mutex_wait_proxy_lock() failure.
+ *
+ * Returns:
+ *  true  - did the cleanup, we're done.
+ *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
+ *          caller should disregard its return value.
+ *
+ * Special API call for PI-futex support
+ */
+bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
+                                        struct rt_mutex_waiter *waiter)
+{
+       bool cleanup = false;
+
+       raw_spin_lock_irq(&lock->wait_lock);
+       /*
+        * Do an unconditional try-lock, this deals with the lock stealing
+        * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
+        * sets a NULL owner.
+        *
+        * We're not interested in the return value, because the subsequent
+        * test on rt_mutex_owner() will infer that. If the trylock succeeded,
+        * we will own the lock and it will have removed the waiter. If we
+        * failed the trylock, we're still not owner and we need to remove
+        * ourselves.
+        */
+       try_to_take_rt_mutex(lock, current, waiter);
+       /*
+        * Unless we're the owner, we're still enqueued on the wait_list.
+        * So check if we became owner, if not, take us off the wait_list.
+        */
+       if (rt_mutex_owner(lock) != current) {
+               remove_waiter(lock, waiter);
+               cleanup = true;
+       }
+       /*
+        * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+        * have to fix that up.
+        */
+       fixup_rt_mutex_waiters(lock);
+
+       raw_spin_unlock_irq(&lock->wait_lock);
+
+       return cleanup;
+}
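Taken together, the proxy-lock helpers above are meant to be driven as a sequence. A simplified flow sketch follows (illustrative only; in the futex code the start and wait steps may even run in different tasks, and error handling is trimmed):

    rt_mutex_init_waiter(&waiter);

    raw_spin_lock_irq(&lock->wait_lock);
    ret = __rt_mutex_start_proxy_lock(lock, &waiter, task);
    raw_spin_unlock_irq(&lock->wait_lock);

    if (ret == 0)
            ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
    /* ret == 1: the lock was acquired for @task, wake it up instead */

    if (ret < 0 && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
            ret = 0;        /* we became owner after all, ignore the failure */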
+
+/*
+ * Recheck the pi chain, in case the task's priority changed
+ *
+ * Called from sched_setscheduler
+ */
+void __sched rt_mutex_adjust_pi(struct task_struct *task)
+{
+       struct rt_mutex_waiter *waiter;
+       struct rt_mutex_base *next_lock;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+       waiter = task->pi_blocked_on;
+       if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               return;
+       }
+       next_lock = waiter->lock;
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+       /* gets dropped in rt_mutex_adjust_prio_chain()! */
+       get_task_struct(task);
+
+       rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+                                  next_lock, NULL, task);
+}
+
+/*
+ * Performs the wakeup of the top-waiter and re-enables preemption.
+ */
+void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
+{
+       rt_mutex_wake_up_q(wqh);
+}
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+void rt_mutex_debug_task_free(struct task_struct *task)
+{
+       DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
+       DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
+}
+#endif
+
+#ifdef CONFIG_PREEMPT_RT
+/* Mutexes */
+void __mutex_rt_init(struct mutex *mutex, const char *name,
+                    struct lock_class_key *key)
+{
+       debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
+       lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
+}
+EXPORT_SYMBOL(__mutex_rt_init);
+
+static __always_inline int __mutex_lock_common(struct mutex *lock,
+                                              unsigned int state,
+                                              unsigned int subclass,
+                                              struct lockdep_map *nest_lock,
+                                              unsigned long ip)
+{
+       int ret;
+
+       might_sleep();
+       mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
+       ret = __rt_mutex_lock(&lock->rtmutex, state);
+       if (ret)
+               mutex_release(&lock->dep_map, ip);
+       else
+               lock_acquired(&lock->dep_map, ip);
+       return ret;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
+{
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_nested);
+
+void __sched _mutex_lock_nest_lock(struct mutex *lock,
+                                  struct lockdep_map *nest_lock)
+{
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
+
+int __sched mutex_lock_interruptible_nested(struct mutex *lock,
+                                           unsigned int subclass)
+{
+       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
+
+int __sched mutex_lock_killable_nested(struct mutex *lock,
+                                           unsigned int subclass)
+{
+       return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
+void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
+{
+       int token;
+
+       might_sleep();
+
+       token = io_schedule_prepare();
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
+       io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
+
+#else /* CONFIG_DEBUG_LOCK_ALLOC */
+
+void __sched mutex_lock(struct mutex *lock)
+{
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL(mutex_lock);
+
+int __sched mutex_lock_interruptible(struct mutex *lock)
+{
+       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL(mutex_lock_interruptible);
+
+int __sched mutex_lock_killable(struct mutex *lock)
+{
+       return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
+void __sched mutex_lock_io(struct mutex *lock)
+{
+       int token = io_schedule_prepare();
+
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
+       io_schedule_finish(token);
+}
+EXPORT_SYMBOL(mutex_lock_io);
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+
+int __sched mutex_trylock(struct mutex *lock)
+{
+       int ret;
+
+       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
+               return 0;
+
+       ret = __rt_mutex_trylock(&lock->rtmutex);
+       if (ret)
+               mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+       return ret;
+}
+EXPORT_SYMBOL(mutex_trylock);
+
+void __sched mutex_unlock(struct mutex *lock)
+{
+       mutex_release(&lock->dep_map, _RET_IP_);
+       __rt_mutex_unlock(&lock->rtmutex);
+}
+EXPORT_SYMBOL(mutex_unlock);
+
+#endif /* CONFIG_PREEMPT_RT */
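With the PREEMPT_RT mutex implementation above, regular mutex users keep their API unchanged; the main visible restriction is that mutex_trylock() must not be used from hard or soft interrupt context (see the in_task() check above). A hedged usage sketch, with hypothetical example_* names:

    static DEFINE_MUTEX(example_mutex);

    static void example_poll(void)
    {
            if (!mutex_trylock(&example_mutex))
                    return;                 /* contended, try again later */

            /* ... this critical section may sleep ... */

            mutex_unlock(&example_mutex);
    }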
index a90c22a..c47e836 100644 (file)
  * @pi_tree_entry:     pi node to enqueue into the mutex owner waiters tree
  * @task:              task reference to the blocked task
  * @lock:              Pointer to the rt_mutex on which the waiter blocks
+ * @wake_state:                Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
  * @prio:              Priority of the waiter
  * @deadline:          Deadline of the waiter if applicable
+ * @ww_ctx:            WW context pointer
  */
 struct rt_mutex_waiter {
        struct rb_node          tree_entry;
        struct rb_node          pi_tree_entry;
        struct task_struct      *task;
-       struct rt_mutex         *lock;
+       struct rt_mutex_base    *lock;
+       unsigned int            wake_state;
        int                     prio;
        u64                     deadline;
+       struct ww_acquire_ctx   *ww_ctx;
 };
 
+/**
+ * rt_wake_q_head - Wrapper around regular wake_q_head to support
+ *                 "sleeping" spinlocks on RT
+ * @head:              The regular wake_q_head for sleeping lock variants
+ * @rtlock_task:       Task pointer for RT lock (spin/rwlock) wakeups
+ */
+struct rt_wake_q_head {
+       struct wake_q_head      head;
+       struct task_struct      *rtlock_task;
+};
+
+#define DEFINE_RT_WAKE_Q(name)                                         \
+       struct rt_wake_q_head name = {                                  \
+               .head           = WAKE_Q_HEAD_INITIALIZER(name.head),   \
+               .rtlock_task    = NULL,                                 \
+       }
+
+/*
+ * PI-futex support (proxy locking functions, etc.):
+ */
+extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
+                                      struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
+extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
+                                    struct rt_mutex_waiter *waiter,
+                                    struct task_struct *task);
+extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
+                                    struct rt_mutex_waiter *waiter,
+                                    struct task_struct *task);
+extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
+                              struct hrtimer_sleeper *to,
+                              struct rt_mutex_waiter *waiter);
+extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
+                                struct rt_mutex_waiter *waiter);
+
+extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
+extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);
+
+extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
+extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
+                               struct rt_wake_q_head *wqh);
+
+extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
+
 /*
  * Must be guarded because this header is included from rcu/tree_plugin.h
  * unconditionally.
  */
 #ifdef CONFIG_RT_MUTEXES
-static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
+static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
 {
        return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
 }
 
-static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex *lock)
+/*
+ * Lockless speculative check whether @waiter is still the top waiter on
+ * @lock. This is solely comparing pointers and not dereferencing the
+ * leftmost entry which might be about to vanish.
+ */
+static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
+                                                struct rt_mutex_waiter *waiter)
+{
+       struct rb_node *leftmost = rb_first_cached(&lock->waiters);
+
+       return rb_entry(leftmost, struct rt_mutex_waiter, tree_entry) == waiter;
+}
+
+static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
 {
        struct rb_node *leftmost = rb_first_cached(&lock->waiters);
        struct rt_mutex_waiter *w = NULL;
@@ -72,19 +133,12 @@ static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
 
 #define RT_MUTEX_HAS_WAITERS   1UL
 
-static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
 {
        unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
 
        return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
 }
-#else /* CONFIG_RT_MUTEXES */
-/* Used in rcu/tree_plugin.h */
-static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
-{
-       return NULL;
-}
-#endif  /* !CONFIG_RT_MUTEXES */
 
 /*
  * Constants for rt mutex functions which have a selectable deadlock
@@ -101,49 +155,21 @@ enum rtmutex_chainwalk {
        RT_MUTEX_FULL_CHAINWALK,
 };
 
-static inline void __rt_mutex_basic_init(struct rt_mutex *lock)
+static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
 {
-       lock->owner = NULL;
        raw_spin_lock_init(&lock->wait_lock);
        lock->waiters = RB_ROOT_CACHED;
+       lock->owner = NULL;
 }
 
-/*
- * PI-futex support (proxy locking functions, etc.):
- */
-extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
-                                      struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
-extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
-extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
-                                    struct rt_mutex_waiter *waiter,
-                                    struct task_struct *task);
-extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
-                                    struct rt_mutex_waiter *waiter,
-                                    struct task_struct *task);
-extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
-                              struct hrtimer_sleeper *to,
-                              struct rt_mutex_waiter *waiter);
-extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
-                                struct rt_mutex_waiter *waiter);
-
-extern int rt_mutex_futex_trylock(struct rt_mutex *l);
-extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
-
-extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
-extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
-                                struct wake_q_head *wqh);
-
-extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
-
 /* Debug functions */
-static inline void debug_rt_mutex_unlock(struct rt_mutex *lock)
+static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
 {
        if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
                DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
 }
 
-static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
+static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
 {
        if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
                DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
@@ -161,4 +187,27 @@ static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
                memset(waiter, 0x22, sizeof(*waiter));
 }
 
+static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+{
+       debug_rt_mutex_init_waiter(waiter);
+       RB_CLEAR_NODE(&waiter->pi_tree_entry);
+       RB_CLEAR_NODE(&waiter->tree_entry);
+       waiter->wake_state = TASK_NORMAL;
+       waiter->task = NULL;
+}
+
+static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
+{
+       rt_mutex_init_waiter(waiter);
+       waiter->wake_state = TASK_RTLOCK_WAIT;
+}
+
+#else /* CONFIG_RT_MUTEXES */
+/* Used in rcu/tree_plugin.h */
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
+{
+       return NULL;
+}
+#endif  /* !CONFIG_RT_MUTEXES */
+
 #endif
diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
new file mode 100644 (file)
index 0000000..4ba1508
--- /dev/null
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * RT-specific reader/writer semaphores and reader/writer locks
+ *
+ * down_write/write_lock()
+ *  1) Lock rtmutex
+ *  2) Remove the reader BIAS to force readers into the slow path
+ *  3) Wait until all readers have left the critical section
+ *  4) Mark it write locked
+ *
+ * up_write/write_unlock()
+ *  1) Remove the write locked marker
+ *  2) Set the reader BIAS, so readers can use the fast path again
+ *  3) Unlock rtmutex, to release blocked readers
+ *
+ * down_read/read_lock()
+ *  1) Try fast path acquisition (reader BIAS is set)
+ *  2) Take rtmutex::wait_lock, which protects the writelocked flag
+ *  3) If !writelocked, acquire it for read
+ *  4) If writelocked, block on rtmutex
+ *  5) unlock rtmutex, goto 1)
+ *
+ * up_read/read_unlock()
+ *  1) Try fast path release (reader count != 1)
+ *  2) Wake the writer waiting in down_write()/write_lock() #3
+ *
+ * down_read/read_lock() #3 has the consequence that rw semaphores and rw
+ * locks on RT are not writer fair. However, writers, which should be
+ * avoided in RT tasks anyway (think mmap_sem), are still subject to the
+ * rtmutex priority/DL inheritance mechanism.
+ *
+ * It's possible to make the rw primitives writer fair by keeping a list of
+ * active readers. A blocked writer would force all newly incoming readers
+ * to block on the rtmutex, but the rtmutex would have to be proxy locked
+ * for one reader after the other. We can't use multi-reader inheritance
+ * because there is no way to support that with SCHED_DEADLINE.
+ * Implementing the one by one reader boosting/handover mechanism is a
+ * major surgery for a very dubious value.
+ *
+ * The risk of writer starvation is there, but the pathological use cases
+ * which trigger it are not necessarily the typical RT workloads.
+ *
+ * Common code shared between RT rw_semaphore and rwlock
+ */
+
+static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
+{
+       int r;
+
+       /*
+        * Increment the reader count if sem->readers < 0, i.e. READER_BIAS
+        * is set.
+        */
+       for (r = atomic_read(&rwb->readers); r < 0;) {
+               if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
+                       return 1;
+       }
+       return 0;
+}
+
+static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
+                                     unsigned int state)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       int ret;
+
+       raw_spin_lock_irq(&rtm->wait_lock);
+       /*
+        * Allow readers, as long as the writer has not completely
+        * acquired the semaphore for write.
+        */
+       if (atomic_read(&rwb->readers) != WRITER_BIAS) {
+               atomic_inc(&rwb->readers);
+               raw_spin_unlock_irq(&rtm->wait_lock);
+               return 0;
+       }
+
+       /*
+        * Call into the slow lock path with the rtmutex->wait_lock
+        * held, so this can't result in the following race:
+        *
+        * Reader1              Reader2         Writer
+        *                      down_read()
+        *                                      down_write()
+        *                                      rtmutex_lock(m)
+        *                                      wait()
+        * down_read()
+        * unlock(m->wait_lock)
+        *                      up_read()
+        *                      wake(Writer)
+        *                                      lock(m->wait_lock)
+        *                                      sem->writelocked=true
+        *                                      unlock(m->wait_lock)
+        *
+        *                                      up_write()
+        *                                      sem->writelocked=false
+        *                                      rtmutex_unlock(m)
+        *                      down_read()
+        *                                      down_write()
+        *                                      rtmutex_lock(m)
+        *                                      wait()
+        * rtmutex_lock(m)
+        *
+        * That would put Reader1 behind the writer waiting on
+        * Reader2 to call up_read(), which might be unbound.
+        */
+
+       /*
+        * For rwlocks this returns 0 unconditionally, so the below
+        * !ret conditionals are optimized out.
+        */
+       ret = rwbase_rtmutex_slowlock_locked(rtm, state);
+
+       /*
+        * On success the rtmutex is held, so there can't be a writer
+        * active. Increment the reader count and immediately drop the
+        * rtmutex again.
+        *
+        * rtmutex->wait_lock has to be unlocked in any case of course.
+        */
+       if (!ret)
+               atomic_inc(&rwb->readers);
+       raw_spin_unlock_irq(&rtm->wait_lock);
+       if (!ret)
+               rwbase_rtmutex_unlock(rtm);
+       return ret;
+}
+
+static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
+                                           unsigned int state)
+{
+       if (rwbase_read_trylock(rwb))
+               return 0;
+
+       return __rwbase_read_lock(rwb, state);
+}
+
+static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
+                                        unsigned int state)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       struct task_struct *owner;
+
+       raw_spin_lock_irq(&rtm->wait_lock);
+       /*
+        * Wake the writer, i.e. the rtmutex owner. It might release the
+        * rtmutex concurrently in the fast path (due to a signal), but to
+        * clean up rwb->readers it needs to acquire rtm->wait_lock. The
+        * worst that can happen is a spurious wakeup.
+        */
+       owner = rt_mutex_owner(rtm);
+       if (owner)
+               wake_up_state(owner, state);
+
+       raw_spin_unlock_irq(&rtm->wait_lock);
+}
+
+static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
+                                              unsigned int state)
+{
+       /*
+        * rwb->readers can only hit 0 when a writer is waiting for the
+        * active readers to leave the critical section.
+        */
+       if (unlikely(atomic_dec_and_test(&rwb->readers)))
+               __rwbase_read_unlock(rwb, state);
+}
+
+static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
+                                        unsigned long flags)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+
+       atomic_add(READER_BIAS - bias, &rwb->readers);
+       raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+       rwbase_rtmutex_unlock(rtm);
+}
+
+static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+       __rwbase_write_unlock(rwb, WRITER_BIAS, flags);
+}
+
+static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+       /* Release it and account current as reader */
+       __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
+}
+
+static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
+                                    unsigned int state)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       unsigned long flags;
+
+       /* Take the rtmutex as a first step */
+       if (rwbase_rtmutex_lock_state(rtm, state))
+               return -EINTR;
+
+       /* Force readers into slow path */
+       atomic_sub(READER_BIAS, &rwb->readers);
+
+       raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+       /*
+        * set_current_state() for rw_semaphore
+        * current_save_and_set_rtlock_wait_state() for rwlock
+        */
+       rwbase_set_and_save_current_state(state);
+
+       /* Block until all readers have left the critical section. */
+       for (; atomic_read(&rwb->readers);) {
+               /* Optimized out for rwlocks */
+               if (rwbase_signal_pending_state(state, current)) {
+                       __set_current_state(TASK_RUNNING);
+                       __rwbase_write_unlock(rwb, 0, flags);
+                       return -EINTR;
+               }
+               raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+
+               /*
+                * Schedule and wait for the readers to leave the critical
+                * section. The last reader leaving it wakes the waiter.
+                */
+               if (atomic_read(&rwb->readers) != 0)
+                       rwbase_schedule();
+               set_current_state(state);
+               raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+       }
+
+       atomic_set(&rwb->readers, WRITER_BIAS);
+       rwbase_restore_current_state();
+       raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+       return 0;
+}
+
+static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       unsigned long flags;
+
+       if (!rwbase_rtmutex_trylock(rtm))
+               return 0;
+
+       atomic_sub(READER_BIAS, &rwb->readers);
+
+       raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+       if (!atomic_read(&rwb->readers)) {
+               atomic_set(&rwb->readers, WRITER_BIAS);
+               raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+               return 1;
+       }
+       __rwbase_write_unlock(rwb, 0, flags);
+       return 0;
+}
index 16bfbb1..9215b4d 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/rwsem.h>
 #include <linux/atomic.h>
 
+#ifndef CONFIG_PREEMPT_RT
 #include "lock_events.h"
 
 /*
@@ -1165,7 +1166,7 @@ out_nolock:
  * handle waking up a waiter on the semaphore
  * - up_read/up_write has decremented the active part of count if we come here
  */
-static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
+static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);
@@ -1297,7 +1298,7 @@ static inline void __up_read(struct rw_semaphore *sem)
        if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
                      RWSEM_FLAG_WAITERS)) {
                clear_nonspinnable(sem);
-               rwsem_wake(sem, tmp);
+               rwsem_wake(sem);
        }
 }
 
@@ -1319,7 +1320,7 @@ static inline void __up_write(struct rw_semaphore *sem)
        rwsem_clear_owner(sem);
        tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
        if (unlikely(tmp & RWSEM_FLAG_WAITERS))
-               rwsem_wake(sem, tmp);
+               rwsem_wake(sem);
 }
 
 /*
@@ -1344,6 +1345,114 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
                rwsem_downgrade_wake(sem);
 }
 
+#else /* !CONFIG_PREEMPT_RT */
+
+#define RT_MUTEX_BUILD_MUTEX
+#include "rtmutex.c"
+
+#define rwbase_set_and_save_current_state(state)       \
+       set_current_state(state)
+
+#define rwbase_restore_current_state()                 \
+       __set_current_state(TASK_RUNNING)
+
+#define rwbase_rtmutex_lock_state(rtm, state)          \
+       __rt_mutex_lock(rtm, state)
+
+#define rwbase_rtmutex_slowlock_locked(rtm, state)     \
+       __rt_mutex_slowlock_locked(rtm, NULL, state)
+
+#define rwbase_rtmutex_unlock(rtm)                     \
+       __rt_mutex_unlock(rtm)
+
+#define rwbase_rtmutex_trylock(rtm)                    \
+       __rt_mutex_trylock(rtm)
+
+#define rwbase_signal_pending_state(state, current)    \
+       signal_pending_state(state, current)
+
+#define rwbase_schedule()                              \
+       schedule()
+
+#include "rwbase_rt.c"
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __rwsem_init(struct rw_semaphore *sem, const char *name,
+                 struct lock_class_key *key)
+{
+       debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+       lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
+}
+EXPORT_SYMBOL(__rwsem_init);
+#endif
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+       rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
+}
+
+static inline int __down_read_interruptible(struct rw_semaphore *sem)
+{
+       return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
+}
+
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+       return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+       return rwbase_read_trylock(&sem->rwbase);
+}
+
+static inline void __up_read(struct rw_semaphore *sem)
+{
+       rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
+}
+
+static inline void __sched __down_write(struct rw_semaphore *sem)
+{
+       rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
+}
+
+static inline int __sched __down_write_killable(struct rw_semaphore *sem)
+{
+       return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+       return rwbase_write_trylock(&sem->rwbase);
+}
+
+static inline void __up_write(struct rw_semaphore *sem)
+{
+       rwbase_write_unlock(&sem->rwbase);
+}
+
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+       rwbase_write_downgrade(&sem->rwbase);
+}
+
+/* Debug stubs for the common API */
+#define DEBUG_RWSEMS_WARN_ON(c, sem)
+
+static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
+                                           struct task_struct *owner)
+{
+}
+
+static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
+{
+       int count = atomic_read(&sem->rwbase.readers);
+
+       return count < 0 && count != READER_BIAS;
+}
+
+#endif /* CONFIG_PREEMPT_RT */
+
 /*
  * lock for reading
  */
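The RT variant keeps the regular rw_semaphore API, so existing callers need no changes. A small usage sketch under that assumption (example_rwsem is hypothetical):

    static DECLARE_RWSEM(example_rwsem);

    static void example_reader(void)
    {
            down_read(&example_rwsem);
            /* several readers may be in here concurrently */
            up_read(&example_rwsem);
    }

    static void example_writer(void)
    {
            down_write(&example_rwsem);
            /* exclusive section; on RT the blocked writer waits on the rtmutex */
            up_write(&example_rwsem);
    }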
index 9aa855a..9ee381e 100644 (file)
@@ -54,6 +54,7 @@ void down(struct semaphore *sem)
 {
        unsigned long flags;
 
+       might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
@@ -77,6 +78,7 @@ int down_interruptible(struct semaphore *sem)
        unsigned long flags;
        int result = 0;
 
+       might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
@@ -103,6 +105,7 @@ int down_killable(struct semaphore *sem)
        unsigned long flags;
        int result = 0;
 
+       might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
@@ -157,6 +160,7 @@ int down_timeout(struct semaphore *sem, long timeout)
        unsigned long flags;
        int result = 0;
 
+       might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
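The added might_sleep() calls make atomic-context misuse of the counting semaphore visible even when the semaphore is uncontended. A sketch of correct use (example_sem is hypothetical):

    static DEFINE_SEMAPHORE(example_sem);

    static void example(void)
    {
            down(&example_sem);     /* may sleep, must not be called from atomic context */
            /* ... */
            up(&example_sem);
    }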
index c8d7ad9..c5830cf 100644 (file)
@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)           \
  *         __[spin|read|write]_lock_bh()
  */
 BUILD_LOCK_OPS(spin, raw_spinlock);
+
+#ifndef CONFIG_PREEMPT_RT
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
+#endif
 
 #endif
 
@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
 EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
 
+#ifndef CONFIG_PREEMPT_RT
+
 #ifndef CONFIG_INLINE_READ_TRYLOCK
 int __lockfunc _raw_read_trylock(rwlock_t *lock)
 {
@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
 EXPORT_SYMBOL(_raw_write_unlock_bh);
 #endif
 
+#endif /* !CONFIG_PREEMPT_RT */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
index b9d9308..1423567 100644 (file)
@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
 
+#ifndef CONFIG_PREEMPT_RT
 void __rwlock_init(rwlock_t *lock, const char *name,
                   struct lock_class_key *key)
 {
@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 }
 
 EXPORT_SYMBOL(__rwlock_init);
+#endif
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
@@ -139,6 +141,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
        arch_spin_unlock(&lock->raw_lock);
 }
 
+#ifndef CONFIG_PREEMPT_RT
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
        if (!debug_locks_off())
@@ -228,3 +231,5 @@ void do_raw_write_unlock(rwlock_t *lock)
        debug_write_unlock(lock);
        arch_write_unlock(&lock->raw_lock);
 }
+
+#endif /* !CONFIG_PREEMPT_RT */
diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
new file mode 100644 (file)
index 0000000..d2912e4
--- /dev/null
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PREEMPT_RT substitution for spin/rw_locks
+ *
+ * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
+ * resemble the non-RT semantics:
+ *
+ * - Contrary to plain rtmutexes, spinlocks and rwlocks are state
+ *   preserving. The task state is saved before blocking on the underlying
+ *   rtmutex, and restored when the lock has been acquired. Regular wakeups
+ *   during that time are redirected to the saved state so no wake up is
+ *   missed.
+ *
+ * - Non-RT spin/rwlocks disable preemption and possibly also interrupts.
+ *   Disabling preemption has the side effect of disabling migration and
+ *   preventing RCU grace periods.
+ *
+ *   The RT substitutions explicitly disable migration and take
+ *   rcu_read_lock() across the lock held section.
+ */
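Under these semantics a plain spinlock user is unchanged at the source level; on PREEMPT_RT the section below ends up in rt_spin_lock()/rt_spin_unlock(), so the task may sleep on contention while migration is disabled and an RCU read-side section is held. Sketch only, example_lock is hypothetical:

    static DEFINE_SPINLOCK(example_lock);

    static void example(void)
    {
            spin_lock(&example_lock);
            /* preemptible on RT, but pinned to this CPU and inside rcu_read_lock() */
            spin_unlock(&example_lock);
    }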
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#define RT_MUTEX_BUILD_SPINLOCKS
+#include "rtmutex.c"
+
+static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
+{
+       if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
+               rtlock_slowlock(rtm);
+}
+
+static __always_inline void __rt_spin_lock(spinlock_t *lock)
+{
+       ___might_sleep(__FILE__, __LINE__, 0);
+       rtlock_lock(&lock->lock);
+       rcu_read_lock();
+       migrate_disable();
+}
+
+void __sched rt_spin_lock(spinlock_t *lock)
+{
+       spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+       __rt_spin_lock(lock);
+}
+EXPORT_SYMBOL(rt_spin_lock);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
+{
+       spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+       __rt_spin_lock(lock);
+}
+EXPORT_SYMBOL(rt_spin_lock_nested);
+
+void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
+                                   struct lockdep_map *nest_lock)
+{
+       spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+       __rt_spin_lock(lock);
+}
+EXPORT_SYMBOL(rt_spin_lock_nest_lock);
+#endif
+
+void __sched rt_spin_unlock(spinlock_t *lock)
+{
+       spin_release(&lock->dep_map, _RET_IP_);
+       migrate_enable();
+       rcu_read_unlock();
+
+       if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
+               rt_mutex_slowunlock(&lock->lock);
+}
+EXPORT_SYMBOL(rt_spin_unlock);
+
+/*
+ * Wait for the lock to get unlocked: instead of polling for an unlock
+ * (like raw spinlocks do), lock and unlock, to force the kernel to
+ * schedule if there's contention:
+ */
+void __sched rt_spin_lock_unlock(spinlock_t *lock)
+{
+       spin_lock(lock);
+       spin_unlock(lock);
+}
+EXPORT_SYMBOL(rt_spin_lock_unlock);
+
+static __always_inline int __rt_spin_trylock(spinlock_t *lock)
+{
+       int ret = 1;
+
+       if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
+               ret = rt_mutex_slowtrylock(&lock->lock);
+
+       if (ret) {
+               spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+               rcu_read_lock();
+               migrate_disable();
+       }
+       return ret;
+}
+
+int __sched rt_spin_trylock(spinlock_t *lock)
+{
+       return __rt_spin_trylock(lock);
+}
+EXPORT_SYMBOL(rt_spin_trylock);
+
+int __sched rt_spin_trylock_bh(spinlock_t *lock)
+{
+       int ret;
+
+       local_bh_disable();
+       ret = __rt_spin_trylock(lock);
+       if (!ret)
+               local_bh_enable();
+       return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_bh);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+                        struct lock_class_key *key, bool percpu)
+{
+       u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;
+
+       debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+       lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
+                             LD_WAIT_INV, type);
+}
+EXPORT_SYMBOL(__rt_spin_lock_init);
+#endif
+
+/*
+ * RT-specific reader/writer locks
+ */
+#define rwbase_set_and_save_current_state(state)       \
+       current_save_and_set_rtlock_wait_state()
+
+#define rwbase_restore_current_state()                 \
+       current_restore_rtlock_saved_state()
+
+static __always_inline int
+rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
+{
+       if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
+               rtlock_slowlock(rtm);
+       return 0;
+}
+
+static __always_inline int
+rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
+{
+       rtlock_slowlock_locked(rtm);
+       return 0;
+}
+
+static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
+{
+       if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
+               return;
+
+       rt_mutex_slowunlock(rtm);
+}
+
+static __always_inline int  rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
+{
+       if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
+               return 1;
+
+       return rt_mutex_slowtrylock(rtm);
+}
+
+#define rwbase_signal_pending_state(state, current)    (0)
+
+#define rwbase_schedule()                              \
+       schedule_rtlock()
+
+#include "rwbase_rt.c"
+/*
+ * The common functions which get wrapped into the rwlock API.
+ */
+int __sched rt_read_trylock(rwlock_t *rwlock)
+{
+       int ret;
+
+       ret = rwbase_read_trylock(&rwlock->rwbase);
+       if (ret) {
+               rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+               rcu_read_lock();
+               migrate_disable();
+       }
+       return ret;
+}
+EXPORT_SYMBOL(rt_read_trylock);
+
+int __sched rt_write_trylock(rwlock_t *rwlock)
+{
+       int ret;
+
+       ret = rwbase_write_trylock(&rwlock->rwbase);
+       if (ret) {
+               rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+               rcu_read_lock();
+               migrate_disable();
+       }
+       return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock);
+
+void __sched rt_read_lock(rwlock_t *rwlock)
+{
+       ___might_sleep(__FILE__, __LINE__, 0);
+       rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+       rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
+       rcu_read_lock();
+       migrate_disable();
+}
+EXPORT_SYMBOL(rt_read_lock);
+
+void __sched rt_write_lock(rwlock_t *rwlock)
+{
+       ___might_sleep(__FILE__, __LINE__, 0);
+       rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+       rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
+       rcu_read_lock();
+       migrate_disable();
+}
+EXPORT_SYMBOL(rt_write_lock);
+
+void __sched rt_read_unlock(rwlock_t *rwlock)
+{
+       rwlock_release(&rwlock->dep_map, _RET_IP_);
+       migrate_enable();
+       rcu_read_unlock();
+       rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
+}
+EXPORT_SYMBOL(rt_read_unlock);
+
+void __sched rt_write_unlock(rwlock_t *rwlock)
+{
+       rwlock_release(&rwlock->dep_map, _RET_IP_);
+       rcu_read_unlock();
+       migrate_enable();
+       rwbase_write_unlock(&rwlock->rwbase);
+}
+EXPORT_SYMBOL(rt_write_unlock);
+
+int __sched rt_rwlock_is_contended(rwlock_t *rwlock)
+{
+       return rw_base_is_contended(&rwlock->rwbase);
+}
+EXPORT_SYMBOL(rt_rwlock_is_contended);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
+                     struct lock_class_key *key)
+{
+       debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
+       lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
+}
+EXPORT_SYMBOL(__rt_rwlock_init);
+#endif
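The rwlock wrappers above likewise keep the usual reader/writer API. A usage sketch under that assumption (example_rwlock is hypothetical):

    static DEFINE_RWLOCK(example_rwlock);

    static void example_lookup(void)
    {
            read_lock(&example_rwlock);
            /* a sleeping lock on RT; rcu_read_lock() and migrate_disable() are implied */
            read_unlock(&example_rwlock);
    }

    static void example_modify(void)
    {
            write_lock(&example_rwlock);
            /* exclusive against readers and other writers */
            write_unlock(&example_rwlock);
    }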
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
new file mode 100644 (file)
index 0000000..56f1392
--- /dev/null
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef WW_RT
+
+#define MUTEX          mutex
+#define MUTEX_WAITER   mutex_waiter
+
+static inline struct mutex_waiter *
+__ww_waiter_first(struct mutex *lock)
+{
+       struct mutex_waiter *w;
+
+       w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
+       if (list_entry_is_head(w, &lock->wait_list, list))
+               return NULL;
+
+       return w;
+}
+
+static inline struct mutex_waiter *
+__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
+{
+       w = list_next_entry(w, list);
+       if (list_entry_is_head(w, &lock->wait_list, list))
+               return NULL;
+
+       return w;
+}
+
+static inline struct mutex_waiter *
+__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
+{
+       w = list_prev_entry(w, list);
+       if (list_entry_is_head(w, &lock->wait_list, list))
+               return NULL;
+
+       return w;
+}
+
+static inline struct mutex_waiter *
+__ww_waiter_last(struct mutex *lock)
+{
+       struct mutex_waiter *w;
+
+       w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
+       if (list_entry_is_head(w, &lock->wait_list, list))
+               return NULL;
+
+       return w;
+}
+
+static inline void
+__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
+{
+       struct list_head *p = &lock->wait_list;
+       if (pos)
+               p = &pos->list;
+       __mutex_add_waiter(lock, waiter, p);
+}
+
+static inline struct task_struct *
+__ww_mutex_owner(struct mutex *lock)
+{
+       return __mutex_owner(lock);
+}
+
+static inline bool
+__ww_mutex_has_waiters(struct mutex *lock)
+{
+       return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
+}
+
+static inline void lock_wait_lock(struct mutex *lock)
+{
+       raw_spin_lock(&lock->wait_lock);
+}
+
+static inline void unlock_wait_lock(struct mutex *lock)
+{
+       raw_spin_unlock(&lock->wait_lock);
+}
+
+static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
+{
+       lockdep_assert_held(&lock->wait_lock);
+}
+
+#else /* WW_RT */
+
+#define MUTEX          rt_mutex
+#define MUTEX_WAITER   rt_mutex_waiter
+
+static inline struct rt_mutex_waiter *
+__ww_waiter_first(struct rt_mutex *lock)
+{
+       struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
+       if (!n)
+               return NULL;
+       return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+}
+
+static inline struct rt_mutex_waiter *
+__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
+{
+       struct rb_node *n = rb_next(&w->tree_entry);
+       if (!n)
+               return NULL;
+       return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+}
+
+static inline struct rt_mutex_waiter *
+__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
+{
+       struct rb_node *n = rb_prev(&w->tree_entry);
+       if (!n)
+               return NULL;
+       return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+}
+
+static inline struct rt_mutex_waiter *
+__ww_waiter_last(struct rt_mutex *lock)
+{
+       struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
+       if (!n)
+               return NULL;
+       return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+}
+
+static inline void
+__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
+{
+       /* RT unconditionally adds the waiter first and then removes it on error */
+}
+
+static inline struct task_struct *
+__ww_mutex_owner(struct rt_mutex *lock)
+{
+       return rt_mutex_owner(&lock->rtmutex);
+}
+
+static inline bool
+__ww_mutex_has_waiters(struct rt_mutex *lock)
+{
+       return rt_mutex_has_waiters(&lock->rtmutex);
+}
+
+static inline void lock_wait_lock(struct rt_mutex *lock)
+{
+       raw_spin_lock(&lock->rtmutex.wait_lock);
+}
+
+static inline void unlock_wait_lock(struct rt_mutex *lock)
+{
+       raw_spin_unlock(&lock->rtmutex.wait_lock);
+}
+
+static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
+{
+       lockdep_assert_held(&lock->rtmutex.wait_lock);
+}
+
+#endif /* WW_RT */
+
+/*
+ * Wait-Die:
+ *   A newer transaction is killed when:
+ *     It (the new transaction) makes a request for a lock being held
+ *     by an older transaction.
+ *
+ * Wound-Wait:
+ *   A newer transaction is wounded when:
+ *     An older transaction makes a request for a lock being held by
+ *     the newer transaction.
+ */
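As a rough, standalone illustration of the two policies described above (not part of the patch itself; the helper name and enum are hypothetical, kernel context assumed), the decision a requesting transaction faces against the current lock holder can be sketched as:

enum ww_action { WW_WAIT, WW_DIE, WW_WOUND_HOLDER };

/*
 * Decide what a requester does when the lock is already held.
 * Smaller stamp means older; "bigger is younger", as in __ww_ctx_less().
 */
static enum ww_action ww_resolve(unsigned long requester_stamp,
				 unsigned long holder_stamp, bool wait_die)
{
	bool requester_is_younger = (long)(requester_stamp - holder_stamp) > 0;

	if (wait_die)
		/* Wait-Die: a younger requester dies, an older one waits. */
		return requester_is_younger ? WW_DIE : WW_WAIT;

	/* Wound-Wait: an older requester wounds the younger lock holder. */
	return requester_is_younger ? WW_WAIT : WW_WOUND_HOLDER;
}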
+
+/*
+ * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
+ * it.
+ */
+static __always_inline void
+ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
+{
+#ifdef DEBUG_WW_MUTEXES
+       /*
+        * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
+        * but released with a normal mutex_unlock in this call.
+        *
+        * This should never happen, always use ww_mutex_unlock.
+        */
+       DEBUG_LOCKS_WARN_ON(ww->ctx);
+
+       /*
+        * Not quite done after calling ww_acquire_done() ?
+        */
+       DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+
+       if (ww_ctx->contending_lock) {
+               /*
+                * After -EDEADLK you tried to
+                * acquire a different ww_mutex? Bad!
+                */
+               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
+
+               /*
+                * You called ww_mutex_lock after receiving -EDEADLK,
+                * but 'forgot' to unlock everything else first?
+                */
+               DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
+               ww_ctx->contending_lock = NULL;
+       }
+
+       /*
+        * Naughty, using a different class will lead to undefined behavior!
+        */
+       DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
+#endif
+       ww_ctx->acquired++;
+       ww->ctx = ww_ctx;
+}
+
+/*
+ * Determine if @a is 'less' than @b. IOW, either @a is a lower priority task
+ * or, when of equal priority, a younger transaction than @b.
+ *
+ * Depending on the algorithm, @a will either need to wait for @b, or die.
+ */
+static inline bool
+__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
+{
+/*
+ * Can only do the RT prio for WW_RT, because task->prio isn't stable due to PI,
+ * so the wait_list ordering will go wobbly. rt_mutex re-queues the waiter and
+ * isn't affected by this.
+ */
+#ifdef WW_RT
+       /* kernel prio; less is more */
+       int a_prio = a->task->prio;
+       int b_prio = b->task->prio;
+
+       if (rt_prio(a_prio) || rt_prio(b_prio)) {
+
+               if (a_prio > b_prio)
+                       return true;
+
+               if (a_prio < b_prio)
+                       return false;
+
+               /* equal static prio */
+
+               if (dl_prio(a_prio)) {
+                       if (dl_time_before(b->task->dl.deadline,
+                                          a->task->dl.deadline))
+                               return true;
+
+                       if (dl_time_before(a->task->dl.deadline,
+                                          b->task->dl.deadline))
+                               return false;
+               }
+
+               /* equal prio */
+       }
+#endif
+
+       /* FIFO order tie break -- bigger is younger */
+       return (signed long)(a->stamp - b->stamp) > 0;
+}
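The tie break above uses the usual unsigned-subtraction idiom, so it keeps working even after the stamp counter wraps. A quick worked example (values illustrative): with the older context's stamp equal to ULONG_MAX - 1 and the younger context's stamp equal to 2 after wraparound, the difference 2 - (ULONG_MAX - 1) wraps to 4, and (signed long)4 > 0 still identifies the second context as the younger one.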
+
+/*
+ * Wait-Die; wake a lesser waiter context (when locks held) such that it can
+ * die.
+ *
+ * Among waiters with context, only the first one can have other locks acquired
+ * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
+ * __ww_mutex_check_kill() wake any but the earliest context.
+ */
+static bool
+__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
+              struct ww_acquire_ctx *ww_ctx)
+{
+       if (!ww_ctx->is_wait_die)
+               return false;
+
+       if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
+#ifndef WW_RT
+               debug_mutex_wake_waiter(lock, waiter);
+#endif
+               wake_up_process(waiter->task);
+       }
+
+       return true;
+}
+
+/*
+ * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
+ *
+ * Wound the lock holder if there are waiters with more important transactions
+ * than the lock holder. Even if multiple waiters may wound the lock holder,
+ * it's sufficient that only one does.
+ */
+static bool __ww_mutex_wound(struct MUTEX *lock,
+                            struct ww_acquire_ctx *ww_ctx,
+                            struct ww_acquire_ctx *hold_ctx)
+{
+       struct task_struct *owner = __ww_mutex_owner(lock);
+
+       lockdep_assert_wait_lock_held(lock);
+
+       /*
+        * Possible through __ww_mutex_add_waiter() when we race with
+        * ww_mutex_set_context_fastpath(). In that case we'll get here again
+        * through __ww_mutex_check_waiters().
+        */
+       if (!hold_ctx)
+               return false;
+
+       /*
+        * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
+        * it cannot go away because we'll have FLAG_WAITERS set and hold
+        * wait_lock.
+        */
+       if (!owner)
+               return false;
+
+       if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
+               hold_ctx->wounded = 1;
+
+               /*
+                * wake_up_process() paired with set_current_state()
+                * inserts sufficient barriers to make sure @owner either sees
+                * it's wounded in __ww_mutex_check_kill() or has a
+                * wakeup pending to re-read the wounded state.
+                */
+               if (owner != current)
+                       wake_up_process(owner);
+
+               return true;
+       }
+
+       return false;
+}
+
+/*
+ * We just acquired @lock under @ww_ctx, if there are more important contexts
+ * waiting behind us on the wait-list, check if they need to die, or wound us.
+ *
+ * See __ww_mutex_add_waiter() for the list-order construction; basically the
+ * list is ordered by stamp, smallest (oldest) first.
+ *
+ * This relies on never mixing wait-die/wound-wait on the same wait-list;
+ * which is currently ensured by that being a ww_class property.
+ *
+ * The current task must not be on the wait list.
+ */
+static void
+__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
+{
+       struct MUTEX_WAITER *cur;
+
+       lockdep_assert_wait_lock_held(lock);
+
+       for (cur = __ww_waiter_first(lock); cur;
+            cur = __ww_waiter_next(lock, cur)) {
+
+               if (!cur->ww_ctx)
+                       continue;
+
+               if (__ww_mutex_die(lock, cur, ww_ctx) ||
+                   __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
+                       break;
+       }
+}
+
+/*
+ * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
+ * and wake up any waiters so they can recheck.
+ */
+static __always_inline void
+ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+       ww_mutex_lock_acquired(lock, ctx);
+
+       /*
+        * The lock->ctx update should be visible on all cores before
+        * the WAITERS check is done, otherwise contended waiters might be
+        * missed. The contended waiters will either see ww_ctx == NULL
+        * and keep spinning, or it will acquire wait_lock, add itself
+        * to waiter list and sleep.
+        */
+       smp_mb(); /* See comments above and below. */
+
+       /*
+        * [W] ww->ctx = ctx        [W] MUTEX_FLAG_WAITERS
+        *     MB                       MB
+        * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
+        *
+        * The memory barrier above pairs with the memory barrier in
+        * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
+        * and/or !empty list.
+        */
+       if (likely(!__ww_mutex_has_waiters(&lock->base)))
+               return;
+
+       /*
+        * Uh oh, we raced in fastpath, check if any of the waiters need to
+        * die or wound us.
+        */
+       lock_wait_lock(&lock->base);
+       __ww_mutex_check_waiters(&lock->base, ctx);
+       unlock_wait_lock(&lock->base);
+}
+
+static __always_inline int
+__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
+{
+       if (ww_ctx->acquired > 0) {
+#ifdef DEBUG_WW_MUTEXES
+               struct ww_mutex *ww;
+
+               ww = container_of(lock, struct ww_mutex, base);
+               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
+               ww_ctx->contending_lock = ww;
+#endif
+               return -EDEADLK;
+       }
+
+       return 0;
+}
+
+/*
+ * Check the wound condition for the current lock acquire.
+ *
+ * Wound-Wait: If we're wounded, kill ourselves.
+ *
+ * Wait-Die: If we're trying to acquire a lock already held by an older
+ *           context, kill ourselves.
+ *
+ * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
+ * look at waiters before us in the wait-list.
+ */
+static inline int
+__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
+                     struct ww_acquire_ctx *ctx)
+{
+       struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+       struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
+       struct MUTEX_WAITER *cur;
+
+       if (ctx->acquired == 0)
+               return 0;
+
+       if (!ctx->is_wait_die) {
+               if (ctx->wounded)
+                       return __ww_mutex_kill(lock, ctx);
+
+               return 0;
+       }
+
+       if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
+               return __ww_mutex_kill(lock, ctx);
+
+       /*
+        * If there is a waiter in front of us that has a context, then its
+        * stamp is earlier than ours and we must kill ourselves.
+        */
+       for (cur = __ww_waiter_prev(lock, waiter); cur;
+            cur = __ww_waiter_prev(lock, cur)) {
+
+               if (!cur->ww_ctx)
+                       continue;
+
+               return __ww_mutex_kill(lock, ctx);
+       }
+
+       return 0;
+}
+
+/*
+ * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
+ * first. Such that older contexts are preferred to acquire the lock over
+ * younger contexts.
+ *
+ * Waiters without context are interspersed in FIFO order.
+ *
+ * Furthermore, for Wait-Die, kill ourselves immediately when possible (there
+ * are older contexts already waiting) to avoid unnecessary waiting, and for
+ * Wound-Wait, ensure we wound the owning context when it is younger.
+ */
+static inline int
+__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
+                     struct MUTEX *lock,
+                     struct ww_acquire_ctx *ww_ctx)
+{
+       struct MUTEX_WAITER *cur, *pos = NULL;
+       bool is_wait_die;
+
+       if (!ww_ctx) {
+               __ww_waiter_add(lock, waiter, NULL);
+               return 0;
+       }
+
+       is_wait_die = ww_ctx->is_wait_die;
+
+       /*
+        * Add the waiter before the first waiter with a higher stamp.
+        * Waiters without a context are skipped to avoid starving
+        * them. Wait-Die waiters may die here. Wound-Wait waiters
+        * never die here, but they are sorted in stamp order and
+        * may wound the lock holder.
+        */
+       for (cur = __ww_waiter_last(lock); cur;
+            cur = __ww_waiter_prev(lock, cur)) {
+
+               if (!cur->ww_ctx)
+                       continue;
+
+               if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
+                       /*
+                        * Wait-Die: if we find an older context waiting, there
+                        * is no point in queueing behind it, as we'd have to
+                        * die the moment it would acquire the lock.
+                        */
+                       if (is_wait_die) {
+                               int ret = __ww_mutex_kill(lock, ww_ctx);
+
+                               if (ret)
+                                       return ret;
+                       }
+
+                       break;
+               }
+
+               pos = cur;
+
+               /* Wait-Die: ensure younger waiters die. */
+               __ww_mutex_die(lock, cur, ww_ctx);
+       }
+
+       __ww_waiter_add(lock, waiter, pos);
+
+       /*
+        * Wound-Wait: if we're blocking on a mutex owned by a younger context,
+        * wound it so that we might proceed.
+        */
+       if (!is_wait_die) {
+               struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+
+               /*
+                * See ww_mutex_set_context_fastpath(). Orders setting
+                * MUTEX_FLAG_WAITERS vs the ww->ctx load,
+                * such that either we or the fastpath will wound @ww->ctx.
+                */
+               smp_mb();
+               __ww_mutex_wound(lock, ww_ctx, ww->ctx);
+       }
+
+       return 0;
+}
+
+static inline void __ww_mutex_unlock(struct ww_mutex *lock)
+{
+       if (lock->ctx) {
+#ifdef DEBUG_WW_MUTEXES
+               DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
+#endif
+               if (lock->ctx->acquired > 0)
+                       lock->ctx->acquired--;
+               lock->ctx = NULL;
+       }
+}
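For context, the API that both variants of this header ultimately back is used along the lines of the pattern below, a condensed sketch in the spirit of Documentation/locking/ww-mutex-design.rst; the obj structure, my_ww_class and lock_pair() are placeholder names, not code from this patch:

static DEFINE_WW_CLASS(my_ww_class);

struct obj {
	struct ww_mutex lock;
	/* ... payload ... */
};

/* Acquire two objects' locks in any order, backing off on -EDEADLK. */
static void lock_pair(struct obj *o1, struct obj *o2, struct ww_acquire_ctx *ctx)
{
	struct obj *first = o1, *second = o2;
	int err;

	ww_acquire_init(ctx, &my_ww_class);
	for (;;) {
		err = ww_mutex_lock(&first->lock, ctx);	/* no locks held: waits, returns 0 */
		if (WARN_ON_ONCE(err))
			break;
		err = ww_mutex_lock(&second->lock, ctx);
		if (!err)
			break;				/* both locks held */
		/* -EDEADLK: drop what we hold, retry with the contended lock first */
		ww_mutex_unlock(&first->lock);
		swap(first, second);
	}
	ww_acquire_done(ctx);
	/* ... use the objects, then ww_mutex_unlock() both and ww_acquire_fini(ctx) ... */
}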
diff --git a/kernel/locking/ww_rt_mutex.c b/kernel/locking/ww_rt_mutex.c
new file mode 100644 (file)
index 0000000..3f1fff7
--- /dev/null
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * rtmutex API
+ */
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#define RT_MUTEX_BUILD_MUTEX
+#define WW_RT
+#include "rtmutex.c"
+
+static int __sched
+__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
+                  unsigned int state, unsigned long ip)
+{
+       struct lockdep_map __maybe_unused *nest_lock = NULL;
+       struct rt_mutex *rtm = &lock->base;
+       int ret;
+
+       might_sleep();
+
+       if (ww_ctx) {
+               if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
+                       return -EALREADY;
+
+               /*
+                * Reset the wounded flag after a kill. No other process can
+                * race and wound us here, since they can't have a valid owner
+                * pointer if we don't have any locks held.
+                */
+               if (ww_ctx->acquired == 0)
+                       ww_ctx->wounded = 0;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+               nest_lock = &ww_ctx->dep_map;
+#endif
+       }
+       mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
+
+       if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
+               if (ww_ctx)
+                       ww_mutex_set_context_fastpath(lock, ww_ctx);
+               return 0;
+       }
+
+       ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);
+
+       if (ret)
+               mutex_release(&rtm->dep_map, ip);
+       return ret;
+}
+
+int __sched
+ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+       return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
+}
+EXPORT_SYMBOL(ww_mutex_lock);
+
+int __sched
+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+       return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
+}
+EXPORT_SYMBOL(ww_mutex_lock_interruptible);
+
+void __sched ww_mutex_unlock(struct ww_mutex *lock)
+{
+       struct rt_mutex *rtm = &lock->base;
+
+       __ww_mutex_unlock(lock);
+
+       mutex_release(&rtm->dep_map, _RET_IP_);
+       __rt_mutex_unlock(&rtm->rtmutex);
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
index 1b019cb..b8251dc 100644 (file)
@@ -172,25 +172,6 @@ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
 }
 EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
 
-int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
-               unsigned long val_up, unsigned long val_down, void *v)
-{
-       unsigned long flags;
-       int ret;
-
-       /*
-        * Musn't use RCU; because then the notifier list can
-        * change between the up and down traversal.
-        */
-       spin_lock_irqsave(&nh->lock, flags);
-       ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
-       spin_unlock_irqrestore(&nh->lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(atomic_notifier_call_chain_robust);
-NOKPROBE_SYMBOL(atomic_notifier_call_chain_robust);
-
 /**
  *     atomic_notifier_call_chain - Call functions in an atomic notifier chain
  *     @nh: Pointer to head of the atomic notifier chain
index d4d3ba6..18d3a5c 100644 (file)
@@ -9,19 +9,6 @@
  *
  * Copyright (c) 2020 Oracle and/or its affiliates.
  * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #include <linux/completion.h>
@@ -211,7 +198,7 @@ int padata_do_parallel(struct padata_shell *ps,
        if ((pinst->flags & PADATA_RESET))
                goto out;
 
-       atomic_inc(&pd->refcnt);
+       refcount_inc(&pd->refcnt);
        padata->pd = pd;
        padata->cb_cpu = *cb_cpu;
 
@@ -383,7 +370,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
        }
        local_bh_enable();
 
-       if (atomic_sub_and_test(cnt, &pd->refcnt))
+       if (refcount_sub_and_test(cnt, &pd->refcnt))
                padata_free_pd(pd);
 }
 
@@ -593,7 +580,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
        padata_init_reorder_list(pd);
        padata_init_squeues(pd);
        pd->seq_nr = -1;
-       atomic_set(&pd->refcnt, 1);
+       refcount_set(&pd->refcnt, 1);
        spin_lock_init(&pd->lock);
        pd->cpu = cpumask_first(pd->cpumask.pcpu);
        INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
@@ -667,7 +654,7 @@ static int padata_replace(struct padata_instance *pinst)
        synchronize_rcu();
 
        list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
-               if (atomic_dec_and_test(&ps->opd->refcnt))
+               if (refcount_dec_and_test(&ps->opd->refcnt))
                        padata_free_pd(ps->opd);
 
        pinst->flags &= ~PADATA_RESET;
@@ -733,7 +720,7 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&pinst->lock);
 
        switch (cpumask_type) {
@@ -753,7 +740,7 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
 
 out:
        mutex_unlock(&pinst->lock);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return err;
 }
@@ -992,7 +979,7 @@ struct padata_instance *padata_alloc(const char *name)
        if (!pinst->parallel_wq)
                goto err_free_inst;
 
-       get_online_cpus();
+       cpus_read_lock();
 
        pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
                                           WQ_CPU_INTENSIVE, 1, name);
@@ -1026,7 +1013,7 @@ struct padata_instance *padata_alloc(const char *name)
                                                    &pinst->cpu_dead_node);
 #endif
 
-       put_online_cpus();
+       cpus_read_unlock();
 
        return pinst;
 
@@ -1036,7 +1023,7 @@ err_free_masks:
 err_free_serial_wq:
        destroy_workqueue(pinst->serial_wq);
 err_put_cpus:
-       put_online_cpus();
+       cpus_read_unlock();
        destroy_workqueue(pinst->parallel_wq);
 err_free_inst:
        kfree(pinst);
@@ -1074,9 +1061,9 @@ struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
 
        ps->pinst = pinst;
 
-       get_online_cpus();
+       cpus_read_lock();
        pd = padata_alloc_pd(ps);
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (!pd)
                goto out_free_ps;
index 2daa278..8299bd7 100644 (file)
@@ -243,6 +243,24 @@ STANDARD_PARAM_DEF(ulong,  unsigned long,          "%lu",          kstrtoul);
 STANDARD_PARAM_DEF(ullong,     unsigned long long,     "%llu",         kstrtoull);
 STANDARD_PARAM_DEF(hexint,     unsigned int,           "%#08x",        kstrtouint);
 
+int param_set_uint_minmax(const char *val, const struct kernel_param *kp,
+               unsigned int min, unsigned int max)
+{
+       unsigned int num;
+       int ret;
+
+       if (!val)
+               return -EINVAL;
+       ret = kstrtouint(val, 0, &num);
+       if (ret)
+               return ret;
+       if (num < min || num > max)
+               return -EINVAL;
+       *((unsigned int *)kp->arg) = num;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(param_set_uint_minmax);
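One natural caller of the new helper is a kernel_param_ops ->set() handler; a minimal sketch of such a wrapper follows (the port-number naming and srv_port parameter are illustrative only, not part of this patch):

static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
	/* Reject anything outside the valid TCP/UDP port range. */
	return param_set_uint_minmax(val, kp, 0, 65535);
}

static const struct kernel_param_ops param_ops_portnr = {
	.set = param_set_portnr,
	.get = param_get_uint,
};
#define param_check_portnr(name, p) __param_check(name, p, unsigned int)

static unsigned int srv_port = 2049;
module_param(srv_port, portnr, 0644);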
+
 int param_set_charp(const char *val, const struct kernel_param *kp)
 {
        if (strlen(val) > 1024) {
index ebdf9c6..efe87db 100644 (file)
@@ -550,13 +550,21 @@ struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
  * Note, that this function can only be called after the fd table has
  * been unshared to avoid leaking the pidfd to the new process.
  *
+ * This symbol should not be explicitly exported to loadable modules.
+ *
  * Return: On success, a cloexec pidfd is returned.
  *         On error, a negative errno number will be returned.
  */
-static int pidfd_create(struct pid *pid, unsigned int flags)
+int pidfd_create(struct pid *pid, unsigned int flags)
 {
        int fd;
 
+       if (!pid || !pid_has_task(pid, PIDTYPE_TGID))
+               return -EINVAL;
+
+       if (flags & ~(O_NONBLOCK | O_RDWR | O_CLOEXEC))
+               return -EINVAL;
+
        fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
                              flags | O_RDWR | O_CLOEXEC);
        if (fd < 0)
@@ -596,10 +604,7 @@ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
        if (!p)
                return -ESRCH;
 
-       if (pid_has_task(p, PIDTYPE_TGID))
-               fd = pidfd_create(p, flags);
-       else
-               fd = -EINVAL;
+       fd = pidfd_create(p, flags);
 
        put_pid(p);
        return fd;
index 0f4530b..a332ccd 100644 (file)
@@ -170,7 +170,9 @@ static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
        /* Compute the cost of each performance state. */
        fmax = (u64) table[nr_states - 1].frequency;
        for (i = 0; i < nr_states; i++) {
-               table[i].cost = div64_u64(fmax * table[i].power,
+               unsigned long power_res = em_scale_power(table[i].power);
+
+               table[i].cost = div64_u64(fmax * power_res,
                                          table[i].frequency);
        }
 
index 12c7e1b..44169f3 100644 (file)
@@ -577,7 +577,7 @@ static inline void pm_print_times_init(void) {}
 
 struct kobject *power_kobj;
 
-/**
+/*
  * state - control system sleep states.
  *
  * show() returns available sleep state labels, which may be "mem", "standby",
index d8cae43..eb75f39 100644 (file)
@@ -96,7 +96,7 @@ static void s2idle_enter(void)
        s2idle_state = S2IDLE_STATE_ENTER;
        raw_spin_unlock_irq(&s2idle_lock);
 
-       get_online_cpus();
+       cpus_read_lock();
        cpuidle_resume();
 
        /* Push all the CPUs into the idle loop. */
@@ -106,7 +106,7 @@ static void s2idle_enter(void)
                    s2idle_state == S2IDLE_STATE_WAKE);
 
        cpuidle_pause();
-       put_online_cpus();
+       cpus_read_unlock();
 
        raw_spin_lock_irq(&s2idle_lock);
 
index e1ed58a..d20526c 100644 (file)
@@ -129,7 +129,7 @@ static int __init has_wakealarm(struct device *dev, const void *data)
 {
        struct rtc_device *candidate = to_rtc_device(dev);
 
-       if (!candidate->ops->set_alarm)
+       if (!test_bit(RTC_FEATURE_ALARM, candidate->features))
                return 0;
        if (!device_may_wakeup(candidate->dev.parent))
                return 0;
index dca51fe..2cc34a2 100644 (file)
@@ -487,7 +487,7 @@ retry:
        if (gp_async) {
                cur_ops->gp_barrier();
        }
-       writer_n_durations[me] = i_max;
+       writer_n_durations[me] = i_max + 1;
        torture_kthread_stopping("rcu_scale_writer");
        return 0;
 }
@@ -561,7 +561,7 @@ rcu_scale_cleanup(void)
                        wdpp = writer_durations[i];
                        if (!wdpp)
                                continue;
-                       for (j = 0; j <= writer_n_durations[i]; j++) {
+                       for (j = 0; j < writer_n_durations[i]; j++) {
                                wdp = &wdpp[j];
                                pr_alert("%s%s %4d writer-duration: %5d %llu\n",
                                        scale_type, SCALE_FLAG,
index 40ef541..ab42152 100644 (file)
@@ -2022,8 +2022,13 @@ static int rcu_torture_stall(void *args)
                          __func__, raw_smp_processor_id());
                while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
                                    stop_at))
-                       if (stall_cpu_block)
+                       if (stall_cpu_block) {
+#ifdef CONFIG_PREEMPTION
+                               preempt_schedule();
+#else
                                schedule_timeout_uninterruptible(HZ);
+#endif
+                       }
                if (stall_cpu_irqsoff)
                        local_irq_enable();
                else if (!stall_cpu_block)
index d998a76..66dc14c 100644 (file)
@@ -467,6 +467,40 @@ static struct ref_scale_ops acqrel_ops = {
        .name           = "acqrel"
 };
 
+static volatile u64 stopopts;
+
+static void ref_clock_section(const int nloops)
+{
+       u64 x = 0;
+       int i;
+
+       preempt_disable();
+       for (i = nloops; i >= 0; i--)
+               x += ktime_get_real_fast_ns();
+       preempt_enable();
+       stopopts = x;
+}
+
+static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
+{
+       u64 x = 0;
+       int i;
+
+       preempt_disable();
+       for (i = nloops; i >= 0; i--) {
+               x += ktime_get_real_fast_ns();
+               un_delay(udl, ndl);
+       }
+       preempt_enable();
+       stopopts = x;
+}
+
+static struct ref_scale_ops clock_ops = {
+       .readsection    = ref_clock_section,
+       .delaysection   = ref_clock_delay_section,
+       .name           = "clock"
+};
+
 static void rcu_scale_one_reader(void)
 {
        if (readdelay <= 0)
@@ -759,7 +793,7 @@ ref_scale_init(void)
        int firsterr = 0;
        static struct ref_scale_ops *scale_ops[] = {
                &rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops, &refcnt_ops, &rwlock_ops,
-               &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops,
+               &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops,
        };
 
        if (!torture_init_begin(scale_type, verbose))
index 26344dc..a0ba2ed 100644 (file)
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
  */
 void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
-       int newval = ssp->srcu_lock_nesting[idx] - 1;
+       int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;
 
        WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
        if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
index 8536c55..806160c 100644 (file)
@@ -643,8 +643,8 @@ void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
 //
 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
 // passing an empty function to schedule_on_each_cpu().  This approach
-// provides an asynchronous call_rcu_tasks_rude() API and batching
-// of concurrent calls to the synchronous synchronize_rcu_rude() API.
+// provides an asynchronous call_rcu_tasks_rude() API and batching of
+// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
 // and induces otherwise unnecessary context switches on all online CPUs,
 // whether idle or not.
@@ -785,7 +785,10 @@ EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
 //     set that task's .need_qs flag so that task's next outermost
 //     rcu_read_unlock_trace() will report the quiescent state (in which
 //     case the count of readers is incremented).  If both attempts fail,
-//     the task is added to a "holdout" list.
+//     the task is added to a "holdout" list.  Note that IPIs are used
+//     to invoke trc_read_check_handler() in the context of running tasks
+//     in order to avoid ordering overhead on common-case shared-variable
+//     accesses.
 // rcu_tasks_trace_postscan():
 //     Initialize state and attempt to identify an immediate quiescent
 //     state as above (but only for idle tasks), unblock CPU-hotplug
@@ -847,7 +850,7 @@ static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
 /* If we are the last reader, wake up the grace-period kthread. */
 void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
 {
-       int nq = t->trc_reader_special.b.need_qs;
+       int nq = READ_ONCE(t->trc_reader_special.b.need_qs);
 
        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
            t->trc_reader_special.b.need_mb)
@@ -894,7 +897,7 @@ static void trc_read_check_handler(void *t_in)
 
        // If the task is not in a read-side critical section, and
        // if this is the last reader, awaken the grace-period kthread.
-       if (likely(!t->trc_reader_nesting)) {
+       if (likely(!READ_ONCE(t->trc_reader_nesting))) {
                if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
                        wake_up(&trc_wait);
                // Mark as checked after decrement to avoid false
@@ -903,7 +906,7 @@ static void trc_read_check_handler(void *t_in)
                goto reset_ipi;
        }
        // If we are racing with an rcu_read_unlock_trace(), try again later.
-       if (unlikely(t->trc_reader_nesting < 0)) {
+       if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) {
                if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
                        wake_up(&trc_wait);
                goto reset_ipi;
@@ -913,14 +916,14 @@ static void trc_read_check_handler(void *t_in)
        // Get here if the task is in a read-side critical section.  Set
        // its state so that it will awaken the grace-period kthread upon
        // exit from that critical section.
-       WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+       WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
        WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
 
 reset_ipi:
        // Allow future IPIs to be sent on CPU and for task.
        // Also order this IPI handler against any later manipulations of
        // the intended task.
-       smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
+       smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
        smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
 }
 
@@ -950,6 +953,7 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
                        n_heavy_reader_ofl_updates++;
                in_qs = true;
        } else {
+               // The task is not running, so C-language access is safe.
                in_qs = likely(!t->trc_reader_nesting);
        }
 
@@ -964,7 +968,7 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
        // state so that it will awaken the grace-period kthread upon exit
        // from that critical section.
        atomic_inc(&trc_n_readers_need_end); // One more to wait on.
-       WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+       WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
        WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
        return true;
 }
@@ -982,7 +986,7 @@ static void trc_wait_for_one_reader(struct task_struct *t,
        // The current task had better be in a quiescent state.
        if (t == current) {
                t->trc_reader_checked = true;
-               WARN_ON_ONCE(t->trc_reader_nesting);
+               WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
                return;
        }
 
@@ -994,6 +998,12 @@ static void trc_wait_for_one_reader(struct task_struct *t,
        }
        put_task_struct(t);
 
+       // If this task is not yet on the holdout list, then we are in
+       // an RCU read-side critical section.  Otherwise, the invocation of
+       // rcu_add_holdout() that added it to the list did the necessary
+       // get_task_struct().  Either way, the task cannot be freed out
+       // from under this code.
+
        // If currently running, send an IPI, either way, add to list.
        trc_add_holdout(t, bhp);
        if (task_curr(t) &&
@@ -1092,8 +1102,8 @@ static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
                 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
                 ".i"[is_idle_task(t)],
                 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
-                t->trc_reader_nesting,
-                " N"[!!t->trc_reader_special.b.need_qs],
+                READ_ONCE(t->trc_reader_nesting),
+                " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)],
                 cpu);
        sched_show_task(t);
 }
@@ -1187,7 +1197,7 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
 {
        WRITE_ONCE(t->trc_reader_checked, true);
-       WARN_ON_ONCE(t->trc_reader_nesting);
+       WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
        WRITE_ONCE(t->trc_reader_nesting, 0);
        if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
                rcu_read_unlock_trace_special(t, 0);
index 51f24ec..bce848e 100644 (file)
 
 /* Data structures. */
 
-/*
- * Steal a bit from the bottom of ->dynticks for idle entry/exit
- * control.  Initially this is for TLB flushing.
- */
-#define RCU_DYNTICK_CTRL_MASK 0x1
-#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
-
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
        .dynticks_nesting = 1,
        .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
-       .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
+       .dynticks = ATOMIC_INIT(1),
 #ifdef CONFIG_RCU_NOCB_CPU
        .cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
 #endif
@@ -258,6 +251,15 @@ void rcu_softirq_qs(void)
        rcu_tasks_qs(current, false);
 }
 
+/*
+ * Increment the current CPU's rcu_data structure's ->dynticks field
+ * with ordering.  Return the new value.
+ */
+static noinline noinstr unsigned long rcu_dynticks_inc(int incby)
+{
+       return arch_atomic_add_return(incby, this_cpu_ptr(&rcu_data.dynticks));
+}
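In other words, after this cleanup the parity of ->dynticks directly encodes the state, as the checks below show: starting from the initial value 1 (odd, RCU watching), rcu_dynticks_eqs_enter() increments it to an even value while inside the extended quiescent state, and rcu_dynticks_eqs_exit() increments it back to odd, which is exactly what the "seq & 0x1" warnings and rcu_dynticks_curr_cpu_in_eqs() test.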
+
 /*
  * Record entry into an extended quiescent state.  This is only to be
  * called when not already in an extended quiescent state, that is,
@@ -266,7 +268,6 @@ void rcu_softirq_qs(void)
  */
 static noinstr void rcu_dynticks_eqs_enter(void)
 {
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        int seq;
 
        /*
@@ -275,13 +276,9 @@ static noinstr void rcu_dynticks_eqs_enter(void)
         * next idle sojourn.
         */
        rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
-       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = rcu_dynticks_inc(1);
        // RCU is no longer watching.  Better be in extended quiescent state!
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    (seq & RCU_DYNTICK_CTRL_CTR));
-       /* Better not have special action (TLB flush) pending! */
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    (seq & RCU_DYNTICK_CTRL_MASK));
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & 0x1));
 }
 
 /*
@@ -291,7 +288,6 @@ static noinstr void rcu_dynticks_eqs_enter(void)
  */
 static noinstr void rcu_dynticks_eqs_exit(void)
 {
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        int seq;
 
        /*
@@ -299,15 +295,10 @@ static noinstr void rcu_dynticks_eqs_exit(void)
         * and we also must force ordering with the next RCU read-side
         * critical section.
         */
-       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = rcu_dynticks_inc(1);
        // RCU is now watching.  Better not be in an extended quiescent state!
        rcu_dynticks_task_trace_exit();  // After ->dynticks update!
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    !(seq & RCU_DYNTICK_CTRL_CTR));
-       if (seq & RCU_DYNTICK_CTRL_MASK) {
-               arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
-               smp_mb__after_atomic(); /* _exit after clearing mask. */
-       }
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & 0x1));
 }
 
 /*
@@ -324,9 +315,9 @@ static void rcu_dynticks_eqs_online(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
-       if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
+       if (atomic_read(&rdp->dynticks) & 0x1)
                return;
-       atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       rcu_dynticks_inc(1);
 }
 
 /*
@@ -336,9 +327,7 @@ static void rcu_dynticks_eqs_online(void)
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
-
-       return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
+       return !(atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1);
 }
 
 /*
@@ -347,9 +336,8 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
  */
 static int rcu_dynticks_snap(struct rcu_data *rdp)
 {
-       int snap = atomic_add_return(0, &rdp->dynticks);
-
-       return snap & ~RCU_DYNTICK_CTRL_MASK;
+       smp_mb();  // Fundamental RCU ordering guarantee.
+       return atomic_read_acquire(&rdp->dynticks);
 }
 
 /*
@@ -358,7 +346,7 @@ static int rcu_dynticks_snap(struct rcu_data *rdp)
  */
 static bool rcu_dynticks_in_eqs(int snap)
 {
-       return !(snap & RCU_DYNTICK_CTRL_CTR);
+       return !(snap & 0x1);
 }
 
 /* Return true if the specified CPU is currently idle from an RCU viewpoint.  */
@@ -389,8 +377,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
        int snap;
 
        // If not quiescent, force back to earlier extended quiescent state.
-       snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
-                                              RCU_DYNTICK_CTRL_CTR);
+       snap = atomic_read(&rdp->dynticks) & ~0x1;
 
        smp_rmb(); // Order ->dynticks and *vp reads.
        if (READ_ONCE(*vp))
@@ -398,32 +385,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
        smp_rmb(); // Order *vp read and ->dynticks re-read.
 
        // If still in the same extended quiescent state, we are good!
-       return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
-}
-
-/*
- * Set the special (bottom) bit of the specified CPU so that it
- * will take special action (such as flushing its TLB) on the
- * next exit from an extended quiescent state.  Returns true if
- * the bit was successfully set, or false if the CPU was not in
- * an extended quiescent state.
- */
-bool rcu_eqs_special_set(int cpu)
-{
-       int old;
-       int new;
-       int new_old;
-       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-
-       new_old = atomic_read(&rdp->dynticks);
-       do {
-               old = new_old;
-               if (old & RCU_DYNTICK_CTRL_CTR)
-                       return false;
-               new = old | RCU_DYNTICK_CTRL_MASK;
-               new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
-       } while (new_old != old);
-       return true;
+       return snap == atomic_read(&rdp->dynticks);
 }
 
 /*
@@ -439,13 +401,12 @@ bool rcu_eqs_special_set(int cpu)
  */
 notrace void rcu_momentary_dyntick_idle(void)
 {
-       int special;
+       int seq;
 
        raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
-       special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
-                                   &this_cpu_ptr(&rcu_data)->dynticks);
+       seq = rcu_dynticks_inc(2);
        /* It is illegal to call this from idle state. */
-       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
+       WARN_ON_ONCE(!(seq & 0x1));
        rcu_preempt_deferred_qs(current);
 }
 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
@@ -1325,7 +1286,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
         */
        jtsq = READ_ONCE(jiffies_to_sched_qs);
        ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
-       rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
+       rnhqp = per_cpu_ptr(&rcu_data.rcu_need_heavy_qs, rdp->cpu);
        if (!READ_ONCE(*rnhqp) &&
            (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
             time_after(jiffies, rcu_state.jiffies_resched) ||
@@ -1772,7 +1733,7 @@ static void rcu_strict_gp_boundary(void *unused)
 /*
  * Initialize a new grace period.  Return false if no grace period required.
  */
-static bool rcu_gp_init(void)
+static noinline_for_stack bool rcu_gp_init(void)
 {
        unsigned long firstseq;
        unsigned long flags;
@@ -1966,7 +1927,7 @@ static void rcu_gp_fqs(bool first_time)
 /*
  * Loop doing repeated quiescent-state forcing until the grace period ends.
  */
-static void rcu_gp_fqs_loop(void)
+static noinline_for_stack void rcu_gp_fqs_loop(void)
 {
        bool first_gp_fqs;
        int gf = 0;
@@ -1993,8 +1954,8 @@ static void rcu_gp_fqs_loop(void)
                trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
                                       TPS("fqswait"));
                WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
-               ret = swait_event_idle_timeout_exclusive(
-                               rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
+               (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
+                                rcu_gp_fqs_check_wake(&gf), j);
                rcu_gp_torture_wait();
                WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
                /* Locking provides needed memory barriers. */
@@ -2471,9 +2432,6 @@ int rcutree_dead_cpu(unsigned int cpu)
        WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
        /* Adjust any no-longer-needed kthreads. */
        rcu_boost_kthread_setaffinity(rnp, -1);
-       /* Do any needed no-CB deferred wakeups from this CPU. */
-       do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
-
        // Stop-machine done, so allow nohz_full to disable tick.
        tick_dep_clear(TICK_DEP_BIT_RCU);
        return 0;
@@ -4050,7 +4008,7 @@ void rcu_barrier(void)
         */
        init_completion(&rcu_state.barrier_completion);
        atomic_set(&rcu_state.barrier_cpu_count, 2);
-       get_online_cpus();
+       cpus_read_lock();
 
        /*
         * Force each CPU with callbacks to register a new callback.
@@ -4081,7 +4039,7 @@ void rcu_barrier(void)
                                          rcu_state.barrier_sequence);
                }
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        /*
         * Now that we have an rcu_barrier_callback() callback on each
@@ -4784,4 +4742,5 @@ void __init rcu_init(void)
 
 #include "tree_stall.h"
 #include "tree_exp.h"
+#include "tree_nocb.h"
 #include "tree_plugin.h"
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
new file mode 100644 (file)
index 0000000..8fdf44f
--- /dev/null
@@ -0,0 +1,1496 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ * Internal non-public definitions that provide either classic
+ * or preemptible semantics.
+ *
+ * Copyright Red Hat, 2009
+ * Copyright IBM Corporation, 2009
+ * Copyright SUSE, 2021
+ *
+ * Author: Ingo Molnar <mingo@elte.hu>
+ *        Paul E. McKenney <paulmck@linux.ibm.com>
+ *        Frederic Weisbecker <frederic@kernel.org>
+ */
+
+#ifdef CONFIG_RCU_NOCB_CPU
+static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
+static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
+static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
+{
+       return lockdep_is_held(&rdp->nocb_lock);
+}
+
+static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
+{
+       /* Race on early boot between thread creation and assignment */
+       if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
+               return true;
+
+       if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
+               if (in_task())
+                       return true;
+       return false;
+}
+
+/*
+ * Offload callback processing from the boot-time-specified set of CPUs
+ * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
+ * created that pull the callbacks from the corresponding CPU, wait for
+ * a grace period to elapse, and invoke the callbacks.  These kthreads
+ * are organized into GP kthreads, which manage incoming callbacks, wait for
+ * grace periods, and awaken CB kthreads, and the CB kthreads, which only
+ * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
+ * do a wake_up() on their GP kthread when they insert a callback into any
+ * empty list, unless the rcu_nocb_poll boot parameter has been specified,
+ * in which case each kthread actively polls its CPU.  (Which isn't so great
+ * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
+ *
+ * This is intended to be used in conjunction with Frederic Weisbecker's
+ * adaptive-idle work, which would seriously reduce OS jitter on CPUs
+ * running CPU-bound user-mode computations.
+ *
+ * Offloading of callbacks can also be used as an energy-efficiency
+ * measure because CPUs with no RCU callbacks queued are more aggressive
+ * about entering dyntick-idle mode.
+ */
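For example, a system that keeps RCU callback processing off the CPUs running latency-sensitive work might boot with something along the lines of (CPU range illustrative):

	rcu_nocbs=4-63 rcu_nocb_poll

where rcu_nocbs= selects the offloaded CPUs (parsed by rcu_nocb_setup() below) and rcu_nocb_poll opts the offload kthreads into polling instead of being woken.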
+
+
+/*
+ * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
+ * If the list is invalid, a warning is emitted and all CPUs are offloaded.
+ */
+static int __init rcu_nocb_setup(char *str)
+{
+       alloc_bootmem_cpumask_var(&rcu_nocb_mask);
+       if (cpulist_parse(str, rcu_nocb_mask)) {
+               pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
+               cpumask_setall(rcu_nocb_mask);
+       }
+       return 1;
+}
+__setup("rcu_nocbs=", rcu_nocb_setup);
+
+static int __init parse_rcu_nocb_poll(char *arg)
+{
+       rcu_nocb_poll = true;
+       return 0;
+}
+early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+
+/*
+ * Don't bother bypassing ->cblist if the call_rcu() rate is low.
+ * After all, the main point of bypassing is to avoid lock contention
+ * on ->nocb_lock, which can only happen at high call_rcu() rates.
+ */
+static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
+module_param(nocb_nobypass_lim_per_jiffy, int, 0);
+
+/*
+ * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
+ * lock isn't immediately available, increment ->nocb_lock_contended to
+ * flag the contention.
+ */
+static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
+       __acquires(&rdp->nocb_bypass_lock)
+{
+       lockdep_assert_irqs_disabled();
+       if (raw_spin_trylock(&rdp->nocb_bypass_lock))
+               return;
+       atomic_inc(&rdp->nocb_lock_contended);
+       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+       smp_mb__after_atomic(); /* atomic_inc() before lock. */
+       raw_spin_lock(&rdp->nocb_bypass_lock);
+       smp_mb__before_atomic(); /* atomic_dec() after lock. */
+       atomic_dec(&rdp->nocb_lock_contended);
+}
+
+/*
+ * Spinwait until the specified rcu_data structure's ->nocb_lock is
+ * not contended.  Please note that this is extremely special-purpose,
+ * relying on the fact that at most two kthreads and one CPU contend for
+ * this lock, and also that the two kthreads are guaranteed to have frequent
+ * grace-period-duration time intervals between successive acquisitions
+ * of the lock.  This allows us to use an extremely simple throttling
+ * mechanism, and further to apply it only to the CPU doing floods of
+ * call_rcu() invocations.  Don't try this at home!
+ */
+static void rcu_nocb_wait_contended(struct rcu_data *rdp)
+{
+       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+       while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
+               cpu_relax();
+}
+
+/*
+ * Conditionally acquire the specified rcu_data structure's
+ * ->nocb_bypass_lock.
+ */
+static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+       return raw_spin_trylock(&rdp->nocb_bypass_lock);
+}
+
+/*
+ * Release the specified rcu_data structure's ->nocb_bypass_lock.
+ */
+static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
+       __releases(&rdp->nocb_bypass_lock)
+{
+       lockdep_assert_irqs_disabled();
+       raw_spin_unlock(&rdp->nocb_bypass_lock);
+}
+
+/*
+ * Acquire the specified rcu_data structure's ->nocb_lock, but only
+ * if it corresponds to a no-CBs CPU.
+ */
+static void rcu_nocb_lock(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+       if (!rcu_rdp_is_offloaded(rdp))
+               return;
+       raw_spin_lock(&rdp->nocb_lock);
+}
+
+/*
+ * Release the specified rcu_data structure's ->nocb_lock, but only
+ * if it corresponds to a no-CBs CPU.
+ */
+static void rcu_nocb_unlock(struct rcu_data *rdp)
+{
+       if (rcu_rdp_is_offloaded(rdp)) {
+               lockdep_assert_irqs_disabled();
+               raw_spin_unlock(&rdp->nocb_lock);
+       }
+}
+
+/*
+ * Release the specified rcu_data structure's ->nocb_lock and restore
+ * interrupts, but only if it corresponds to a no-CBs CPU.
+ */
+static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
+                                      unsigned long flags)
+{
+       if (rcu_rdp_is_offloaded(rdp)) {
+               lockdep_assert_irqs_disabled();
+               raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+       } else {
+               local_irq_restore(flags);
+       }
+}
+
+/* Lockdep check that ->cblist may be safely accessed. */
+static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+       if (rcu_rdp_is_offloaded(rdp))
+               lockdep_assert_held(&rdp->nocb_lock);
+}
+
+/*
+ * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
+ * grace period.
+ */
+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
+{
+       swake_up_all(sq);
+}
+
+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
+{
+       return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
+}
+
+static void rcu_init_one_nocb(struct rcu_node *rnp)
+{
+       init_swait_queue_head(&rnp->nocb_gp_wq[0]);
+       init_swait_queue_head(&rnp->nocb_gp_wq[1]);
+}
+
+/* Is the specified CPU a no-CBs CPU? */
+bool rcu_is_nocb_cpu(int cpu)
+{
+       if (cpumask_available(rcu_nocb_mask))
+               return cpumask_test_cpu(cpu, rcu_nocb_mask);
+       return false;
+}
+
+static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
+                          struct rcu_data *rdp,
+                          bool force, unsigned long flags)
+       __releases(rdp_gp->nocb_gp_lock)
+{
+       bool needwake = false;
+
+       if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
+               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                   TPS("AlreadyAwake"));
+               return false;
+       }
+
+       if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
+               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+               del_timer(&rdp_gp->nocb_timer);
+       }
+
+       if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
+               WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
+               needwake = true;
+       }
+       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+       if (needwake) {
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
+               wake_up_process(rdp_gp->nocb_gp_kthread);
+       }
+
+       return needwake;
+}
+
+/*
+ * Kick the GP kthread for this NOCB group.
+ */
+static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
+{
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+       return __wake_nocb_gp(rdp_gp, rdp, force, flags);
+}
+
+/*
+ * Arrange to wake the GP kthread for this NOCB group at some future
+ * time when it is safe to do so.
+ */
+static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
+                              const char *reason)
+{
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+
+       /*
+        * Bypass wakeup overrides previous deferments. In case
+        * of callback storm, no need to wake up too early.
+        */
+       if (waketype == RCU_NOCB_WAKE_BYPASS) {
+               mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
+               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
+       } else {
+               if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
+                       mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
+               if (rdp_gp->nocb_defer_wakeup < waketype)
+                       WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
+       }
+
+       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
+}
+
+/*
+ * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
+ * However, if there is a callback to be enqueued and if ->nocb_bypass
+ * proves to be initially empty, just return false because the no-CB GP
+ * kthread may need to be awakened in this case.
+ *
+ * Note that this function always returns true if rhp is NULL.
+ */
+static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                                    unsigned long j)
+{
+       struct rcu_cblist rcl;
+
+       WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
+       rcu_lockdep_assert_cblist_protected(rdp);
+       lockdep_assert_held(&rdp->nocb_bypass_lock);
+       if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
+               raw_spin_unlock(&rdp->nocb_bypass_lock);
+               return false;
+       }
+       /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
+       if (rhp)
+               rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
+       rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
+       rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
+       WRITE_ONCE(rdp->nocb_bypass_first, j);
+       rcu_nocb_bypass_unlock(rdp);
+       return true;
+}
+
+/*
+ * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
+ * However, if there is a callback to be enqueued and if ->nocb_bypass
+ * proves to be initially empty, just return false because the no-CB GP
+ * kthread may need to be awakened in this case.
+ *
+ * Note that this function always returns true if rhp is NULL.
+ */
+static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                                 unsigned long j)
+{
+       if (!rcu_rdp_is_offloaded(rdp))
+               return true;
+       rcu_lockdep_assert_cblist_protected(rdp);
+       rcu_nocb_bypass_lock(rdp);
+       return rcu_nocb_do_flush_bypass(rdp, rhp, j);
+}
+
+/*
+ * If the ->nocb_bypass_lock is immediately available, flush the
+ * ->nocb_bypass queue into ->cblist.
+ */
+static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
+{
+       rcu_lockdep_assert_cblist_protected(rdp);
+       if (!rcu_rdp_is_offloaded(rdp) ||
+           !rcu_nocb_bypass_trylock(rdp))
+               return;
+       WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
+}
+
+/*
+ * See whether it is appropriate to use the ->nocb_bypass list in order
+ * to control contention on ->nocb_lock.  A limited number of direct
+ * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
+ * is non-empty, further callbacks must be placed into ->nocb_bypass,
+ * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
+ * back to direct use of ->cblist.  However, ->nocb_bypass should not be
+ * used if ->cblist is empty, because otherwise callbacks can be stranded
+ * on ->nocb_bypass because we cannot count on the current CPU ever again
+ * invoking call_rcu().  The general rule is that if ->nocb_bypass is
+ * non-empty, the corresponding no-CBs grace-period kthread must not be
+ * in an indefinite sleep state.
+ *
+ * Finally, it is not permitted to use the bypass during early boot,
+ * as doing so would confuse the auto-initialization code.  Besides
+ * which, there is no point in worrying about lock contention while
+ * there is only one CPU in operation.
+ */
+static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                               bool *was_alldone, unsigned long flags)
+{
+       unsigned long c;
+       unsigned long cur_gp_seq;
+       unsigned long j = jiffies;
+       long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+
+       lockdep_assert_irqs_disabled();
+
+       // Pure softirq/rcuc based processing: no bypassing, no
+       // locking.
+       if (!rcu_rdp_is_offloaded(rdp)) {
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               return false;
+       }
+
+       // In the process of (de-)offloading: no bypassing, but
+       // locking.
+       if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
+               rcu_nocb_lock(rdp);
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               return false; /* Not offloaded, no bypassing. */
+       }
+
+       // Don't use ->nocb_bypass during early boot.
+       if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
+               rcu_nocb_lock(rdp);
+               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               return false;
+       }
+
+       // If we have advanced to a new jiffy, reset counts to allow
+       // moving back from ->nocb_bypass to ->cblist.
+       if (j == rdp->nocb_nobypass_last) {
+               c = rdp->nocb_nobypass_count + 1;
+       } else {
+               WRITE_ONCE(rdp->nocb_nobypass_last, j);
+               c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
+               if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
+                                nocb_nobypass_lim_per_jiffy))
+                       c = 0;
+               else if (c > nocb_nobypass_lim_per_jiffy)
+                       c = nocb_nobypass_lim_per_jiffy;
+       }
+       WRITE_ONCE(rdp->nocb_nobypass_count, c);
+
+       // If there haven't yet been all that many ->cblist enqueues
+       // this jiffy, tell the caller to enqueue onto ->cblist.  But flush
+       // ->nocb_bypass first.
+       if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
+               rcu_nocb_lock(rdp);
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               if (*was_alldone)
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("FirstQ"));
+               WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
+               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+               return false; // Caller must enqueue the callback.
+       }
+
+       // If ->nocb_bypass has been used too long or is too full,
+       // flush ->nocb_bypass to ->cblist.
+       if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
+           ncbs >= qhimark) {
+               rcu_nocb_lock(rdp);
+               if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
+                       *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+                       if (*was_alldone)
+                               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                                   TPS("FirstQ"));
+                       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+                       return false; // Caller must enqueue the callback.
+               }
+               if (j != rdp->nocb_gp_adv_time &&
+                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
+                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
+                       rdp->nocb_gp_adv_time = j;
+               }
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               return true; // Callback already enqueued.
+       }
+
+       // We need to use the bypass.
+       rcu_nocb_wait_contended(rdp);
+       rcu_nocb_bypass_lock(rdp);
+       ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+       rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
+       rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
+       if (!ncbs) {
+               WRITE_ONCE(rdp->nocb_bypass_first, j);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
+       }
+       rcu_nocb_bypass_unlock(rdp);
+       smp_mb(); /* Order enqueue before wake. */
+       if (ncbs) {
+               local_irq_restore(flags);
+       } else {
+               // No-CBs GP kthread might be indefinitely asleep; if so, wake it.
+               rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
+               if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("FirstBQwake"));
+                       __call_rcu_nocb_wake(rdp, true, flags);
+               } else {
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("FirstBQnoWake"));
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+               }
+       }
+       return true; // Callback already enqueued.
+}
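
The per-jiffy rate limiting above boils down to a small saturating-decay counter: during the same jiffy each call grows the count by one, and the first call of a new jiffy subtracts one jiffy's worth of quota (nocb_nobypass_lim_per_jiffy, default 16 * 1000 / HZ) and clamps the result.  A minimal stand-alone sketch of just that arithmetic; the function and parameter names are illustrative only and not part of the kernel code:

	/*
	 * Sketch of the ->nocb_nobypass_count update in rcu_nocb_try_bypass().
	 * "lim" stands in for nocb_nobypass_lim_per_jiffy.
	 */
	static unsigned long nobypass_count_update(unsigned long count,
						   int same_jiffy, unsigned long lim)
	{
		unsigned long c;

		if (same_jiffy)
			return count + 1;	/* Another enqueue within this jiffy. */
		/* New jiffy: give back one jiffy's worth of quota, then clamp. */
		if (count < lim)
			return 0;
		c = count - lim;
		return c > lim ? lim : c;
	}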
+
+/*
+ * Awaken the no-CBs grace-period kthread if needed, either due to it
+ * legitimately being asleep or due to overload conditions.
+ *
+ * If warranted, also wake up the kthread servicing this CPU's queues.
+ */
+static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
+                                unsigned long flags)
+                                __releases(rdp->nocb_lock)
+{
+       unsigned long cur_gp_seq;
+       unsigned long j;
+       long len;
+       struct task_struct *t;
+
+       // If we are being polled or there is no kthread, just leave.
+       t = READ_ONCE(rdp->nocb_gp_kthread);
+       if (rcu_nocb_poll || !t) {
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                   TPS("WakeNotPoll"));
+               return;
+       }
+       // Need to actually do a wakeup.
+       len = rcu_segcblist_n_cbs(&rdp->cblist);
+       if (was_alldone) {
+               rdp->qlen_last_fqs_check = len;
+               if (!irqs_disabled_flags(flags)) {
+                       /* ... if queue was empty ... */
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       wake_nocb_gp(rdp, false);
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("WakeEmpty"));
+               } else {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
+                                          TPS("WakeEmptyIsDeferred"));
+               }
+       } else if (len > rdp->qlen_last_fqs_check + qhimark) {
+               /* ... or if many callbacks queued. */
+               rdp->qlen_last_fqs_check = len;
+               j = jiffies;
+               if (j != rdp->nocb_gp_adv_time &&
+                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
+                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
+                       rdp->nocb_gp_adv_time = j;
+               }
+               smp_mb(); /* Enqueue before timer_pending(). */
+               if ((rdp->nocb_cb_sleep ||
+                    !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
+                   !timer_pending(&rdp->nocb_timer)) {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
+                                          TPS("WakeOvfIsDeferred"));
+               } else {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
+               }
+       } else {
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
+       }
+       return;
+}
+
+/*
+ * Check whether we should ignore this rdp.
+ *
+ * The check is done without holding the nocb lock, but the
+ * ordering below ensures that a freshly offloaded rdp is
+ * not missed:
+ *
+ *  rdp_offload_toggle()        nocb_gp_enabled_cb()
+ * -------------------------   ----------------------------
+ *    WRITE flags                 LOCK nocb_gp_lock
+ *    LOCK nocb_gp_lock           READ/WRITE nocb_gp_sleep
+ *    READ/WRITE nocb_gp_sleep    UNLOCK nocb_gp_lock
+ *    UNLOCK nocb_gp_lock         READ flags
+ */
+static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
+{
+       u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;
+
+       return rcu_segcblist_test_flags(&rdp->cblist, flags);
+}
+
+static inline bool nocb_gp_update_state_deoffloading(struct rcu_data *rdp,
+                                                    bool *needwake_state)
+{
+       struct rcu_segcblist *cblist = &rdp->cblist;
+
+       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
+               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
+                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
+                               *needwake_state = true;
+               }
+               return false;
+       }
+
+       /*
+        * De-offloading. Clear our flag and notify the de-offload worker.
+        * We will ignore this rdp until it gets re-offloaded.
+        */
+       WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+       rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
+       if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
+               *needwake_state = true;
+       return true;
+}
+
+
+/*
+ * No-CBs GP kthreads come here to wait for additional callbacks to show up
+ * or for grace periods to end.
+ */
+static void nocb_gp_wait(struct rcu_data *my_rdp)
+{
+       bool bypass = false;
+       long bypass_ncbs;
+       int __maybe_unused cpu = my_rdp->cpu;
+       unsigned long cur_gp_seq;
+       unsigned long flags;
+       bool gotcbs = false;
+       unsigned long j = jiffies;
+       bool needwait_gp = false; // This prevents actual uninitialized use.
+       bool needwake;
+       bool needwake_gp;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
+       unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
+       bool wasempty = false;
+
+       /*
+        * Each pass through the following loop checks for CBs and for the
+        * nearest grace period (if any) to wait for next.  The CB kthreads
+        * and the global grace-period kthread are awakened if needed.
+        */
+       WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
+       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
+               bool needwake_state = false;
+
+               if (!nocb_gp_enabled_cb(rdp))
+                       continue;
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
+               rcu_nocb_lock_irqsave(rdp, flags);
+               if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       if (needwake_state)
+                               swake_up_one(&rdp->nocb_state_wq);
+                       continue;
+               }
+               bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+               if (bypass_ncbs &&
+                   (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
+                    bypass_ncbs > 2 * qhimark)) {
+                       // Bypass full or old, so flush it.
+                       (void)rcu_nocb_try_flush_bypass(rdp, j);
+                       bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+               } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       if (needwake_state)
+                               swake_up_one(&rdp->nocb_state_wq);
+                       continue; /* No callbacks here, try next. */
+               }
+               if (bypass_ncbs) {
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("Bypass"));
+                       bypass = true;
+               }
+               rnp = rdp->mynode;
+
+               // Advance callbacks if helpful and low contention.
+               needwake_gp = false;
+               if (!rcu_segcblist_restempty(&rdp->cblist,
+                                            RCU_NEXT_READY_TAIL) ||
+                   (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+                    rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
+                       raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
+                       needwake_gp = rcu_advance_cbs(rnp, rdp);
+                       wasempty = rcu_segcblist_restempty(&rdp->cblist,
+                                                          RCU_NEXT_READY_TAIL);
+                       raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
+               }
+               // Need to wait on some grace period?
+               WARN_ON_ONCE(wasempty &&
+                            !rcu_segcblist_restempty(&rdp->cblist,
+                                                     RCU_NEXT_READY_TAIL));
+               if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
+                       if (!needwait_gp ||
+                           ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
+                               wait_gp_seq = cur_gp_seq;
+                       needwait_gp = true;
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("NeedWaitGP"));
+               }
+               if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
+                       needwake = rdp->nocb_cb_sleep;
+                       WRITE_ONCE(rdp->nocb_cb_sleep, false);
+                       smp_mb(); /* CB invocation -after- GP end. */
+               } else {
+                       needwake = false;
+               }
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               if (needwake) {
+                       swake_up_one(&rdp->nocb_cb_wq);
+                       gotcbs = true;
+               }
+               if (needwake_gp)
+                       rcu_gp_kthread_wake();
+               if (needwake_state)
+                       swake_up_one(&rdp->nocb_state_wq);
+       }
+
+       my_rdp->nocb_gp_bypass = bypass;
+       my_rdp->nocb_gp_gp = needwait_gp;
+       my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
+
+       if (bypass && !rcu_nocb_poll) {
+               // At least one child with non-empty ->nocb_bypass, so set
+               // timer in order to avoid stranding its callbacks.
+               wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
+                                  TPS("WakeBypassIsDeferred"));
+       }
+       if (rcu_nocb_poll) {
+               /* Polling, so trace if first poll in the series. */
+               if (gotcbs)
+                       trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
+               schedule_timeout_idle(1);
+       } else if (!needwait_gp) {
+               /* Wait for callbacks to appear. */
+               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
+               swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
+                               !READ_ONCE(my_rdp->nocb_gp_sleep));
+               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
+       } else {
+               rnp = my_rdp->mynode;
+               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
+               swait_event_interruptible_exclusive(
+                       rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
+                       rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
+                       !READ_ONCE(my_rdp->nocb_gp_sleep));
+               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
+       }
+       if (!rcu_nocb_poll) {
+               raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
+               if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
+                       WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+                       del_timer(&my_rdp->nocb_timer);
+               }
+               WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
+               raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
+       }
+       my_rdp->nocb_gp_seq = -1;
+       WARN_ON(signal_pending(current));
+}
+
+/*
+ * No-CBs grace-period-wait kthread.  There is one of these per group
+ * of CPUs, but only once at least one CPU in that group has come online
+ * at least once since boot.  This kthread checks for newly posted
+ * callbacks from any of the CPUs it is responsible for, waits for a
+ * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
+ * that then have callback-invocation work to do.
+ */
+static int rcu_nocb_gp_kthread(void *arg)
+{
+       struct rcu_data *rdp = arg;
+
+       for (;;) {
+               WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
+               nocb_gp_wait(rdp);
+               cond_resched_tasks_rcu_qs();
+       }
+       return 0;
+}
+
+static inline bool nocb_cb_can_run(struct rcu_data *rdp)
+{
+       u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
+       return rcu_segcblist_test_flags(&rdp->cblist, flags);
+}
+
+static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
+{
+       return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
+}
+
+/*
+ * Invoke any ready callbacks from the corresponding no-CBs CPU,
+ * then, if there are no more, wait for more to appear.
+ */
+static void nocb_cb_wait(struct rcu_data *rdp)
+{
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       unsigned long cur_gp_seq;
+       unsigned long flags;
+       bool needwake_state = false;
+       bool needwake_gp = false;
+       bool can_sleep = true;
+       struct rcu_node *rnp = rdp->mynode;
+
+       local_irq_save(flags);
+       rcu_momentary_dyntick_idle();
+       local_irq_restore(flags);
+       /*
+        * Disable BH to provide the expected environment.  Also, when
+        * transitioning to/from NOCB mode, a self-requeuing callback might
+        * be invoked from softirq.  A short grace period could cause both
+        * instances of this callback to execute concurrently.
+        */
+       local_bh_disable();
+       rcu_do_batch(rdp);
+       local_bh_enable();
+       lockdep_assert_irqs_enabled();
+       rcu_nocb_lock_irqsave(rdp, flags);
+       if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
+           rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
+           raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
+               needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+       }
+
+       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
+               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
+                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
+                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
+                               needwake_state = true;
+               }
+               if (rcu_segcblist_ready_cbs(cblist))
+                       can_sleep = false;
+       } else {
+               /*
+                * De-offloading. Clear our flag and notify the de-offload worker.
+                * We won't touch the callbacks and will keep sleeping
+                * until we get re-offloaded.
+                */
+               WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
+               rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
+               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
+                       needwake_state = true;
+       }
+
+       WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
+
+       if (rdp->nocb_cb_sleep)
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
+
+       rcu_nocb_unlock_irqrestore(rdp, flags);
+       if (needwake_gp)
+               rcu_gp_kthread_wake();
+
+       if (needwake_state)
+               swake_up_one(&rdp->nocb_state_wq);
+
+       do {
+               swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
+                                                   nocb_cb_wait_cond(rdp));
+
+               // VVV Ensure CB invocation follows _sleep test.
+               if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
+                       WARN_ON(signal_pending(current));
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+               }
+       } while (!nocb_cb_can_run(rdp));
+}
+
+/*
+ * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
+ * nocb_cb_wait() to do the dirty work.
+ */
+static int rcu_nocb_cb_kthread(void *arg)
+{
+       struct rcu_data *rdp = arg;
+
+       // Each pass through this loop does one callback batch, and,
+       // if there are no more ready callbacks, waits for them.
+       for (;;) {
+               nocb_cb_wait(rdp);
+               cond_resched_tasks_rcu_qs();
+       }
+       return 0;
+}
+
+/* Is a deferred wakeup of rcu_nocb_kthread() required? */
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
+{
+       return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
+}
+
+/* Do a deferred wakeup of rcu_nocb_kthread(). */
+static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
+                                          struct rcu_data *rdp, int level,
+                                          unsigned long flags)
+       __releases(rdp_gp->nocb_gp_lock)
+{
+       int ndw;
+       int ret;
+
+       if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
+               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+               return false;
+       }
+
+       ndw = rdp_gp->nocb_defer_wakeup;
+       ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
+
+       return ret;
+}
+
+/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
+static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
+{
+       unsigned long flags;
+       struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
+
+       WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
+
+       raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
+       smp_mb__after_spinlock(); /* Timer expire before wakeup. */
+       do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
+}
+
+/*
+ * Do a deferred wakeup of rcu_nocb_kthread() from the fastpath.
+ * This means we do an inexact common-case check.  Note that if
+ * we miss, ->nocb_timer will eventually clean things up.
+ */
+static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
+               return false;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+       return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
+}
+
+void rcu_nocb_flush_deferred_wakeup(void)
+{
+       do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
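
rcu_nocb_flush_deferred_wakeup() lets code outside this file make sure a deferred rcuog wakeup is not left pending when a CPU is about to go quiet; the idle-entry path is the typical in-tree caller.  A hedged sketch of such a caller follows; example_cpu_going_quiet() is illustrative only and not an in-tree helper:

	/* Illustration: flush any deferred nocb wakeup before the CPU stops. */
	static void example_cpu_going_quiet(void)
	{
		rcu_nocb_flush_deferred_wakeup();
		/* ... architecture-specific idle entry would follow here ... */
	}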
+
+static int rdp_offload_toggle(struct rcu_data *rdp,
+                              bool offload, unsigned long flags)
+       __releases(rdp->nocb_lock)
+{
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+       bool wake_gp = false;
+
+       rcu_segcblist_offload(cblist, offload);
+
+       if (rdp->nocb_cb_sleep)
+               rdp->nocb_cb_sleep = false;
+       rcu_nocb_unlock_irqrestore(rdp, flags);
+
+       /*
+        * Ignore former value of nocb_cb_sleep and force wake up as it could
+        * have been spuriously set to false already.
+        */
+       swake_up_one(&rdp->nocb_cb_wq);
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+       if (rdp_gp->nocb_gp_sleep) {
+               rdp_gp->nocb_gp_sleep = false;
+               wake_gp = true;
+       }
+       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+
+       if (wake_gp)
+               wake_up_process(rdp_gp->nocb_gp_kthread);
+
+       return 0;
+}
+
+static long rcu_nocb_rdp_deoffload(void *arg)
+{
+       struct rcu_data *rdp = arg;
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       unsigned long flags;
+       int ret;
+
+       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+
+       pr_info("De-offloading %d\n", rdp->cpu);
+
+       rcu_nocb_lock_irqsave(rdp, flags);
+       /*
+        * Flush once and for all now. This suffices because we are
+        * running on the target CPU holding ->nocb_lock (thus having
+        * interrupts disabled), and because rdp_offload_toggle()
+        * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
+        * Thus future calls to rcu_segcblist_completely_offloaded() will
+        * return false, which means that future calls to rcu_nocb_try_bypass()
+        * will refuse to put anything into the bypass.
+        */
+       WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
+       ret = rdp_offload_toggle(rdp, false, flags);
+       swait_event_exclusive(rdp->nocb_state_wq,
+                             !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
+                                                       SEGCBLIST_KTHREAD_GP));
+       /*
+        * Lock one last time to acquire latest callback updates from kthreads
+        * so we can later handle callbacks locally without locking.
+        */
+       rcu_nocb_lock_irqsave(rdp, flags);
+       /*
+        * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb
+        * lock is released but how about being paranoid for once?
+        */
+       rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
+       /*
+        * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
+        * rcu_nocb_unlock_irqrestore() anymore.
+        */
+       raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+       /* Sanity check */
+       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+
+
+       return ret;
+}
+
+int rcu_nocb_cpu_deoffload(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       int ret = 0;
+
+       mutex_lock(&rcu_state.barrier_mutex);
+       cpus_read_lock();
+       if (rcu_rdp_is_offloaded(rdp)) {
+               if (cpu_online(cpu)) {
+                       ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
+                       if (!ret)
+                               cpumask_clear_cpu(cpu, rcu_nocb_mask);
+               } else {
+                       pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
+                       ret = -EINVAL;
+               }
+       }
+       cpus_read_unlock();
+       mutex_unlock(&rcu_state.barrier_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
+
+static long rcu_nocb_rdp_offload(void *arg)
+{
+       struct rcu_data *rdp = arg;
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       unsigned long flags;
+       int ret;
+
+       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+       /*
+        * For now we only support re-offload, i.e. the rdp must have been
+        * offloaded on boot first.
+        */
+       if (!rdp->nocb_gp_rdp)
+               return -EINVAL;
+
+       pr_info("Offloading %d\n", rdp->cpu);
+       /*
+        * Can't use rcu_nocb_lock_irqsave() while we are in
+        * SEGCBLIST_SOFTIRQ_ONLY mode.
+        */
+       raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+
+       /*
+        * We didn't take the nocb lock while working on the
+        * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode.
+        * Every modification previously done on rdp->cblist must be
+        * visible remotely to the nocb kthreads upon wake up, after
+        * they read the cblist flags.
+        *
+        * The layout against nocb_lock enforces that ordering:
+        *
+        *  __rcu_nocb_rdp_offload()   nocb_cb_wait()/nocb_gp_wait()
+        * -------------------------   ----------------------------
+        *      WRITE callbacks           rcu_nocb_lock()
+        *      rcu_nocb_lock()           READ flags
+        *      WRITE flags               READ callbacks
+        *      rcu_nocb_unlock()         rcu_nocb_unlock()
+        */
+       ret = rdp_offload_toggle(rdp, true, flags);
+       swait_event_exclusive(rdp->nocb_state_wq,
+                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
+                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+
+       return ret;
+}
+
+int rcu_nocb_cpu_offload(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       int ret = 0;
+
+       mutex_lock(&rcu_state.barrier_mutex);
+       cpus_read_lock();
+       if (!rcu_rdp_is_offloaded(rdp)) {
+               if (cpu_online(cpu)) {
+                       ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
+                       if (!ret)
+                               cpumask_set_cpu(cpu, rcu_nocb_mask);
+               } else {
+                       pr_info("NOCB: Can't CB-offload an offline CPU\n");
+                       ret = -EINVAL;
+               }
+       }
+       cpus_read_unlock();
+       mutex_unlock(&rcu_state.barrier_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
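
Taken together, rcu_nocb_cpu_deoffload() and rcu_nocb_cpu_offload() give the rest of the kernel a runtime toggle for per-CPU callback offloading.  A minimal sketch of a caller, assuming only the two exported functions above; example_set_nocb() is illustrative and not an in-tree helper:

	/* Illustration: switch callback offloading for one CPU at runtime. */
	static int example_set_nocb(int cpu, bool offload)
	{
		/* Both helpers return 0 on success or a negative errno. */
		return offload ? rcu_nocb_cpu_offload(cpu)
			       : rcu_nocb_cpu_deoffload(cpu);
	}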
+
+void __init rcu_init_nohz(void)
+{
+       int cpu;
+       bool need_rcu_nocb_mask = false;
+       struct rcu_data *rdp;
+
+#if defined(CONFIG_NO_HZ_FULL)
+       if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
+               need_rcu_nocb_mask = true;
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+       if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
+               if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
+                       pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
+                       return;
+               }
+       }
+       if (!cpumask_available(rcu_nocb_mask))
+               return;
+
+#if defined(CONFIG_NO_HZ_FULL)
+       if (tick_nohz_full_running)
+               cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+       if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
+               pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
+               cpumask_and(rcu_nocb_mask, cpu_possible_mask,
+                           rcu_nocb_mask);
+       }
+       if (cpumask_empty(rcu_nocb_mask))
+               pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
+       else
+               pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
+                       cpumask_pr_args(rcu_nocb_mask));
+       if (rcu_nocb_poll)
+               pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
+
+       for_each_cpu(cpu, rcu_nocb_mask) {
+               rdp = per_cpu_ptr(&rcu_data, cpu);
+               if (rcu_segcblist_empty(&rdp->cblist))
+                       rcu_segcblist_init(&rdp->cblist);
+               rcu_segcblist_offload(&rdp->cblist, true);
+               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
+               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
+       }
+       rcu_organize_nocb_kthreads();
+}
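
The mask consumed by rcu_init_nohz() normally comes from the boot line: "rcu_nocbs=" selects the offloaded CPUs, "rcu_nocb_poll" switches the rcuog kthreads to polling, and CPUs listed in "nohz_full=" are ORed in as shown above.  For example (the CPU list is illustrative only):

	rcu_nocbs=1-7 rcu_nocb_poll nohz_full=1-7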
+
+/* Initialize per-rcu_data variables for no-CBs CPUs. */
+static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
+{
+       init_swait_queue_head(&rdp->nocb_cb_wq);
+       init_swait_queue_head(&rdp->nocb_gp_wq);
+       init_swait_queue_head(&rdp->nocb_state_wq);
+       raw_spin_lock_init(&rdp->nocb_lock);
+       raw_spin_lock_init(&rdp->nocb_bypass_lock);
+       raw_spin_lock_init(&rdp->nocb_gp_lock);
+       timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
+       rcu_cblist_init(&rdp->nocb_bypass);
+}
+
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
+ * for this CPU's group has not yet been created, spawn it as well.
+ */
+static void rcu_spawn_one_nocb_kthread(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       struct rcu_data *rdp_gp;
+       struct task_struct *t;
+
+       /*
+        * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
+        * then nothing to do.
+        */
+       if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
+               return;
+
+       /* If we didn't spawn the GP kthread first, reorganize! */
+       rdp_gp = rdp->nocb_gp_rdp;
+       if (!rdp_gp->nocb_gp_kthread) {
+               t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
+                               "rcuog/%d", rdp_gp->cpu);
+               if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
+                       return;
+               WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
+       }
+
+       /* Spawn the kthread for this CPU. */
+       t = kthread_run(rcu_nocb_cb_kthread, rdp,
+                       "rcuo%c/%d", rcu_state.abbr, cpu);
+       if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
+               return;
+       WRITE_ONCE(rdp->nocb_cb_kthread, t);
+       WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
+}
+
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthread, spawn it.
+ */
+static void rcu_spawn_cpu_nocb_kthread(int cpu)
+{
+       if (rcu_scheduler_fully_active)
+               rcu_spawn_one_nocb_kthread(cpu);
+}
+
+/*
+ * Once the scheduler is running, spawn rcuo kthreads for all online
+ * no-CBs CPUs.  This assumes that the early_initcall()s happen before
+ * non-boot CPUs come online -- if this changes, we will need to add
+ * some mutual exclusion.
+ */
+static void __init rcu_spawn_nocb_kthreads(void)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               rcu_spawn_cpu_nocb_kthread(cpu);
+}
+
+/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
+static int rcu_nocb_gp_stride = -1;
+module_param(rcu_nocb_gp_stride, int, 0444);
+
+/*
+ * Initialize GP-CB relationships for all no-CBs CPUs.
+ */
+static void __init rcu_organize_nocb_kthreads(void)
+{
+       int cpu;
+       bool firsttime = true;
+       bool gotnocbs = false;
+       bool gotnocbscbs = true;
+       int ls = rcu_nocb_gp_stride;
+       int nl = 0;  /* Next GP kthread. */
+       struct rcu_data *rdp;
+       struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
+       struct rcu_data *rdp_prev = NULL;
+
+       if (!cpumask_available(rcu_nocb_mask))
+               return;
+       if (ls == -1) {
+               ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
+               rcu_nocb_gp_stride = ls;
+       }
+
+       /*
+        * Each pass through this loop sets up one rcu_data structure.
+        * Should the corresponding CPU come online in the future, then
+        * we will spawn the needed set of rcu_nocb_kthread() kthreads.
+        */
+       for_each_cpu(cpu, rcu_nocb_mask) {
+               rdp = per_cpu_ptr(&rcu_data, cpu);
+               if (rdp->cpu >= nl) {
+                       /* New GP kthread, set up for CBs & next GP. */
+                       gotnocbs = true;
+                       nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
+                       rdp->nocb_gp_rdp = rdp;
+                       rdp_gp = rdp;
+                       if (dump_tree) {
+                               if (!firsttime)
+                                       pr_cont("%s\n", gotnocbscbs
+                                                       ? "" : " (self only)");
+                               gotnocbscbs = false;
+                               firsttime = false;
+                               pr_alert("%s: No-CB GP kthread CPU %d:",
+                                        __func__, cpu);
+                       }
+               } else {
+                       /* Another CB kthread, link to previous GP kthread. */
+                       gotnocbscbs = true;
+                       rdp->nocb_gp_rdp = rdp_gp;
+                       rdp_prev->nocb_next_cb_rdp = rdp;
+                       if (dump_tree)
+                               pr_cont(" %d", cpu);
+               }
+               rdp_prev = rdp;
+       }
+       if (gotnocbs && dump_tree)
+               pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
+}
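
As a worked example of the grouping above (numbers illustrative): with nr_cpu_ids == 16 and rcu_nocb_gp_stride left at its default of -1, ls = 16 / int_sqrt(16) = 4.  The pass for CPU 0 sets nl = DIV_ROUND_UP(1, 4) * 4 = 4, so CPUs 0-3 become CB CPUs chained to the rcuog kthread anchored at CPU 0; CPU 4 then starts the next group (nl = 8), and so on, assuming all sixteen CPUs are present in rcu_nocb_mask.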
+
+/*
+ * Bind the current task to the offloaded CPUs.  If there are no offloaded
+ * CPUs, leave the task unbound.  Splat if the bind attempt fails.
+ */
+void rcu_bind_current_to_nocb(void)
+{
+       if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
+               WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
+}
+EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
+
+// The ->on_cpu field is available only in CONFIG_SMP=y, so...
+#ifdef CONFIG_SMP
+static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
+{
+       return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
+}
+#else // #ifdef CONFIG_SMP
+static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
+{
+       return "";
+}
+#endif // #else #ifdef CONFIG_SMP
+
+/*
+ * Dump out nocb grace-period kthread state for the specified rcu_data
+ * structure.
+ */
+static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
+{
+       struct rcu_node *rnp = rdp->mynode;
+
+       pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
+               rdp->cpu,
+               "kK"[!!rdp->nocb_gp_kthread],
+               "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
+               "dD"[!!rdp->nocb_defer_wakeup],
+               "tT"[timer_pending(&rdp->nocb_timer)],
+               "sS"[!!rdp->nocb_gp_sleep],
+               ".W"[swait_active(&rdp->nocb_gp_wq)],
+               ".W"[swait_active(&rnp->nocb_gp_wq[0])],
+               ".W"[swait_active(&rnp->nocb_gp_wq[1])],
+               ".B"[!!rdp->nocb_gp_bypass],
+               ".G"[!!rdp->nocb_gp_gp],
+               (long)rdp->nocb_gp_seq,
+               rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
+               rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
+               rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
+               show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread));
+}
+
+/* Dump out nocb kthread state for the specified rcu_data structure. */
+static void show_rcu_nocb_state(struct rcu_data *rdp)
+{
+       char bufw[20];
+       char bufr[20];
+       struct rcu_segcblist *rsclp = &rdp->cblist;
+       bool waslocked;
+       bool wassleep;
+
+       if (rdp->nocb_gp_rdp == rdp)
+               show_rcu_nocb_gp_state(rdp);
+
+       sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
+       sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
+       pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
+               rdp->cpu, rdp->nocb_gp_rdp->cpu,
+               rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1,
+               "kK"[!!rdp->nocb_cb_kthread],
+               "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
+               "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
+               "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
+               "sS"[!!rdp->nocb_cb_sleep],
+               ".W"[swait_active(&rdp->nocb_cb_wq)],
+               jiffies - rdp->nocb_bypass_first,
+               jiffies - rdp->nocb_nobypass_last,
+               rdp->nocb_nobypass_count,
+               ".D"[rcu_segcblist_ready_cbs(rsclp)],
+               ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
+               rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
+               ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
+               rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
+               ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
+               ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
+               rcu_segcblist_n_cbs(&rdp->cblist),
+               rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
+               rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
+               show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
+
+       /* It is OK for GP kthreads to have GP state. */
+       if (rdp->nocb_gp_rdp == rdp)
+               return;
+
+       waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
+       wassleep = swait_active(&rdp->nocb_gp_wq);
+       if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
+               return;  /* Nothing untoward. */
+
+       pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
+               "lL"[waslocked],
+               "dD"[!!rdp->nocb_defer_wakeup],
+               "sS"[!!rdp->nocb_gp_sleep],
+               ".W"[wassleep]);
+}
+
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+
+static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
+{
+       return 0;
+}
+
+static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
+{
+       return false;
+}
+
+/* No ->nocb_lock to acquire.  */
+static void rcu_nocb_lock(struct rcu_data *rdp)
+{
+}
+
+/* No ->nocb_lock to release.  */
+static void rcu_nocb_unlock(struct rcu_data *rdp)
+{
+}
+
+/* No ->nocb_lock to release.  */
+static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
+                                      unsigned long flags)
+{
+       local_irq_restore(flags);
+}
+
+/* Lockdep check that ->cblist may be safely accessed. */
+static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+}
+
+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
+{
+}
+
+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
+{
+       return NULL;
+}
+
+static void rcu_init_one_nocb(struct rcu_node *rnp)
+{
+}
+
+static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                                 unsigned long j)
+{
+       return true;
+}
+
+static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                               bool *was_alldone, unsigned long flags)
+{
+       return false;
+}
+
+static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
+                                unsigned long flags)
+{
+       WARN_ON_ONCE(1);  /* Should be dead code! */
+}
+
+static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
+{
+}
+
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
+{
+       return false;
+}
+
+static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+       return false;
+}
+
+static void rcu_spawn_cpu_nocb_kthread(int cpu)
+{
+}
+
+static void __init rcu_spawn_nocb_kthreads(void)
+{
+}
+
+static void show_rcu_nocb_state(struct rcu_data *rdp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
index de1dc3b..d070059 100644 (file)
 
 #include "../locking/rtmutex_common.h"
 
-#ifdef CONFIG_RCU_NOCB_CPU
-static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
-static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
-static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
-{
-       return lockdep_is_held(&rdp->nocb_lock);
-}
-
-static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
-{
-       /* Race on early boot between thread creation and assignment */
-       if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
-               return true;
-
-       if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
-               if (in_task())
-                       return true;
-       return false;
-}
-
-#else
-static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
-{
-       return 0;
-}
-
-static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
-{
-       return false;
-}
-
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-
 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
 {
        /*
@@ -346,7 +313,7 @@ void rcu_note_context_switch(bool preempt)
 
        trace_rcu_utilization(TPS("Start context switch"));
        lockdep_assert_irqs_disabled();
-       WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
+       WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side critical section!");
        if (rcu_preempt_depth() > 0 &&
            !t->rcu_read_unlock_special.b.blocked) {
 
@@ -405,17 +372,20 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 
 static void rcu_preempt_read_enter(void)
 {
-       current->rcu_read_lock_nesting++;
+       WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1);
 }
 
 static int rcu_preempt_read_exit(void)
 {
-       return --current->rcu_read_lock_nesting;
+       int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1;
+
+       WRITE_ONCE(current->rcu_read_lock_nesting, ret);
+       return ret;
 }
 
 static void rcu_preempt_depth_set(int val)
 {
-       current->rcu_read_lock_nesting = val;
+       WRITE_ONCE(current->rcu_read_lock_nesting, val);
 }
 
 /*
@@ -559,7 +529,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
                        WRITE_ONCE(rnp->exp_tasks, np);
                if (IS_ENABLED(CONFIG_RCU_BOOST)) {
                        /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
-                       drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
+                       drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
                        if (&t->rcu_node_entry == rnp->boost_tasks)
                                WRITE_ONCE(rnp->boost_tasks, np);
                }
@@ -586,7 +556,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 
                /* Unboost if we were boosted. */
                if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
-                       rt_mutex_futex_unlock(&rnp->boost_mtx);
+                       rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
 
                /*
                 * If this was the last task on the expedited lists,
@@ -1083,7 +1053,7 @@ static int rcu_boost(struct rcu_node *rnp)
         * section.
         */
        t = container_of(tb, struct task_struct, rcu_node_entry);
-       rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
+       rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        /* Lock only for side effect: boosts task t's priority. */
        rt_mutex_lock(&rnp->boost_mtx);
@@ -1479,1460 +1449,6 @@ static void rcu_cleanup_after_idle(void)
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
-#ifdef CONFIG_RCU_NOCB_CPU
-
-/*
- * Offload callback processing from the boot-time-specified set of CPUs
- * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
- * created that pull the callbacks from the corresponding CPU, wait for
- * a grace period to elapse, and invoke the callbacks.  These kthreads
- * are organized into GP kthreads, which manage incoming callbacks, wait for
- * grace periods, and awaken CB kthreads, and the CB kthreads, which only
- * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
- * do a wake_up() on their GP kthread when they insert a callback into any
- * empty list, unless the rcu_nocb_poll boot parameter has been specified,
- * in which case each kthread actively polls its CPU.  (Which isn't so great
- * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
- *
- * This is intended to be used in conjunction with Frederic Weisbecker's
- * adaptive-idle work, which would seriously reduce OS jitter on CPUs
- * running CPU-bound user-mode computations.
- *
- * Offloading of callbacks can also be used as an energy-efficiency
- * measure because CPUs with no RCU callbacks queued are more aggressive
- * about entering dyntick-idle mode.
- */
-
-
-/*
- * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
- * If the list is invalid, a warning is emitted and all CPUs are offloaded.
- */
-static int __init rcu_nocb_setup(char *str)
-{
-       alloc_bootmem_cpumask_var(&rcu_nocb_mask);
-       if (cpulist_parse(str, rcu_nocb_mask)) {
-               pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
-               cpumask_setall(rcu_nocb_mask);
-       }
-       return 1;
-}
-__setup("rcu_nocbs=", rcu_nocb_setup);
-
-static int __init parse_rcu_nocb_poll(char *arg)
-{
-       rcu_nocb_poll = true;
-       return 0;
-}
-early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
-
-/*
- * Don't bother bypassing ->cblist if the call_rcu() rate is low.
- * After all, the main point of bypassing is to avoid lock contention
- * on ->nocb_lock, which only can happen at high call_rcu() rates.
- */
-static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
-module_param(nocb_nobypass_lim_per_jiffy, int, 0);
-
-/*
- * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
- * lock isn't immediately available, increment ->nocb_lock_contended to
- * flag the contention.
- */
-static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
-       __acquires(&rdp->nocb_bypass_lock)
-{
-       lockdep_assert_irqs_disabled();
-       if (raw_spin_trylock(&rdp->nocb_bypass_lock))
-               return;
-       atomic_inc(&rdp->nocb_lock_contended);
-       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
-       smp_mb__after_atomic(); /* atomic_inc() before lock. */
-       raw_spin_lock(&rdp->nocb_bypass_lock);
-       smp_mb__before_atomic(); /* atomic_dec() after lock. */
-       atomic_dec(&rdp->nocb_lock_contended);
-}
-
-/*
- * Spinwait until the specified rcu_data structure's ->nocb_lock is
- * not contended.  Please note that this is extremely special-purpose,
- * relying on the fact that at most two kthreads and one CPU contend for
- * this lock, and also that the two kthreads are guaranteed to have frequent
- * grace-period-duration time intervals between successive acquisitions
- * of the lock.  This allows us to use an extremely simple throttling
- * mechanism, and further to apply it only to the CPU doing floods of
- * call_rcu() invocations.  Don't try this at home!
- */
-static void rcu_nocb_wait_contended(struct rcu_data *rdp)
-{
-       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
-       while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
-               cpu_relax();
-}
-
-/*
- * Conditionally acquire the specified rcu_data structure's
- * ->nocb_bypass_lock.
- */
-static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-       return raw_spin_trylock(&rdp->nocb_bypass_lock);
-}
-
-/*
- * Release the specified rcu_data structure's ->nocb_bypass_lock.
- */
-static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
-       __releases(&rdp->nocb_bypass_lock)
-{
-       lockdep_assert_irqs_disabled();
-       raw_spin_unlock(&rdp->nocb_bypass_lock);
-}
-
-/*
- * Acquire the specified rcu_data structure's ->nocb_lock, but only
- * if it corresponds to a no-CBs CPU.
- */
-static void rcu_nocb_lock(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-       if (!rcu_rdp_is_offloaded(rdp))
-               return;
-       raw_spin_lock(&rdp->nocb_lock);
-}
-
-/*
- * Release the specified rcu_data structure's ->nocb_lock, but only
- * if it corresponds to a no-CBs CPU.
- */
-static void rcu_nocb_unlock(struct rcu_data *rdp)
-{
-       if (rcu_rdp_is_offloaded(rdp)) {
-               lockdep_assert_irqs_disabled();
-               raw_spin_unlock(&rdp->nocb_lock);
-       }
-}
-
-/*
- * Release the specified rcu_data structure's ->nocb_lock and restore
- * interrupts, but only if it corresponds to a no-CBs CPU.
- */
-static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
-                                      unsigned long flags)
-{
-       if (rcu_rdp_is_offloaded(rdp)) {
-               lockdep_assert_irqs_disabled();
-               raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-       } else {
-               local_irq_restore(flags);
-       }
-}
-
-/* Lockdep check that ->cblist may be safely accessed. */
-static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-       if (rcu_rdp_is_offloaded(rdp))
-               lockdep_assert_held(&rdp->nocb_lock);
-}
-
-/*
- * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
- * grace period.
- */
-static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
-{
-       swake_up_all(sq);
-}
-
-static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
-{
-       return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
-}
-
-static void rcu_init_one_nocb(struct rcu_node *rnp)
-{
-       init_swait_queue_head(&rnp->nocb_gp_wq[0]);
-       init_swait_queue_head(&rnp->nocb_gp_wq[1]);
-}
-
-/* Is the specified CPU a no-CBs CPU? */
-bool rcu_is_nocb_cpu(int cpu)
-{
-       if (cpumask_available(rcu_nocb_mask))
-               return cpumask_test_cpu(cpu, rcu_nocb_mask);
-       return false;
-}
-
-static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
-                          struct rcu_data *rdp,
-                          bool force, unsigned long flags)
-       __releases(rdp_gp->nocb_gp_lock)
-{
-       bool needwake = false;
-
-       if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
-               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                   TPS("AlreadyAwake"));
-               return false;
-       }
-
-       if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
-               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
-               del_timer(&rdp_gp->nocb_timer);
-       }
-
-       if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
-               WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
-               needwake = true;
-       }
-       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-       if (needwake) {
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
-               wake_up_process(rdp_gp->nocb_gp_kthread);
-       }
-
-       return needwake;
-}
-
-/*
- * Kick the GP kthread for this NOCB group.
- */
-static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
-{
-       unsigned long flags;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-       return __wake_nocb_gp(rdp_gp, rdp, force, flags);
-}
-
-/*
- * Arrange to wake the GP kthread for this NOCB group at some future
- * time when it is safe to do so.
- */
-static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
-                              const char *reason)
-{
-       unsigned long flags;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-
-       /*
-        * Bypass wakeup overrides previous deferments. In case
-        * of callback storm, no need to wake up too early.
-        */
-       if (waketype == RCU_NOCB_WAKE_BYPASS) {
-               mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
-               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
-       } else {
-               if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
-                       mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
-               if (rdp_gp->nocb_defer_wakeup < waketype)
-                       WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
-       }
-
-       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-
-       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
-}
-
-/*
- * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
- * However, if there is a callback to be enqueued and if ->nocb_bypass
- * proves to be initially empty, just return false because the no-CB GP
- * kthread may need to be awakened in this case.
- *
- * Note that this function always returns true if rhp is NULL.
- */
-static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                                    unsigned long j)
-{
-       struct rcu_cblist rcl;
-
-       WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
-       rcu_lockdep_assert_cblist_protected(rdp);
-       lockdep_assert_held(&rdp->nocb_bypass_lock);
-       if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
-               raw_spin_unlock(&rdp->nocb_bypass_lock);
-               return false;
-       }
-       /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
-       if (rhp)
-               rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
-       rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
-       rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
-       WRITE_ONCE(rdp->nocb_bypass_first, j);
-       rcu_nocb_bypass_unlock(rdp);
-       return true;
-}
-
-/*
- * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
- * However, if there is a callback to be enqueued and if ->nocb_bypass
- * proves to be initially empty, just return false because the no-CB GP
- * kthread may need to be awakened in this case.
- *
- * Note that this function always returns true if rhp is NULL.
- */
-static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                                 unsigned long j)
-{
-       if (!rcu_rdp_is_offloaded(rdp))
-               return true;
-       rcu_lockdep_assert_cblist_protected(rdp);
-       rcu_nocb_bypass_lock(rdp);
-       return rcu_nocb_do_flush_bypass(rdp, rhp, j);
-}
-
-/*
- * If the ->nocb_bypass_lock is immediately available, flush the
- * ->nocb_bypass queue into ->cblist.
- */
-static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
-{
-       rcu_lockdep_assert_cblist_protected(rdp);
-       if (!rcu_rdp_is_offloaded(rdp) ||
-           !rcu_nocb_bypass_trylock(rdp))
-               return;
-       WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
-}
-
-/*
- * See whether it is appropriate to use the ->nocb_bypass list in order
- * to control contention on ->nocb_lock.  A limited number of direct
- * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
- * is non-empty, further callbacks must be placed into ->nocb_bypass,
- * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
- * back to direct use of ->cblist.  However, ->nocb_bypass should not be
- * used if ->cblist is empty, because otherwise callbacks can be stranded
- * on ->nocb_bypass because we cannot count on the current CPU ever again
- * invoking call_rcu().  The general rule is that if ->nocb_bypass is
- * non-empty, the corresponding no-CBs grace-period kthread must not be
- * in an indefinite sleep state.
- *
- * Finally, it is not permitted to use the bypass during early boot,
- * as doing so would confuse the auto-initialization code.  Besides
- * which, there is no point in worrying about lock contention while
- * there is only one CPU in operation.
- */
-static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                               bool *was_alldone, unsigned long flags)
-{
-       unsigned long c;
-       unsigned long cur_gp_seq;
-       unsigned long j = jiffies;
-       long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-
-       lockdep_assert_irqs_disabled();
-
-       // Pure softirq/rcuc based processing: no bypassing, no
-       // locking.
-       if (!rcu_rdp_is_offloaded(rdp)) {
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               return false;
-       }
-
-       // In the process of (de-)offloading: no bypassing, but
-       // locking.
-       if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
-               rcu_nocb_lock(rdp);
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               return false; /* Not offloaded, no bypassing. */
-       }
-
-       // Don't use ->nocb_bypass during early boot.
-       if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
-               rcu_nocb_lock(rdp);
-               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               return false;
-       }
-
-       // If we have advanced to a new jiffy, reset counts to allow
-       // moving back from ->nocb_bypass to ->cblist.
-       if (j == rdp->nocb_nobypass_last) {
-               c = rdp->nocb_nobypass_count + 1;
-       } else {
-               WRITE_ONCE(rdp->nocb_nobypass_last, j);
-               c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
-               if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
-                                nocb_nobypass_lim_per_jiffy))
-                       c = 0;
-               else if (c > nocb_nobypass_lim_per_jiffy)
-                       c = nocb_nobypass_lim_per_jiffy;
-       }
-       WRITE_ONCE(rdp->nocb_nobypass_count, c);
-
-       // If there hasn't yet been all that many ->cblist enqueues
-       // this jiffy, tell the caller to enqueue onto ->cblist.  But flush
-       // ->nocb_bypass first.
-       if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
-               rcu_nocb_lock(rdp);
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               if (*was_alldone)
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("FirstQ"));
-               WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
-               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-               return false; // Caller must enqueue the callback.
-       }
-
-       // If ->nocb_bypass has been used too long or is too full,
-       // flush ->nocb_bypass to ->cblist.
-       if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
-           ncbs >= qhimark) {
-               rcu_nocb_lock(rdp);
-               if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
-                       *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-                       if (*was_alldone)
-                               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                                   TPS("FirstQ"));
-                       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-                       return false; // Caller must enqueue the callback.
-               }
-               if (j != rdp->nocb_gp_adv_time &&
-                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
-                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
-                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
-                       rdp->nocb_gp_adv_time = j;
-               }
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               return true; // Callback already enqueued.
-       }
-
-       // We need to use the bypass.
-       rcu_nocb_wait_contended(rdp);
-       rcu_nocb_bypass_lock(rdp);
-       ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-       rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
-       rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
-       if (!ncbs) {
-               WRITE_ONCE(rdp->nocb_bypass_first, j);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
-       }
-       rcu_nocb_bypass_unlock(rdp);
-       smp_mb(); /* Order enqueue before wake. */
-       if (ncbs) {
-               local_irq_restore(flags);
-       } else {
-               // No-CBs GP kthread might be indefinitely asleep, if so, wake.
-               rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
-               if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("FirstBQwake"));
-                       __call_rcu_nocb_wake(rdp, true, flags);
-               } else {
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("FirstBQnoWake"));
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-               }
-       }
-       return true; // Callback already enqueued.
-}
-
-/*
- * Awaken the no-CBs grace-period kthread if needed, either due to it
- * legitimately being asleep or due to overload conditions.
- *
- * If warranted, also wake up the kthread servicing this CPU's queues.
- */
-static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
-                                unsigned long flags)
-                                __releases(rdp->nocb_lock)
-{
-       unsigned long cur_gp_seq;
-       unsigned long j;
-       long len;
-       struct task_struct *t;
-
-       // If we are being polled or there is no kthread, just leave.
-       t = READ_ONCE(rdp->nocb_gp_kthread);
-       if (rcu_nocb_poll || !t) {
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                   TPS("WakeNotPoll"));
-               return;
-       }
-       // Need to actually do a wakeup.
-       len = rcu_segcblist_n_cbs(&rdp->cblist);
-       if (was_alldone) {
-               rdp->qlen_last_fqs_check = len;
-               if (!irqs_disabled_flags(flags)) {
-                       /* ... if queue was empty ... */
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       wake_nocb_gp(rdp, false);
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("WakeEmpty"));
-               } else {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
-                                          TPS("WakeEmptyIsDeferred"));
-               }
-       } else if (len > rdp->qlen_last_fqs_check + qhimark) {
-               /* ... or if many callbacks queued. */
-               rdp->qlen_last_fqs_check = len;
-               j = jiffies;
-               if (j != rdp->nocb_gp_adv_time &&
-                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
-                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
-                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
-                       rdp->nocb_gp_adv_time = j;
-               }
-               smp_mb(); /* Enqueue before timer_pending(). */
-               if ((rdp->nocb_cb_sleep ||
-                    !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
-                   !timer_pending(&rdp->nocb_timer)) {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
-                                          TPS("WakeOvfIsDeferred"));
-               } else {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
-               }
-       } else {
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
-       }
-       return;
-}
-
-/*
- * Check if we ignore this rdp.
- *
- * We check that without holding the nocb lock but
- * we make sure not to miss a freshly offloaded rdp
- * with the current ordering:
- *
- *  rdp_offload_toggle()        nocb_gp_enabled_cb()
- * -------------------------   ----------------------------
- *    WRITE flags                 LOCK nocb_gp_lock
- *    LOCK nocb_gp_lock           READ/WRITE nocb_gp_sleep
- *    READ/WRITE nocb_gp_sleep    UNLOCK nocb_gp_lock
- *    UNLOCK nocb_gp_lock         READ flags
- */
-static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
-{
-       u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;
-
-       return rcu_segcblist_test_flags(&rdp->cblist, flags);
-}
-
-static inline bool nocb_gp_update_state_deoffloading(struct rcu_data *rdp,
-                                                    bool *needwake_state)
-{
-       struct rcu_segcblist *cblist = &rdp->cblist;
-
-       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
-               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
-                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
-                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
-                               *needwake_state = true;
-               }
-               return false;
-       }
-
-       /*
-        * De-offloading. Clear our flag and notify the de-offload worker.
-        * We will ignore this rdp until it gets re-offloaded again.
-        */
-       WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
-       rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
-       if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
-               *needwake_state = true;
-       return true;
-}
-
-
-/*
- * No-CBs GP kthreads come here to wait for additional callbacks to show up
- * or for grace periods to end.
- */
-static void nocb_gp_wait(struct rcu_data *my_rdp)
-{
-       bool bypass = false;
-       long bypass_ncbs;
-       int __maybe_unused cpu = my_rdp->cpu;
-       unsigned long cur_gp_seq;
-       unsigned long flags;
-       bool gotcbs = false;
-       unsigned long j = jiffies;
-       bool needwait_gp = false; // This prevents actual uninitialized use.
-       bool needwake;
-       bool needwake_gp;
-       struct rcu_data *rdp;
-       struct rcu_node *rnp;
-       unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
-       bool wasempty = false;
-
-       /*
-        * Each pass through the following loop checks for CBs and for the
-        * nearest grace period (if any) to wait for next.  The CB kthreads
-        * and the global grace-period kthread are awakened if needed.
-        */
-       WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
-       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
-               bool needwake_state = false;
-
-               if (!nocb_gp_enabled_cb(rdp))
-                       continue;
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
-               rcu_nocb_lock_irqsave(rdp, flags);
-               if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       if (needwake_state)
-                               swake_up_one(&rdp->nocb_state_wq);
-                       continue;
-               }
-               bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-               if (bypass_ncbs &&
-                   (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
-                    bypass_ncbs > 2 * qhimark)) {
-                       // Bypass full or old, so flush it.
-                       (void)rcu_nocb_try_flush_bypass(rdp, j);
-                       bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-               } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       if (needwake_state)
-                               swake_up_one(&rdp->nocb_state_wq);
-                       continue; /* No callbacks here, try next. */
-               }
-               if (bypass_ncbs) {
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("Bypass"));
-                       bypass = true;
-               }
-               rnp = rdp->mynode;
-
-               // Advance callbacks if helpful and low contention.
-               needwake_gp = false;
-               if (!rcu_segcblist_restempty(&rdp->cblist,
-                                            RCU_NEXT_READY_TAIL) ||
-                   (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
-                    rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
-                       raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
-                       needwake_gp = rcu_advance_cbs(rnp, rdp);
-                       wasempty = rcu_segcblist_restempty(&rdp->cblist,
-                                                          RCU_NEXT_READY_TAIL);
-                       raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
-               }
-               // Need to wait on some grace period?
-               WARN_ON_ONCE(wasempty &&
-                            !rcu_segcblist_restempty(&rdp->cblist,
-                                                     RCU_NEXT_READY_TAIL));
-               if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
-                       if (!needwait_gp ||
-                           ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
-                               wait_gp_seq = cur_gp_seq;
-                       needwait_gp = true;
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("NeedWaitGP"));
-               }
-               if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
-                       needwake = rdp->nocb_cb_sleep;
-                       WRITE_ONCE(rdp->nocb_cb_sleep, false);
-                       smp_mb(); /* CB invocation -after- GP end. */
-               } else {
-                       needwake = false;
-               }
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               if (needwake) {
-                       swake_up_one(&rdp->nocb_cb_wq);
-                       gotcbs = true;
-               }
-               if (needwake_gp)
-                       rcu_gp_kthread_wake();
-               if (needwake_state)
-                       swake_up_one(&rdp->nocb_state_wq);
-       }
-
-       my_rdp->nocb_gp_bypass = bypass;
-       my_rdp->nocb_gp_gp = needwait_gp;
-       my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
-
-       if (bypass && !rcu_nocb_poll) {
-               // At least one child with non-empty ->nocb_bypass, so set
-               // timer in order to avoid stranding its callbacks.
-               wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
-                                  TPS("WakeBypassIsDeferred"));
-       }
-       if (rcu_nocb_poll) {
-               /* Polling, so trace if first poll in the series. */
-               if (gotcbs)
-                       trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
-               schedule_timeout_idle(1);
-       } else if (!needwait_gp) {
-               /* Wait for callbacks to appear. */
-               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
-               swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
-                               !READ_ONCE(my_rdp->nocb_gp_sleep));
-               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
-       } else {
-               rnp = my_rdp->mynode;
-               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
-               swait_event_interruptible_exclusive(
-                       rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
-                       rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
-                       !READ_ONCE(my_rdp->nocb_gp_sleep));
-               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
-       }
-       if (!rcu_nocb_poll) {
-               raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
-               if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
-                       WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
-                       del_timer(&my_rdp->nocb_timer);
-               }
-               WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
-               raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
-       }
-       my_rdp->nocb_gp_seq = -1;
-       WARN_ON(signal_pending(current));
-}
-
-/*
- * No-CBs grace-period-wait kthread.  There is one of these per group
- * of CPUs, but only once at least one CPU in that group has come online
- * at least once since boot.  This kthread checks for newly posted
- * callbacks from any of the CPUs it is responsible for, waits for a
- * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
- * that then have callback-invocation work to do.
- */
-static int rcu_nocb_gp_kthread(void *arg)
-{
-       struct rcu_data *rdp = arg;
-
-       for (;;) {
-               WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
-               nocb_gp_wait(rdp);
-               cond_resched_tasks_rcu_qs();
-       }
-       return 0;
-}
-
-static inline bool nocb_cb_can_run(struct rcu_data *rdp)
-{
-       u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
-       return rcu_segcblist_test_flags(&rdp->cblist, flags);
-}
-
-static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
-{
-       return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
-}
-
-/*
- * Invoke any ready callbacks from the corresponding no-CBs CPU,
- * then, if there are no more, wait for more to appear.
- */
-static void nocb_cb_wait(struct rcu_data *rdp)
-{
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       unsigned long cur_gp_seq;
-       unsigned long flags;
-       bool needwake_state = false;
-       bool needwake_gp = false;
-       bool can_sleep = true;
-       struct rcu_node *rnp = rdp->mynode;
-
-       local_irq_save(flags);
-       rcu_momentary_dyntick_idle();
-       local_irq_restore(flags);
-       /*
-        * Disable BH to provide the expected environment.  Also, when
-        * transitioning to/from NOCB mode, a self-requeuing callback might
-        * be invoked from softirq.  A short grace period could cause both
-        * instances of this callback to execute concurrently.
-        */
-       local_bh_disable();
-       rcu_do_batch(rdp);
-       local_bh_enable();
-       lockdep_assert_irqs_enabled();
-       rcu_nocb_lock_irqsave(rdp, flags);
-       if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
-           rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
-           raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
-               needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
-               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
-       }
-
-       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
-               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
-                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
-                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
-                               needwake_state = true;
-               }
-               if (rcu_segcblist_ready_cbs(cblist))
-                       can_sleep = false;
-       } else {
-               /*
-                * De-offloading. Clear our flag and notify the de-offload worker.
-                * We won't touch the callbacks and will keep sleeping until we
-                * get re-offloaded again.
-                */
-               WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
-               rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
-               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
-                       needwake_state = true;
-       }
-
-       WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
-
-       if (rdp->nocb_cb_sleep)
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
-
-       rcu_nocb_unlock_irqrestore(rdp, flags);
-       if (needwake_gp)
-               rcu_gp_kthread_wake();
-
-       if (needwake_state)
-               swake_up_one(&rdp->nocb_state_wq);
-
-       do {
-               swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
-                                                   nocb_cb_wait_cond(rdp));
-
-               // VVV Ensure CB invocation follows _sleep test.
-               if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
-                       WARN_ON(signal_pending(current));
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
-               }
-       } while (!nocb_cb_can_run(rdp));
-}
-
-/*
- * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
- * nocb_cb_wait() to do the dirty work.
- */
-static int rcu_nocb_cb_kthread(void *arg)
-{
-       struct rcu_data *rdp = arg;
-
-       // Each pass through this loop does one callback batch, and,
-       // if there are no more ready callbacks, waits for them.
-       for (;;) {
-               nocb_cb_wait(rdp);
-               cond_resched_tasks_rcu_qs();
-       }
-       return 0;
-}
-
-/* Is a deferred wakeup of rcu_nocb_kthread() required? */
-static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
-{
-       return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
-}
-
-/* Do a deferred wakeup of rcu_nocb_kthread(). */
-static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
-                                          struct rcu_data *rdp, int level,
-                                          unsigned long flags)
-       __releases(rdp_gp->nocb_gp_lock)
-{
-       int ndw;
-       int ret;
-
-       if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
-               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-               return false;
-       }
-
-       ndw = rdp_gp->nocb_defer_wakeup;
-       ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
-       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
-
-       return ret;
-}
-
-/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
-static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
-{
-       unsigned long flags;
-       struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
-
-       WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
-       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
-
-       raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
-       smp_mb__after_spinlock(); /* Timer expire before wakeup. */
-       do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
-}
-
-/*
- * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
- * This means we do an inexact common-case check.  Note that if
- * we miss, ->nocb_timer will eventually clean things up.
- */
-static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
-{
-       unsigned long flags;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-
-       if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
-               return false;
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-       return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
-}
-
-void rcu_nocb_flush_deferred_wakeup(void)
-{
-       do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
-}
-EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
-
-static int rdp_offload_toggle(struct rcu_data *rdp,
-                              bool offload, unsigned long flags)
-       __releases(rdp->nocb_lock)
-{
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-       bool wake_gp = false;
-
-       rcu_segcblist_offload(cblist, offload);
-
-       if (rdp->nocb_cb_sleep)
-               rdp->nocb_cb_sleep = false;
-       rcu_nocb_unlock_irqrestore(rdp, flags);
-
-       /*
-        * Ignore former value of nocb_cb_sleep and force wake up as it could
-        * have been spuriously set to false already.
-        */
-       swake_up_one(&rdp->nocb_cb_wq);
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-       if (rdp_gp->nocb_gp_sleep) {
-               rdp_gp->nocb_gp_sleep = false;
-               wake_gp = true;
-       }
-       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-
-       if (wake_gp)
-               wake_up_process(rdp_gp->nocb_gp_kthread);
-
-       return 0;
-}
-
-static long rcu_nocb_rdp_deoffload(void *arg)
-{
-       struct rcu_data *rdp = arg;
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       unsigned long flags;
-       int ret;
-
-       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
-
-       pr_info("De-offloading %d\n", rdp->cpu);
-
-       rcu_nocb_lock_irqsave(rdp, flags);
-       /*
-        * Flush once and for all now. This suffices because we are
-        * running on the target CPU holding ->nocb_lock (thus having
-        * interrupts disabled), and because rdp_offload_toggle()
-        * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
-        * Thus future calls to rcu_segcblist_completely_offloaded() will
-        * return false, which means that future calls to rcu_nocb_try_bypass()
-        * will refuse to put anything into the bypass.
-        */
-       WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
-       ret = rdp_offload_toggle(rdp, false, flags);
-       swait_event_exclusive(rdp->nocb_state_wq,
-                             !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
-                                                       SEGCBLIST_KTHREAD_GP));
-       /*
-        * Lock one last time to acquire latest callback updates from kthreads
-        * so we can later handle callbacks locally without locking.
-        */
-       rcu_nocb_lock_irqsave(rdp, flags);
-       /*
-        * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb
-        * lock is released but how about being paranoid for once?
-        */
-       rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
-       /*
-        * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
-        * rcu_nocb_unlock_irqrestore() anymore.
-        */
-       raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-
-       /* Sanity check */
-       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-
-
-       return ret;
-}
-
-int rcu_nocb_cpu_deoffload(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-       int ret = 0;
-
-       mutex_lock(&rcu_state.barrier_mutex);
-       cpus_read_lock();
-       if (rcu_rdp_is_offloaded(rdp)) {
-               if (cpu_online(cpu)) {
-                       ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
-                       if (!ret)
-                               cpumask_clear_cpu(cpu, rcu_nocb_mask);
-               } else {
-                       pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
-                       ret = -EINVAL;
-               }
-       }
-       cpus_read_unlock();
-       mutex_unlock(&rcu_state.barrier_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
-
-static long rcu_nocb_rdp_offload(void *arg)
-{
-       struct rcu_data *rdp = arg;
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       unsigned long flags;
-       int ret;
-
-       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
-       /*
-        * For now we only support re-offload, i.e., the rdp must have been
-        * offloaded on boot first.
-        */
-       if (!rdp->nocb_gp_rdp)
-               return -EINVAL;
-
-       pr_info("Offloading %d\n", rdp->cpu);
-       /*
-        * Can't use rcu_nocb_lock_irqsave() while we are in
-        * SEGCBLIST_SOFTIRQ_ONLY mode.
-        */
-       raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
-
-       /*
-        * We didn't take the nocb lock while working on the
-        * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode.
-        * Every modification that has been done previously on
-        * rdp->cblist must be visible remotely to the nocb kthreads
-        * upon wake up after reading the cblist flags.
-        *
-        * The layout against nocb_lock enforces that ordering:
-        *
-        *  __rcu_nocb_rdp_offload()   nocb_cb_wait()/nocb_gp_wait()
-        * -------------------------   ----------------------------
-        *      WRITE callbacks           rcu_nocb_lock()
-        *      rcu_nocb_lock()           READ flags
-        *      WRITE flags               READ callbacks
-        *      rcu_nocb_unlock()         rcu_nocb_unlock()
-        */
-       ret = rdp_offload_toggle(rdp, true, flags);
-       swait_event_exclusive(rdp->nocb_state_wq,
-                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
-                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
-
-       return ret;
-}
-
-int rcu_nocb_cpu_offload(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-       int ret = 0;
-
-       mutex_lock(&rcu_state.barrier_mutex);
-       cpus_read_lock();
-       if (!rcu_rdp_is_offloaded(rdp)) {
-               if (cpu_online(cpu)) {
-                       ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
-                       if (!ret)
-                               cpumask_set_cpu(cpu, rcu_nocb_mask);
-               } else {
-                       pr_info("NOCB: Can't CB-offload an offline CPU\n");
-                       ret = -EINVAL;
-               }
-       }
-       cpus_read_unlock();
-       mutex_unlock(&rcu_state.barrier_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
-
-void __init rcu_init_nohz(void)
-{
-       int cpu;
-       bool need_rcu_nocb_mask = false;
-       struct rcu_data *rdp;
-
-#if defined(CONFIG_NO_HZ_FULL)
-       if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
-               need_rcu_nocb_mask = true;
-#endif /* #if defined(CONFIG_NO_HZ_FULL) */
-
-       if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
-               if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
-                       pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
-                       return;
-               }
-       }
-       if (!cpumask_available(rcu_nocb_mask))
-               return;
-
-#if defined(CONFIG_NO_HZ_FULL)
-       if (tick_nohz_full_running)
-               cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
-#endif /* #if defined(CONFIG_NO_HZ_FULL) */
-
-       if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
-               pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
-               cpumask_and(rcu_nocb_mask, cpu_possible_mask,
-                           rcu_nocb_mask);
-       }
-       if (cpumask_empty(rcu_nocb_mask))
-               pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
-       else
-               pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
-                       cpumask_pr_args(rcu_nocb_mask));
-       if (rcu_nocb_poll)
-               pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
-
-       for_each_cpu(cpu, rcu_nocb_mask) {
-               rdp = per_cpu_ptr(&rcu_data, cpu);
-               if (rcu_segcblist_empty(&rdp->cblist))
-                       rcu_segcblist_init(&rdp->cblist);
-               rcu_segcblist_offload(&rdp->cblist, true);
-               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
-               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
-       }
-       rcu_organize_nocb_kthreads();
-}
-
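
The mask built here is normally populated from the boot line. As a usage sketch, assuming a kernel built with CONFIG_RCU_NOCB_CPU=y, offloading CPUs 1-7 and enabling the polling mode checked above could look like:

        rcu_nocbs=1-7 rcu_nocb_poll

nohz_full=1-7 would add the same CPUs to rcu_nocb_mask implicitly via the CONFIG_NO_HZ_FULL branch above.
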
-/* Initialize per-rcu_data variables for no-CBs CPUs. */
-static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
-{
-       init_swait_queue_head(&rdp->nocb_cb_wq);
-       init_swait_queue_head(&rdp->nocb_gp_wq);
-       init_swait_queue_head(&rdp->nocb_state_wq);
-       raw_spin_lock_init(&rdp->nocb_lock);
-       raw_spin_lock_init(&rdp->nocb_bypass_lock);
-       raw_spin_lock_init(&rdp->nocb_gp_lock);
-       timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
-       rcu_cblist_init(&rdp->nocb_bypass);
-}
-
-/*
- * If the specified CPU is a no-CBs CPU that does not already have its
- * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
- * for this CPU's group has not yet been created, spawn it as well.
- */
-static void rcu_spawn_one_nocb_kthread(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-       struct rcu_data *rdp_gp;
-       struct task_struct *t;
-
-       /*
-        * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
-        * then nothing to do.
-        */
-       if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
-               return;
-
-       /* If we didn't spawn the GP kthread first, reorganize! */
-       rdp_gp = rdp->nocb_gp_rdp;
-       if (!rdp_gp->nocb_gp_kthread) {
-               t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
-                               "rcuog/%d", rdp_gp->cpu);
-               if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
-                       return;
-               WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
-       }
-
-       /* Spawn the kthread for this CPU. */
-       t = kthread_run(rcu_nocb_cb_kthread, rdp,
-                       "rcuo%c/%d", rcu_state.abbr, cpu);
-       if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
-               return;
-       WRITE_ONCE(rdp->nocb_cb_kthread, t);
-       WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
-}
-
-/*
- * If the specified CPU is a no-CBs CPU that does not already have its
- * rcuo kthread, spawn it.
- */
-static void rcu_spawn_cpu_nocb_kthread(int cpu)
-{
-       if (rcu_scheduler_fully_active)
-               rcu_spawn_one_nocb_kthread(cpu);
-}
-
-/*
- * Once the scheduler is running, spawn rcuo kthreads for all online
- * no-CBs CPUs.  This assumes that the early_initcall()s happen before
- * non-boot CPUs come online -- if this changes, we will need to add
- * some mutual exclusion.
- */
-static void __init rcu_spawn_nocb_kthreads(void)
-{
-       int cpu;
-
-       for_each_online_cpu(cpu)
-               rcu_spawn_cpu_nocb_kthread(cpu);
-}
-
-/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
-static int rcu_nocb_gp_stride = -1;
-module_param(rcu_nocb_gp_stride, int, 0444);
-
-/*
- * Initialize GP-CB relationships for all no-CBs CPUs.
- */
-static void __init rcu_organize_nocb_kthreads(void)
-{
-       int cpu;
-       bool firsttime = true;
-       bool gotnocbs = false;
-       bool gotnocbscbs = true;
-       int ls = rcu_nocb_gp_stride;
-       int nl = 0;  /* Next GP kthread. */
-       struct rcu_data *rdp;
-       struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
-       struct rcu_data *rdp_prev = NULL;
-
-       if (!cpumask_available(rcu_nocb_mask))
-               return;
-       if (ls == -1) {
-               ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
-               rcu_nocb_gp_stride = ls;
-       }
-
-       /*
-        * Each pass through this loop sets up one rcu_data structure.
-        * Should the corresponding CPU come online in the future, then
-        * we will spawn the needed set of rcu_nocb_kthread() kthreads.
-        */
-       for_each_cpu(cpu, rcu_nocb_mask) {
-               rdp = per_cpu_ptr(&rcu_data, cpu);
-               if (rdp->cpu >= nl) {
-                       /* New GP kthread, set up for CBs & next GP. */
-                       gotnocbs = true;
-                       nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
-                       rdp->nocb_gp_rdp = rdp;
-                       rdp_gp = rdp;
-                       if (dump_tree) {
-                               if (!firsttime)
-                                       pr_cont("%s\n", gotnocbscbs
-                                                       ? "" : " (self only)");
-                               gotnocbscbs = false;
-                               firsttime = false;
-                               pr_alert("%s: No-CB GP kthread CPU %d:",
-                                        __func__, cpu);
-                       }
-               } else {
-                       /* Another CB kthread, link to previous GP kthread. */
-                       gotnocbscbs = true;
-                       rdp->nocb_gp_rdp = rdp_gp;
-                       rdp_prev->nocb_next_cb_rdp = rdp;
-                       if (dump_tree)
-                               pr_cont(" %d", cpu);
-               }
-               rdp_prev = rdp;
-       }
-       if (gotnocbs && dump_tree)
-               pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
-}
-
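
To make the default stride concrete: on a hypothetical 16-CPU system, int_sqrt(16) is 4, so ls becomes 4 and every fourth CPU leads an rcuog group. A small userspace sketch of that grouping (sqrt() stands in for the kernel's int_sqrt()):

#include <math.h>
#include <stdio.h>

/*
 * Illustrative sketch of the default grouping: ls mirrors
 * nr_cpu_ids / int_sqrt(nr_cpu_ids) and nl mirrors
 * DIV_ROUND_UP(cpu + 1, ls) * ls from rcu_organize_nocb_kthreads().
 */
int main(void)
{
        int nr_cpu_ids = 16;    /* assumed example CPU count */
        int ls = nr_cpu_ids / (int)sqrt((double)nr_cpu_ids);
        int nl = 0;
        int cpu;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                if (cpu >= nl) {
                        nl = (cpu + 1 + ls - 1) / ls * ls;
                        printf("%srcuog group led by CPU %d:", cpu ? "\n" : "", cpu);
                }
                printf(" %d", cpu);
        }
        printf("\n");
        return 0;
}
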
-/*
- * Bind the current task to the offloaded CPUs.  If there are no offloaded
- * CPUs, leave the task unbound.  Splat if the bind attempt fails.
- */
-void rcu_bind_current_to_nocb(void)
-{
-       if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
-               WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
-}
-EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
-
-// The ->on_cpu field is available only in CONFIG_SMP=y, so...
-#ifdef CONFIG_SMP
-static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
-{
-       return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
-}
-#else // #ifdef CONFIG_SMP
-static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
-{
-       return "";
-}
-#endif // #else #ifdef CONFIG_SMP
-
-/*
- * Dump out nocb grace-period kthread state for the specified rcu_data
- * structure.
- */
-static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
-{
-       struct rcu_node *rnp = rdp->mynode;
-
-       pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
-               rdp->cpu,
-               "kK"[!!rdp->nocb_gp_kthread],
-               "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
-               "dD"[!!rdp->nocb_defer_wakeup],
-               "tT"[timer_pending(&rdp->nocb_timer)],
-               "sS"[!!rdp->nocb_gp_sleep],
-               ".W"[swait_active(&rdp->nocb_gp_wq)],
-               ".W"[swait_active(&rnp->nocb_gp_wq[0])],
-               ".W"[swait_active(&rnp->nocb_gp_wq[1])],
-               ".B"[!!rdp->nocb_gp_bypass],
-               ".G"[!!rdp->nocb_gp_gp],
-               (long)rdp->nocb_gp_seq,
-               rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
-               rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
-               rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
-               show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
-}
-
-/* Dump out nocb kthread state for the specified rcu_data structure. */
-static void show_rcu_nocb_state(struct rcu_data *rdp)
-{
-       char bufw[20];
-       char bufr[20];
-       struct rcu_segcblist *rsclp = &rdp->cblist;
-       bool waslocked;
-       bool wassleep;
-
-       if (rdp->nocb_gp_rdp == rdp)
-               show_rcu_nocb_gp_state(rdp);
-
-       sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
-       sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
-       pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
-               rdp->cpu, rdp->nocb_gp_rdp->cpu,
-               rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1,
-               "kK"[!!rdp->nocb_cb_kthread],
-               "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
-               "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
-               "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
-               "sS"[!!rdp->nocb_cb_sleep],
-               ".W"[swait_active(&rdp->nocb_cb_wq)],
-               jiffies - rdp->nocb_bypass_first,
-               jiffies - rdp->nocb_nobypass_last,
-               rdp->nocb_nobypass_count,
-               ".D"[rcu_segcblist_ready_cbs(rsclp)],
-               ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
-               rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
-               ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
-               rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
-               ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
-               ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
-               rcu_segcblist_n_cbs(&rdp->cblist),
-               rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
-               rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
-               show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
-
-       /* It is OK for GP kthreads to have GP state. */
-       if (rdp->nocb_gp_rdp == rdp)
-               return;
-
-       waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
-       wassleep = swait_active(&rdp->nocb_gp_wq);
-       if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
-               return;  /* Nothing untoward. */
-
-       pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
-               "lL"[waslocked],
-               "dD"[!!rdp->nocb_defer_wakeup],
-               "sS"[!!rdp->nocb_gp_sleep],
-               ".W"[wassleep]);
-}
-
-#else /* #ifdef CONFIG_RCU_NOCB_CPU */
-
-/* No ->nocb_lock to acquire.  */
-static void rcu_nocb_lock(struct rcu_data *rdp)
-{
-}
-
-/* No ->nocb_lock to release.  */
-static void rcu_nocb_unlock(struct rcu_data *rdp)
-{
-}
-
-/* No ->nocb_lock to release.  */
-static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
-                                      unsigned long flags)
-{
-       local_irq_restore(flags);
-}
-
-/* Lockdep check that ->cblist may be safely accessed. */
-static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-}
-
-static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
-{
-}
-
-static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
-{
-       return NULL;
-}
-
-static void rcu_init_one_nocb(struct rcu_node *rnp)
-{
-}
-
-static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                                 unsigned long j)
-{
-       return true;
-}
-
-static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                               bool *was_alldone, unsigned long flags)
-{
-       return false;
-}
-
-static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
-                                unsigned long flags)
-{
-       WARN_ON_ONCE(1);  /* Should be dead code! */
-}
-
-static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
-{
-}
-
-static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
-{
-       return false;
-}
-
-static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
-{
-       return false;
-}
-
-static void rcu_spawn_cpu_nocb_kthread(int cpu)
-{
-}
-
-static void __init rcu_spawn_nocb_kthreads(void)
-{
-}
-
-static void show_rcu_nocb_state(struct rcu_data *rdp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
-
 /*
  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
  * grace-period kthread will do force_quiescent_state() processing?
@@ -2982,17 +1498,17 @@ static void noinstr rcu_dynticks_task_exit(void)
 /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
 static void rcu_dynticks_task_trace_enter(void)
 {
-#ifdef CONFIG_TASKS_RCU_TRACE
+#ifdef CONFIG_TASKS_TRACE_RCU
        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
                current->trc_reader_special.b.need_mb = true;
-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }
 
 /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
 static void rcu_dynticks_task_trace_exit(void)
 {
-#ifdef CONFIG_TASKS_RCU_TRACE
+#ifdef CONFIG_TASKS_TRACE_RCU
        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
                current->trc_reader_special.b.need_mb = false;
-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }
index 6c76988..677ee3d 100644 (file)
@@ -7,6 +7,8 @@
  * Author: Paul E. McKenney <paulmck@linux.ibm.com>
  */
 
+#include <linux/kvm_para.h>
+
 //////////////////////////////////////////////////////////////////////////////
 //
 // Controlling CPU stall warnings, including delay calculation.
@@ -117,17 +119,14 @@ static void panic_on_rcu_stall(void)
 }
 
 /**
- * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
- *
- * Set the stall-warning timeout way off into the future, thus preventing
- * any RCU CPU stall-warning messages from appearing in the current set of
- * RCU grace periods.
+ * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
  *
  * The caller must disable hard irqs.
  */
 void rcu_cpu_stall_reset(void)
 {
-       WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
+       WRITE_ONCE(rcu_state.jiffies_stall,
+                  jiffies + rcu_jiffies_till_stall_check());
 }
 
 //////////////////////////////////////////////////////////////////////////////
@@ -267,8 +266,10 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
        struct task_struct *ts[8];
 
        lockdep_assert_irqs_disabled();
-       if (!rcu_preempt_blocked_readers_cgp(rnp))
+       if (!rcu_preempt_blocked_readers_cgp(rnp)) {
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return 0;
+       }
        pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
               rnp->level, rnp->grplo, rnp->grphi);
        t = list_entry(rnp->gp_tasks->prev,
@@ -280,8 +281,8 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
                        break;
        }
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       for (i--; i; i--) {
-               t = ts[i];
+       while (i) {
+               t = ts[--i];
                if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
                        pr_cont(" P%d", t->pid);
                else
@@ -350,7 +351,7 @@ static void rcu_dump_cpu_stacks(void)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
        sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
                rdp->last_accelerate & 0xffff, jiffies & 0xffff,
@@ -464,9 +465,10 @@ static void rcu_check_gp_kthread_starvation(void)
                pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
                       rcu_state.name, j,
                       (long)rcu_seq_current(&rcu_state.gp_seq),
-                      data_race(rcu_state.gp_flags),
-                      gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
-                      gpk ? gpk->__state : ~0, cpu);
+                      data_race(READ_ONCE(rcu_state.gp_flags)),
+                      gp_state_getname(rcu_state.gp_state),
+                      data_race(READ_ONCE(rcu_state.gp_state)),
+                      gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
                if (gpk) {
                        pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
                        pr_err("RCU grace-period kthread stack dump:\n");
@@ -509,7 +511,7 @@ static void rcu_check_gp_kthread_expired_fqs_timer(void)
                       (long)rcu_seq_current(&rcu_state.gp_seq),
                       data_race(rcu_state.gp_flags),
                       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
-                      gpk->__state);
+                      data_race(READ_ONCE(gpk->__state)));
                pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
                       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
        }
@@ -568,11 +570,11 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
-                       gpa = data_race(rcu_state.gp_activity);
+                       gpa = data_race(READ_ONCE(rcu_state.gp_activity));
                        pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
                               rcu_state.name, j - gpa, j, gpa,
-                              data_race(jiffies_till_next_fqs),
-                              rcu_get_root()->qsmask);
+                              data_race(READ_ONCE(jiffies_till_next_fqs)),
+                              data_race(READ_ONCE(rcu_get_root()->qsmask)));
                }
        }
        /* Rewrite if needed in case of slow consoles. */
@@ -646,6 +648,7 @@ static void print_cpu_stall(unsigned long gps)
 
 static void check_cpu_stall(struct rcu_data *rdp)
 {
+       bool didstall = false;
        unsigned long gs1;
        unsigned long gs2;
        unsigned long gps;
@@ -691,24 +694,46 @@ static void check_cpu_stall(struct rcu_data *rdp)
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
-       jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+       jn = jiffies + ULONG_MAX / 2;
        if (rcu_gp_in_progress() &&
            (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
            cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
 
+               /*
+                * If a virtual machine is stopped by the host it can look to
+                * the watchdog like an RCU stall. Check to see if the host
+                * stopped the vm.
+                */
+               if (kvm_check_and_clear_guest_paused())
+                       return;
+
                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(gps);
                if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
                        rcu_ftrace_dump(DUMP_ALL);
+               didstall = true;
 
        } else if (rcu_gp_in_progress() &&
                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
                   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
 
+               /*
+                * If a virtual machine is stopped by the host it can look to
+                * the watchdog like an RCU stall. Check to see if the host
+                * stopped the vm.
+                */
+               if (kvm_check_and_clear_guest_paused())
+                       return;
+
                /* They had a few time units to dump stack, so complain. */
                print_other_cpu_stall(gs2, gps);
                if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
                        rcu_ftrace_dump(DUMP_ALL);
+               didstall = true;
+       }
+       if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) {
+               jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+               WRITE_ONCE(rcu_state.jiffies_stall, jn);
        }
 }
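The hunk above parks rcu_state.jiffies_stall roughly half the jiffies wrap-around in the future while one CPU reports the stall, and only re-arms the real 3x-timeout deadline once the report is out. A minimal standalone sketch of that claim/park/re-arm pattern, using C11 atomics rather than the kernel's cmpxchg()/WRITE_ONCE(); all names and values below are illustrative only:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long stall_deadline;
static unsigned long fake_jiffies = 1000;
static const unsigned long stall_timeout = 21; /* stand-in for rcu_jiffies_till_stall_check() */

static void check_stall(void)
{
        unsigned long js = atomic_load(&stall_deadline);
        unsigned long jn = fake_jiffies + ULONG_MAX / 2;        /* effectively "never" */

        if (fake_jiffies < js)
                return;                                         /* no stall yet */

        /* Claim the report; other callers see the parked deadline and back off. */
        if (!atomic_compare_exchange_strong(&stall_deadline, &js, jn))
                return;

        printf("stall detected at %lu, dumping state...\n", fake_jiffies);

        /* Re-arm the real timeout, but only if nobody moved the deadline meanwhile. */
        js = jn;
        atomic_compare_exchange_strong(&stall_deadline, &js,
                                       fake_jiffies + 3 * stall_timeout + 3);
}

int main(void)
{
        atomic_store(&stall_deadline, fake_jiffies + stall_timeout);
        fake_jiffies += 50;                     /* pretend the deadline passed */
        check_stall();
        printf("next deadline: %lu\n", (unsigned long)atomic_load(&stall_deadline));
        return 0;
}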
 
@@ -742,7 +767,7 @@ bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
 
        rcu_for_each_leaf_node(rnp) {
                if (!cpup) {
-                       if (READ_ONCE(rnp->qsmask)) {
+                       if (data_race(READ_ONCE(rnp->qsmask))) {
                                return false;
                        } else {
                                if (READ_ONCE(rnp->gp_tasks))
@@ -791,32 +816,34 @@ void show_rcu_gp_kthreads(void)
        struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
 
        j = jiffies;
-       ja = j - data_race(rcu_state.gp_activity);
-       jr = j - data_race(rcu_state.gp_req_activity);
-       js = j - data_race(rcu_state.gp_start);
-       jw = j - data_race(rcu_state.gp_wake_time);
+       ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
+       jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
+       js = j - data_race(READ_ONCE(rcu_state.gp_start));
+       jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
        pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
                rcu_state.name, gp_state_getname(rcu_state.gp_state),
-               rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU,
-               js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
-               (long)data_race(rcu_state.gp_seq),
-               (long)data_race(rcu_get_root()->gp_seq_needed),
-               data_race(rcu_state.gp_max),
-               data_race(rcu_state.gp_flags));
+               data_race(READ_ONCE(rcu_state.gp_state)),
+               t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
+               js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
+               (long)data_race(READ_ONCE(rcu_state.gp_seq)),
+               (long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
+               data_race(READ_ONCE(rcu_state.gp_max)),
+               data_race(READ_ONCE(rcu_state.gp_flags)));
        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
-                   !data_race(rnp->qsmask) && !data_race(rnp->boost_tasks) &&
-                   !data_race(rnp->exp_tasks) && !data_race(rnp->gp_tasks))
+                   !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
+                   !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
                        continue;
                pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
                        rnp->grplo, rnp->grphi,
-                       (long)data_race(rnp->gp_seq), (long)data_race(rnp->gp_seq_needed),
-                       data_race(rnp->qsmask),
-                       ".b"[!!data_race(rnp->boost_kthread_task)],
-                       ".B"[!!data_race(rnp->boost_tasks)],
-                       ".E"[!!data_race(rnp->exp_tasks)],
-                       ".G"[!!data_race(rnp->gp_tasks)],
-                       data_race(rnp->n_boosts));
+                       (long)data_race(READ_ONCE(rnp->gp_seq)),
+                       (long)data_race(READ_ONCE(rnp->gp_seq_needed)),
+                       data_race(READ_ONCE(rnp->qsmask)),
+                       ".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
+                       ".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
+                       ".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
+                       ".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
+                       data_race(READ_ONCE(rnp->n_boosts)));
                if (!rcu_is_leaf_node(rnp))
                        continue;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
@@ -826,12 +853,12 @@ void show_rcu_gp_kthreads(void)
                                         READ_ONCE(rdp->gp_seq_needed)))
                                continue;
                        pr_info("\tcpu %d ->gp_seq_needed %ld\n",
-                               cpu, (long)data_race(rdp->gp_seq_needed));
+                               cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
                }
        }
        for_each_possible_cpu(cpu) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
-               cbs += data_race(rdp->n_cbs_invoked);
+               cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
                if (rcu_segcblist_is_offloaded(&rdp->cblist))
                        show_rcu_nocb_state(rdp);
        }
@@ -913,11 +940,11 @@ void rcu_fwd_progress_check(unsigned long j)
 
        if (rcu_gp_in_progress()) {
                pr_info("%s: GP age %lu jiffies\n",
-                       __func__, jiffies - rcu_state.gp_start);
+                       __func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
                show_rcu_gp_kthreads();
        } else {
                pr_info("%s: Last GP end %lu jiffies ago\n",
-                       __func__, jiffies - rcu_state.gp_end);
+                       __func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
                preempt_disable();
                rdp = this_cpu_ptr(&rcu_data);
                rcu_check_gp_start_stall(rdp->mynode, rdp, j);
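Most of the churn above wraps diagnostic-only reads as data_race(READ_ONCE(...)): READ_ONCE() forces a single, untorn volatile load, and data_race() tells KCSAN that the remaining race is intentional. A rough standalone stand-in for the combined idiom; MY_READ_ONCE()/MY_DATA_RACE() are illustrative substitutes, not the kernel's definitions:

#include <stdio.h>

/* Illustrative substitutes only, not the kernel's definitions. */
#define MY_READ_ONCE(x)         (*(const volatile __typeof__(x) *)&(x))
#define MY_DATA_RACE(expr)      (expr)  /* KCSAN "this race is intentional" no-op stand-in */

static unsigned long gp_flags;          /* hypothetical field read by a diagnostic path */

int main(void)
{
        gp_flags = 0x2;

        /* Diagnostic read: untorn, not re-read by the compiler, not flagged by KCSAN. */
        printf("gp_flags %#lx\n", MY_DATA_RACE(MY_READ_ONCE(gp_flags)));
        return 0;
}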
index 29e8fc5..64a0828 100644 (file)
@@ -64,6 +64,7 @@ torture_param(bool, use_cpus_read_lock, 0, "Use cpus_read_lock() to exclude CPU
 torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
 torture_param(int, weight_resched, -1, "Testing weight for resched_cpu() operations.");
 torture_param(int, weight_single, -1, "Testing weight for single-CPU no-wait operations.");
+torture_param(int, weight_single_rpc, -1, "Testing weight for single-CPU RPC operations.");
 torture_param(int, weight_single_wait, -1, "Testing weight for single-CPU operations.");
 torture_param(int, weight_many, -1, "Testing weight for multi-CPU no-wait operations.");
 torture_param(int, weight_many_wait, -1, "Testing weight for multi-CPU operations.");
@@ -86,6 +87,8 @@ struct scf_statistics {
        long long n_resched;
        long long n_single;
        long long n_single_ofl;
+       long long n_single_rpc;
+       long long n_single_rpc_ofl;
        long long n_single_wait;
        long long n_single_wait_ofl;
        long long n_many;
@@ -101,14 +104,17 @@ static DEFINE_PER_CPU(long long, scf_invoked_count);
 // Data for random primitive selection
 #define SCF_PRIM_RESCHED       0
 #define SCF_PRIM_SINGLE                1
-#define SCF_PRIM_MANY          2
-#define SCF_PRIM_ALL           3
-#define SCF_NPRIMS             7 // Need wait and no-wait versions of each,
-                                 //  except for SCF_PRIM_RESCHED.
+#define SCF_PRIM_SINGLE_RPC    2
+#define SCF_PRIM_MANY          3
+#define SCF_PRIM_ALL           4
+#define SCF_NPRIMS             8 // Need wait and no-wait versions of each,
+                                 //  except for SCF_PRIM_RESCHED and
+                                 //  SCF_PRIM_SINGLE_RPC.
 
 static char *scf_prim_name[] = {
        "resched_cpu",
        "smp_call_function_single",
+       "smp_call_function_single_rpc",
        "smp_call_function_many",
        "smp_call_function",
 };
@@ -128,6 +134,8 @@ struct scf_check {
        bool scfc_out;
        int scfc_cpu; // -1 for not _single().
        bool scfc_wait;
+       bool scfc_rpc;
+       struct completion scfc_completion;
 };
 
 // Use to wait for all threads to start.
@@ -158,6 +166,7 @@ static void scf_torture_stats_print(void)
                scfs.n_resched += scf_stats_p[i].n_resched;
                scfs.n_single += scf_stats_p[i].n_single;
                scfs.n_single_ofl += scf_stats_p[i].n_single_ofl;
+               scfs.n_single_rpc += scf_stats_p[i].n_single_rpc;
                scfs.n_single_wait += scf_stats_p[i].n_single_wait;
                scfs.n_single_wait_ofl += scf_stats_p[i].n_single_wait_ofl;
                scfs.n_many += scf_stats_p[i].n_many;
@@ -168,9 +177,10 @@ static void scf_torture_stats_print(void)
        if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
            atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
                bangstr = "!!! ";
-       pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld many: %lld/%lld all: %lld/%lld ",
+       pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
                 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
                 scfs.n_single, scfs.n_single_wait, scfs.n_single_ofl, scfs.n_single_wait_ofl,
+                scfs.n_single_rpc, scfs.n_single_rpc_ofl,
                 scfs.n_many, scfs.n_many_wait, scfs.n_all, scfs.n_all_wait);
        torture_onoff_stats();
        pr_cont("ste: %d stnmie: %d stnmoe: %d staf: %d\n", atomic_read(&n_errs),
@@ -282,10 +292,13 @@ static void scf_handler(void *scfc_in)
 out:
        if (unlikely(!scfcp))
                return;
-       if (scfcp->scfc_wait)
+       if (scfcp->scfc_wait) {
                WRITE_ONCE(scfcp->scfc_out, true);
-       else
+               if (scfcp->scfc_rpc)
+                       complete(&scfcp->scfc_completion);
+       } else {
                kfree(scfcp);
+       }
 }
 
 // As above, but check for correct CPU.
@@ -319,6 +332,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
                        scfcp->scfc_cpu = -1;
                        scfcp->scfc_wait = scfsp->scfs_wait;
                        scfcp->scfc_out = false;
+                       scfcp->scfc_rpc = false;
                }
        }
        switch (scfsp->scfs_prim) {
@@ -350,6 +364,34 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
                        scfcp = NULL;
                }
                break;
+       case SCF_PRIM_SINGLE_RPC:
+               if (!scfcp)
+                       break;
+               cpu = torture_random(trsp) % nr_cpu_ids;
+               scfp->n_single_rpc++;
+               scfcp->scfc_cpu = cpu;
+               scfcp->scfc_wait = true;
+               init_completion(&scfcp->scfc_completion);
+               scfcp->scfc_rpc = true;
+               barrier(); // Prevent race-reduction compiler optimizations.
+               scfcp->scfc_in = true;
+               ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, 0);
+               if (!ret) {
+                       if (use_cpus_read_lock)
+                               cpus_read_unlock();
+                       else
+                               preempt_enable();
+                       wait_for_completion(&scfcp->scfc_completion);
+                       if (use_cpus_read_lock)
+                               cpus_read_lock();
+                       else
+                               preempt_disable();
+               } else {
+                       scfp->n_single_rpc_ofl++;
+                       kfree(scfcp);
+                       scfcp = NULL;
+               }
+               break;
        case SCF_PRIM_MANY:
                if (scfsp->scfs_wait)
                        scfp->n_many_wait++;
@@ -379,10 +421,12 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
        }
        if (scfcp && scfsp->scfs_wait) {
                if (WARN_ON_ONCE((num_online_cpus() > 1 || scfsp->scfs_prim == SCF_PRIM_SINGLE) &&
-                                !scfcp->scfc_out))
+                                !scfcp->scfc_out)) {
+                       pr_warn("%s: Memory-ordering failure, scfs_prim: %d.\n", __func__, scfsp->scfs_prim);
                        atomic_inc(&n_mb_out_errs); // Leak rather than trash!
-               else
+               } else {
                        kfree(scfcp);
+               }
                barrier(); // Prevent race-reduction compiler optimizations.
        }
        if (use_cpus_read_lock)
@@ -453,8 +497,8 @@ static void
 scftorture_print_module_parms(const char *tag)
 {
        pr_alert(SCFTORT_FLAG
-                "--- %s:  verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
-                verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
+                "--- %s:  verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_rpc=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
+                verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_rpc, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
 }
 
 static void scf_cleanup_handler(void *unused)
@@ -469,7 +513,7 @@ static void scf_torture_cleanup(void)
                return;
 
        WRITE_ONCE(scfdone, true);
-       if (nthreads)
+       if (nthreads && scf_stats_p)
                for (i = 0; i < nthreads; i++)
                        torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
        else
@@ -497,6 +541,7 @@ static int __init scf_torture_init(void)
        int firsterr = 0;
        unsigned long weight_resched1 = weight_resched;
        unsigned long weight_single1 = weight_single;
+       unsigned long weight_single_rpc1 = weight_single_rpc;
        unsigned long weight_single_wait1 = weight_single_wait;
        unsigned long weight_many1 = weight_many;
        unsigned long weight_many_wait1 = weight_many_wait;
@@ -508,11 +553,13 @@ static int __init scf_torture_init(void)
 
        scftorture_print_module_parms("Start of test");
 
-       if (weight_resched == -1 && weight_single == -1 && weight_single_wait == -1 &&
+       if (weight_resched == -1 &&
+           weight_single == -1 && weight_single_rpc == -1 && weight_single_wait == -1 &&
            weight_many == -1 && weight_many_wait == -1 &&
            weight_all == -1 && weight_all_wait == -1) {
                weight_resched1 = 2 * nr_cpu_ids;
                weight_single1 = 2 * nr_cpu_ids;
+               weight_single_rpc1 = 2 * nr_cpu_ids;
                weight_single_wait1 = 2 * nr_cpu_ids;
                weight_many1 = 2;
                weight_many_wait1 = 2;
@@ -523,6 +570,8 @@ static int __init scf_torture_init(void)
                        weight_resched1 = 0;
                if (weight_single == -1)
                        weight_single1 = 0;
+               if (weight_single_rpc == -1)
+                       weight_single_rpc1 = 0;
                if (weight_single_wait == -1)
                        weight_single_wait1 = 0;
                if (weight_many == -1)
@@ -534,7 +583,7 @@ static int __init scf_torture_init(void)
                if (weight_all_wait == -1)
                        weight_all_wait1 = 0;
        }
-       if (weight_single1 == 0 && weight_single_wait1 == 0 &&
+       if (weight_single1 == 0 && weight_single_rpc1 == 0 && weight_single_wait1 == 0 &&
            weight_many1 == 0 && weight_many_wait1 == 0 &&
            weight_all1 == 0 && weight_all_wait1 == 0) {
                VERBOSE_SCFTORTOUT_ERRSTRING("all zero weights makes no sense");
@@ -546,6 +595,7 @@ static int __init scf_torture_init(void)
        else if (weight_resched1)
                VERBOSE_SCFTORTOUT_ERRSTRING("built as module, weight_resched ignored");
        scf_sel_add(weight_single1, SCF_PRIM_SINGLE, false);
+       scf_sel_add(weight_single_rpc1, SCF_PRIM_SINGLE_RPC, true);
        scf_sel_add(weight_single_wait1, SCF_PRIM_SINGLE, true);
        scf_sel_add(weight_many1, SCF_PRIM_MANY, false);
        scf_sel_add(weight_many_wait1, SCF_PRIM_MANY, true);
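The new SCF_PRIM_SINGLE_RPC primitive turns smp_call_function_single() into an RPC: the caller fires the IPI without waiting, the handler signals a completion once it has run, and the caller blocks on that completion. A userspace analogy of the same flow, with a pthread standing in for the target CPU and a mutex/condvar pair standing in for struct completion; all names below are stand-ins:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct completion {                     /* stand-in for the kernel's struct completion */
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
};

static void init_completion(struct completion *c)
{
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = false;
}

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = true;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

struct scf_check_sketch {
        struct completion scfc_completion;
        bool scfc_out;
};

/* Plays the role of scf_handler() running on the target CPU. */
static void *remote_handler(void *arg)
{
        struct scf_check_sketch *scfcp = arg;

        scfcp->scfc_out = true;                 /* the "RPC" work */
        complete(&scfcp->scfc_completion);
        return NULL;
}

int main(void)
{
        struct scf_check_sketch scfc = { .scfc_out = false };
        pthread_t tid;

        init_completion(&scfc.scfc_completion);
        pthread_create(&tid, NULL, remote_handler, &scfc);      /* "send the IPI" */
        wait_for_completion(&scfc.scfc_completion);              /* RPC-style wait */
        printf("handler ran: %d\n", scfc.scfc_out);
        pthread_join(tid, NULL);
        return 0;
}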
index 2d9ff40..c4462c4 100644 (file)
@@ -237,9 +237,30 @@ static DEFINE_MUTEX(sched_core_mutex);
 static atomic_t sched_core_count;
 static struct cpumask sched_core_mask;
 
+static void sched_core_lock(int cpu, unsigned long *flags)
+{
+       const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+       int t, i = 0;
+
+       local_irq_save(*flags);
+       for_each_cpu(t, smt_mask)
+               raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
+}
+
+static void sched_core_unlock(int cpu, unsigned long *flags)
+{
+       const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+       int t;
+
+       for_each_cpu(t, smt_mask)
+               raw_spin_unlock(&cpu_rq(t)->__lock);
+       local_irq_restore(*flags);
+}
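sched_core_lock()/sched_core_unlock() take every SMT sibling's runqueue lock in a fixed order with interrupts off, so core-wide state can be flipped atomically with respect to all siblings. A userspace sketch of the same lock-all/update/unlock-all shape, with pthread mutexes standing in for the rq->__lock raw spinlocks (there is no userspace analogue of local_irq_save()):

#include <pthread.h>
#include <stdio.h>

#define NR_SIBLINGS 4                   /* stand-in for the SMT mask */

static pthread_mutex_t sibling_lock[NR_SIBLINGS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static int core_enabled[NR_SIBLINGS];

static void core_lock_all(void)
{
        for (int t = 0; t < NR_SIBLINGS; t++)
                pthread_mutex_lock(&sibling_lock[t]);   /* fixed order, like raw_spin_lock_nested() */
}

static void core_unlock_all(void)
{
        for (int t = 0; t < NR_SIBLINGS; t++)
                pthread_mutex_unlock(&sibling_lock[t]);
}

int main(void)
{
        core_lock_all();
        for (int t = 0; t < NR_SIBLINGS; t++)           /* flip core-wide state under all locks */
                core_enabled[t] = 1;
        core_unlock_all();

        printf("core_enabled flipped on %d siblings\n", NR_SIBLINGS);
        return 0;
}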
+
 static void __sched_core_flip(bool enabled)
 {
-       int cpu, t, i;
+       unsigned long flags;
+       int cpu, t;
 
        cpus_read_lock();
 
@@ -250,19 +271,12 @@ static void __sched_core_flip(bool enabled)
        for_each_cpu(cpu, &sched_core_mask) {
                const struct cpumask *smt_mask = cpu_smt_mask(cpu);
 
-               i = 0;
-               local_irq_disable();
-               for_each_cpu(t, smt_mask) {
-                       /* supports up to SMT8 */
-                       raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
-               }
+               sched_core_lock(cpu, &flags);
 
                for_each_cpu(t, smt_mask)
                        cpu_rq(t)->core_enabled = enabled;
 
-               for_each_cpu(t, smt_mask)
-                       raw_spin_unlock(&cpu_rq(t)->__lock);
-               local_irq_enable();
+               sched_core_unlock(cpu, &flags);
 
                cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
        }
@@ -993,6 +1007,7 @@ int get_nohz_timer_target(void)
 {
        int i, cpu = smp_processor_id(), default_cpu = -1;
        struct sched_domain *sd;
+       const struct cpumask *hk_mask;
 
        if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
                if (!idle_cpu(cpu))
@@ -1000,10 +1015,11 @@ int get_nohz_timer_target(void)
                default_cpu = cpu;
        }
 
+       hk_mask = housekeeping_cpumask(HK_FLAG_TIMER);
+
        rcu_read_lock();
        for_each_domain(cpu, sd) {
-               for_each_cpu_and(i, sched_domain_span(sd),
-                       housekeeping_cpumask(HK_FLAG_TIMER)) {
+               for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
                        if (cpu == i)
                                continue;
 
@@ -1619,6 +1635,23 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
                uclamp_rq_dec_id(rq, p, clamp_id);
 }
 
+static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
+                                     enum uclamp_id clamp_id)
+{
+       if (!p->uclamp[clamp_id].active)
+               return;
+
+       uclamp_rq_dec_id(rq, p, clamp_id);
+       uclamp_rq_inc_id(rq, p, clamp_id);
+
+       /*
+        * Make sure to clear the idle flag if we've transiently reached 0
+        * active tasks on rq.
+        */
+       if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
+               rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
+}
+
 static inline void
 uclamp_update_active(struct task_struct *p)
 {
@@ -1642,12 +1675,8 @@ uclamp_update_active(struct task_struct *p)
         * affecting a valid clamp bucket, the next time it's enqueued,
         * it will already see the updated clamp bucket value.
         */
-       for_each_clamp_id(clamp_id) {
-               if (p->uclamp[clamp_id].active) {
-                       uclamp_rq_dec_id(rq, p, clamp_id);
-                       uclamp_rq_inc_id(rq, p, clamp_id);
-               }
-       }
+       for_each_clamp_id(clamp_id)
+               uclamp_rq_reinc_id(rq, p, clamp_id);
 
        task_rq_unlock(rq, p, &rf);
 }
@@ -1981,12 +2010,18 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
        dequeue_task(rq, p, flags);
 }
 
-/*
- * __normal_prio - return the priority that is based on the static prio
- */
-static inline int __normal_prio(struct task_struct *p)
+static inline int __normal_prio(int policy, int rt_prio, int nice)
 {
-       return p->static_prio;
+       int prio;
+
+       if (dl_policy(policy))
+               prio = MAX_DL_PRIO - 1;
+       else if (rt_policy(policy))
+               prio = MAX_RT_PRIO - 1 - rt_prio;
+       else
+               prio = NICE_TO_PRIO(nice);
+
+       return prio;
 }
 
 /*
@@ -1998,15 +2033,7 @@ static inline int __normal_prio(struct task_struct *p)
  */
 static inline int normal_prio(struct task_struct *p)
 {
-       int prio;
-
-       if (task_has_dl_policy(p))
-               prio = MAX_DL_PRIO-1;
-       else if (task_has_rt_policy(p))
-               prio = MAX_RT_PRIO-1 - p->rt_priority;
-       else
-               prio = __normal_prio(p);
-       return prio;
+       return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
 }
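The reworked __normal_prio() computes the effective priority purely from (policy, rt_priority, nice). With the usual kernel constants, assumed here, MAX_DL_PRIO = 0, MAX_RT_PRIO = 100 and NICE_TO_PRIO(n) = n + 120, deadline tasks land at -1, RT priorities 1..99 map to 98..0, and nice -20..19 maps to 100..139. A standalone worked example:

#include <stdio.h>

#define MAX_DL_PRIO     0
#define MAX_RT_PRIO     100
#define NICE_TO_PRIO(n) ((n) + 120)

enum { POLICY_NORMAL, POLICY_FIFO, POLICY_DEADLINE };

static int normal_prio_sketch(int policy, int rt_prio, int nice)
{
        if (policy == POLICY_DEADLINE)
                return MAX_DL_PRIO - 1;                 /* always -1 */
        if (policy == POLICY_FIFO)
                return MAX_RT_PRIO - 1 - rt_prio;       /* 99 - rt_priority */
        return NICE_TO_PRIO(nice);                      /* 100..139 */
}

int main(void)
{
        printf("SCHED_DEADLINE            -> %d\n", normal_prio_sketch(POLICY_DEADLINE, 0, 0));
        printf("SCHED_FIFO rt_priority=1  -> %d\n", normal_prio_sketch(POLICY_FIFO, 1, 0));
        printf("SCHED_FIFO rt_priority=99 -> %d\n", normal_prio_sketch(POLICY_FIFO, 99, 0));
        printf("SCHED_NORMAL nice=0       -> %d\n", normal_prio_sketch(POLICY_NORMAL, 0, 0));
        printf("SCHED_NORMAL nice=-20     -> %d\n", normal_prio_sketch(POLICY_NORMAL, 0, -20));
        return 0;
}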
 
 /*
@@ -2163,7 +2190,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 
        /* Non kernel threads are not allowed during either online or offline. */
        if (!(p->flags & PF_KTHREAD))
-               return cpu_active(cpu);
+               return cpu_active(cpu) && task_cpu_possible(cpu, p);
 
        /* KTHREAD_IS_PER_CPU is always allowed. */
        if (kthread_is_per_cpu(p))
@@ -2470,6 +2497,34 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
        __do_set_cpus_allowed(p, new_mask, 0);
 }
 
+int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
+                     int node)
+{
+       if (!src->user_cpus_ptr)
+               return 0;
+
+       dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
+       if (!dst->user_cpus_ptr)
+               return -ENOMEM;
+
+       cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+       return 0;
+}
+
+static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
+{
+       struct cpumask *user_mask = NULL;
+
+       swap(p->user_cpus_ptr, user_mask);
+
+       return user_mask;
+}
+
+void release_user_cpus_ptr(struct task_struct *p)
+{
+       kfree(clear_user_cpus_ptr(p));
+}
+
 /*
  * This function is wildly self concurrent; here be dragons.
  *
@@ -2687,28 +2742,26 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 }
 
 /*
- * Change a given task's CPU affinity. Migrate the thread to a
- * proper CPU and schedule it away if the CPU it's executing on
- * is removed from the allowed bitmask.
- *
- * NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely. The
- * call is not atomic; no spinlocks may be held.
+ * Called with both p->pi_lock and rq->lock held; drops both before returning.
  */
-static int __set_cpus_allowed_ptr(struct task_struct *p,
-                                 const struct cpumask *new_mask,
-                                 u32 flags)
+static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
+                                        const struct cpumask *new_mask,
+                                        u32 flags,
+                                        struct rq *rq,
+                                        struct rq_flags *rf)
+       __releases(rq->lock)
+       __releases(p->pi_lock)
 {
+       const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
        const struct cpumask *cpu_valid_mask = cpu_active_mask;
+       bool kthread = p->flags & PF_KTHREAD;
+       struct cpumask *user_mask = NULL;
        unsigned int dest_cpu;
-       struct rq_flags rf;
-       struct rq *rq;
        int ret = 0;
 
-       rq = task_rq_lock(p, &rf);
        update_rq_clock(rq);
 
-       if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
+       if (kthread || is_migration_disabled(p)) {
                /*
                 * Kernel threads are allowed on online && !active CPUs,
                 * however, during cpu-hot-unplug, even these might get pushed
@@ -2722,6 +2775,11 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
                cpu_valid_mask = cpu_online_mask;
        }
 
+       if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        /*
         * Must re-check here, to close a race against __kthread_bind(),
         * sched_setaffinity() is not guaranteed to observe the flag.
@@ -2756,20 +2814,178 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
        __do_set_cpus_allowed(p, new_mask, flags);
 
-       return affine_move_task(rq, p, &rf, dest_cpu, flags);
+       if (flags & SCA_USER)
+               user_mask = clear_user_cpus_ptr(p);
+
+       ret = affine_move_task(rq, p, rf, dest_cpu, flags);
+
+       kfree(user_mask);
+
+       return ret;
 
 out:
-       task_rq_unlock(rq, p, &rf);
+       task_rq_unlock(rq, p, rf);
 
        return ret;
 }
 
+/*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task, the
+ * task must not exit() & deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
+ */
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+                                 const struct cpumask *new_mask, u32 flags)
+{
+       struct rq_flags rf;
+       struct rq *rq;
+
+       rq = task_rq_lock(p, &rf);
+       return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf);
+}
+
 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 {
        return __set_cpus_allowed_ptr(p, new_mask, 0);
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
+/*
+ * Change a given task's CPU affinity to the intersection of its current
+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
+ * and pointing @p->user_cpus_ptr to a copy of the old mask.
+ * If the resulting mask is empty, leave the affinity unchanged and return
+ * -EINVAL.
+ */
+static int restrict_cpus_allowed_ptr(struct task_struct *p,
+                                    struct cpumask *new_mask,
+                                    const struct cpumask *subset_mask)
+{
+       struct cpumask *user_mask = NULL;
+       struct rq_flags rf;
+       struct rq *rq;
+       int err;
+
+       if (!p->user_cpus_ptr) {
+               user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
+               if (!user_mask)
+                       return -ENOMEM;
+       }
+
+       rq = task_rq_lock(p, &rf);
+
+       /*
+        * Forcefully restricting the affinity of a deadline task is
+        * likely to cause problems, so fail here and let the caller
+        * noisily override the mask entirely.
+        */
+       if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
+               err = -EPERM;
+               goto err_unlock;
+       }
+
+       if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
+               err = -EINVAL;
+               goto err_unlock;
+       }
+
+       /*
+        * We're about to butcher the task affinity, so keep track of what
+        * the user asked for in case we're able to restore it later on.
+        */
+       if (user_mask) {
+               cpumask_copy(user_mask, p->cpus_ptr);
+               p->user_cpus_ptr = user_mask;
+       }
+
+       return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);
+
+err_unlock:
+       task_rq_unlock(rq, p, &rf);
+       kfree(user_mask);
+       return err;
+}
+
+/*
+ * Restrict the CPU affinity of task @p so that it is a subset of
+ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
+ * old affinity mask. If the resulting mask is empty, we warn and walk
+ * up the cpuset hierarchy until we find a suitable mask.
+ */
+void force_compatible_cpus_allowed_ptr(struct task_struct *p)
+{
+       cpumask_var_t new_mask;
+       const struct cpumask *override_mask = task_cpu_possible_mask(p);
+
+       alloc_cpumask_var(&new_mask, GFP_KERNEL);
+
+       /*
+        * __migrate_task() can fail silently in the face of concurrent
+        * offlining of the chosen destination CPU, so take the hotplug
+        * lock to ensure that the migration succeeds.
+        */
+       cpus_read_lock();
+       if (!cpumask_available(new_mask))
+               goto out_set_mask;
+
+       if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
+               goto out_free_mask;
+
+       /*
+        * We failed to find a valid subset of the affinity mask for the
+        * task, so override it based on its cpuset hierarchy.
+        */
+       cpuset_cpus_allowed(p, new_mask);
+       override_mask = new_mask;
+
+out_set_mask:
+       if (printk_ratelimit()) {
+               printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
+                               task_pid_nr(p), p->comm,
+                               cpumask_pr_args(override_mask));
+       }
+
+       WARN_ON(set_cpus_allowed_ptr(p, override_mask));
+out_free_mask:
+       cpus_read_unlock();
+       free_cpumask_var(new_mask);
+}
+
+static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
+
+/*
+ * Restore the affinity of a task @p which was previously restricted by a
+ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
+ * @p->user_cpus_ptr.
+ *
+ * It is the caller's responsibility to serialise this with any calls to
+ * force_compatible_cpus_allowed_ptr(@p).
+ */
+void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
+{
+       struct cpumask *user_mask = p->user_cpus_ptr;
+       unsigned long flags;
+
+       /*
+        * Try to restore the old affinity mask. If this fails, then
+        * we free the mask explicitly to avoid it being inherited across
+        * a subsequent fork().
+        */
+       if (!user_mask || !__sched_setaffinity(p, user_mask))
+               return;
+
+       raw_spin_lock_irqsave(&p->pi_lock, flags);
+       user_mask = clear_user_cpus_ptr(p);
+       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+       kfree(user_mask);
+}
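force_compatible_cpus_allowed_ptr() and relax_compatible_cpus_allowed_ptr() implement a save/restrict/restore cycle around p->user_cpus_ptr: remember what the user asked for, narrow the effective mask, and put the saved mask back later. A userspace sketch of the same idea for the calling process, using sched_getaffinity()/sched_setaffinity(); this is an analogy, not the kernel path:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t user_mask, narrow;

        /* Save the user-requested affinity (plays the role of p->user_cpus_ptr). */
        if (sched_getaffinity(0, sizeof(user_mask), &user_mask))
                return 1;

        /* Forcefully restrict to CPU 0 only (the "compatible" subset). */
        CPU_ZERO(&narrow);
        CPU_SET(0, &narrow);
        if (sched_setaffinity(0, sizeof(narrow), &narrow))
                return 1;
        printf("running restricted to CPU 0\n");

        /* Later: relax back to the saved user mask. */
        if (sched_setaffinity(0, sizeof(user_mask), &user_mask))
                return 1;
        printf("user affinity restored\n");
        return 0;
}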
+
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 #ifdef CONFIG_SCHED_DEBUG
@@ -3114,9 +3330,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 
                /* Look for allowed, online CPU in same node. */
                for_each_cpu(dest_cpu, nodemask) {
-                       if (!cpu_active(dest_cpu))
-                               continue;
-                       if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
+                       if (is_cpu_allowed(p, dest_cpu))
                                return dest_cpu;
                }
        }
@@ -3133,8 +3347,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
                /* No more Mr. Nice Guy. */
                switch (state) {
                case cpuset:
-                       if (IS_ENABLED(CONFIG_CPUSETS)) {
-                               cpuset_cpus_allowed_fallback(p);
+                       if (cpuset_cpus_allowed_fallback(p)) {
                                state = possible;
                                break;
                        }
@@ -3146,10 +3359,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
                         *
                         * More yuck to audit.
                         */
-                       do_set_cpus_allowed(p, cpu_possible_mask);
+                       do_set_cpus_allowed(p, task_cpu_possible_mask(p));
                        state = fail;
                        break;
-
                case fail:
                        BUG();
                        break;
@@ -3563,6 +3775,55 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
        rq_unlock(rq, &rf);
 }
 
+/*
+ * Invoked from try_to_wake_up() to check whether the task can be woken up.
+ *
+ * The caller holds p::pi_lock if p != current or has preemption
+ * disabled when p == current.
+ *
+ * The rules of PREEMPT_RT saved_state:
+ *
+ *   The related locking code always holds p::pi_lock when updating
+ *   p::saved_state, which means the code is fully serialized in both cases.
+ *
+ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT, with no other
+ *   bits set, which makes it possible to distinguish all wakeup scenarios.
+ */
+static __always_inline
+bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
+{
+       if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
+               WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
+                            state != TASK_RTLOCK_WAIT);
+       }
+
+       if (READ_ONCE(p->__state) & state) {
+               *success = 1;
+               return true;
+       }
+
+#ifdef CONFIG_PREEMPT_RT
+       /*
+        * Saved state preserves the task state across blocking on
+        * an RT lock.  If the state matches, set p::saved_state to
+        * TASK_RUNNING, but do not wake the task because it waits
+        * for a lock wakeup. Also indicate success because from
+        * the regular waker's point of view this has succeeded.
+        *
+        * After acquiring the lock the task will restore p::__state
+        * from p::saved_state which ensures that the regular
+        * wakeup is not lost. The restore will also set
+        * p::saved_state to TASK_RUNNING so any further tests will
+        * not result in false positives vs. @success
+        */
+       if (p->saved_state & state) {
+               p->saved_state = TASK_RUNNING;
+               *success = 1;
+       }
+#endif
+       return false;
+}
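Outside of PREEMPT_RT, ttwu_state_match() is just a bitmask test: the wakeup carries a mask of task states it may wake, and only a task currently in one of those states is moved to TASK_RUNNING. A minimal sketch of that matching rule; the constants are illustrative and the PREEMPT_RT saved_state branch is omitted:

#include <stdio.h>

#define TASK_RUNNING            0x0000  /* illustrative values */
#define TASK_INTERRUPTIBLE      0x0001
#define TASK_UNINTERRUPTIBLE    0x0002

struct task_sketch { unsigned int state; };

static int ttwu_state_match_sketch(struct task_sketch *p, unsigned int state)
{
        if (p->state & state) {         /* wakeup mask intersects the current state */
                p->state = TASK_RUNNING;
                return 1;
        }
        return 0;                       /* leave the task alone */
}

int main(void)
{
        struct task_sketch t = { .state = TASK_UNINTERRUPTIBLE };

        /* An interruptible-only wakeup does not disturb a D-state task... */
        printf("wake(INTERRUPTIBLE): %d\n",
               ttwu_state_match_sketch(&t, TASK_INTERRUPTIBLE));
        /* ...but a TASK_NORMAL-style wakeup (both bits) does. */
        printf("wake(both states):   %d\n",
               ttwu_state_match_sketch(&t, TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE));
        return 0;
}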
+
 /*
  * Notes on Program-Order guarantees on SMP systems.
  *
@@ -3702,10 +3963,9 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                 *  - we're serialized against set_special_state() by virtue of
                 *    it disabling IRQs (this allows not taking ->pi_lock).
                 */
-               if (!(READ_ONCE(p->__state) & state))
+               if (!ttwu_state_match(p, state, &success))
                        goto out;
 
-               success = 1;
                trace_sched_waking(p);
                WRITE_ONCE(p->__state, TASK_RUNNING);
                trace_sched_wakeup(p);
@@ -3720,14 +3980,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        smp_mb__after_spinlock();
-       if (!(READ_ONCE(p->__state) & state))
+       if (!ttwu_state_match(p, state, &success))
                goto unlock;
 
        trace_sched_waking(p);
 
-       /* We're going to change ->state: */
-       success = 1;
-
        /*
         * Ensure we load p->on_rq _after_ p->state, otherwise it would
         * be possible to, falsely, observe p->on_rq == 0 and get stuck
@@ -4099,7 +4356,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
                } else if (PRIO_TO_NICE(p->static_prio) < 0)
                        p->static_prio = NICE_TO_PRIO(0);
 
-               p->prio = p->normal_prio = __normal_prio(p);
+               p->prio = p->normal_prio = p->static_prio;
                set_load_weight(p, false);
 
                /*
@@ -5662,11 +5919,9 @@ static bool try_steal_cookie(int this, int that)
                if (p->core_occupation > dst->idle->core_occupation)
                        goto next;
 
-               p->on_rq = TASK_ON_RQ_MIGRATING;
                deactivate_task(src, p, 0);
                set_task_cpu(p, this);
                activate_task(dst, p, 0);
-               p->on_rq = TASK_ON_RQ_QUEUED;
 
                resched_curr(dst);
 
@@ -5738,35 +5993,109 @@ void queue_core_balance(struct rq *rq)
        queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
 }
 
-static inline void sched_core_cpu_starting(unsigned int cpu)
+static void sched_core_cpu_starting(unsigned int cpu)
 {
        const struct cpumask *smt_mask = cpu_smt_mask(cpu);
-       struct rq *rq, *core_rq = NULL;
-       int i;
+       struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
+       unsigned long flags;
+       int t;
 
-       core_rq = cpu_rq(cpu)->core;
+       sched_core_lock(cpu, &flags);
+
+       WARN_ON_ONCE(rq->core != rq);
+
+       /* if we're the first, we'll be our own leader */
+       if (cpumask_weight(smt_mask) == 1)
+               goto unlock;
 
-       if (!core_rq) {
-               for_each_cpu(i, smt_mask) {
-                       rq = cpu_rq(i);
-                       if (rq->core && rq->core == rq)
-                               core_rq = rq;
+       /* find the leader */
+       for_each_cpu(t, smt_mask) {
+               if (t == cpu)
+                       continue;
+               rq = cpu_rq(t);
+               if (rq->core == rq) {
+                       core_rq = rq;
+                       break;
                }
+       }
 
-               if (!core_rq)
-                       core_rq = cpu_rq(cpu);
+       if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
+               goto unlock;
 
-               for_each_cpu(i, smt_mask) {
-                       rq = cpu_rq(i);
+       /* install and validate core_rq */
+       for_each_cpu(t, smt_mask) {
+               rq = cpu_rq(t);
 
-                       WARN_ON_ONCE(rq->core && rq->core != core_rq);
+               if (t == cpu)
                        rq->core = core_rq;
-               }
+
+               WARN_ON_ONCE(rq->core != core_rq);
        }
+
+unlock:
+       sched_core_unlock(cpu, &flags);
 }
+
+static void sched_core_cpu_deactivate(unsigned int cpu)
+{
+       const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+       struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
+       unsigned long flags;
+       int t;
+
+       sched_core_lock(cpu, &flags);
+
+       /* if we're the last man standing, nothing to do */
+       if (cpumask_weight(smt_mask) == 1) {
+               WARN_ON_ONCE(rq->core != rq);
+               goto unlock;
+       }
+
+       /* if we're not the leader, nothing to do */
+       if (rq->core != rq)
+               goto unlock;
+
+       /* find a new leader */
+       for_each_cpu(t, smt_mask) {
+               if (t == cpu)
+                       continue;
+               core_rq = cpu_rq(t);
+               break;
+       }
+
+       if (WARN_ON_ONCE(!core_rq)) /* impossible */
+               goto unlock;
+
+       /* copy the shared state to the new leader */
+       core_rq->core_task_seq      = rq->core_task_seq;
+       core_rq->core_pick_seq      = rq->core_pick_seq;
+       core_rq->core_cookie        = rq->core_cookie;
+       core_rq->core_forceidle     = rq->core_forceidle;
+       core_rq->core_forceidle_seq = rq->core_forceidle_seq;
+
+       /* install new leader */
+       for_each_cpu(t, smt_mask) {
+               rq = cpu_rq(t);
+               rq->core = core_rq;
+       }
+
+unlock:
+       sched_core_unlock(cpu, &flags);
+}
+
+static inline void sched_core_cpu_dying(unsigned int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+
+       if (rq->core != rq)
+               rq->core = rq;
+}
+
 #else /* !CONFIG_SCHED_CORE */
 
 static inline void sched_core_cpu_starting(unsigned int cpu) {}
+static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
+static inline void sched_core_cpu_dying(unsigned int cpu) {}
 
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
@@ -5776,6 +6105,24 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 #endif /* CONFIG_SCHED_CORE */
 
+/*
+ * Constants for the sched_mode argument of __schedule().
+ *
+ * The mode argument allows RT enabled kernels to differentiate a
+ * preemption from blocking on a 'sleeping' spin/rwlock. Note that
+ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
+ * optimize the AND operation out and just check for zero.
+ */
+#define SM_NONE                        0x0
+#define SM_PREEMPT             0x1
+#define SM_RTLOCK_WAIT         0x2
+
+#ifndef CONFIG_PREEMPT_RT
+# define SM_MASK_PREEMPT       (~0U)
+#else
+# define SM_MASK_PREEMPT       SM_PREEMPT
+#endif
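The effect of the SM_MASK_PREEMPT split: on !PREEMPT_RT the mask is all ones, so (sched_mode & SM_MASK_PREEMPT) is just a non-zero test and SM_RTLOCK_WAIT behaves exactly like SM_PREEMPT, whereas on PREEMPT_RT only SM_PREEMPT counts as a preemption. A small standalone illustration:

#include <stdio.h>

#define SM_NONE         0x0u
#define SM_PREEMPT      0x1u
#define SM_RTLOCK_WAIT  0x2u

static int counts_as_preemption(unsigned int sched_mode, unsigned int sm_mask_preempt)
{
        return (sched_mode & sm_mask_preempt) != 0;
}

int main(void)
{
        unsigned int non_rt_mask = ~0u;         /* !CONFIG_PREEMPT_RT: any non-zero mode */
        unsigned int rt_mask = SM_PREEMPT;      /*  CONFIG_PREEMPT_RT: only real preemption */

        printf("!RT: SM_RTLOCK_WAIT preempts? %d\n",
               counts_as_preemption(SM_RTLOCK_WAIT, non_rt_mask));
        printf(" RT: SM_RTLOCK_WAIT preempts? %d\n",
               counts_as_preemption(SM_RTLOCK_WAIT, rt_mask));
        printf(" RT: SM_PREEMPT preempts?     %d\n",
               counts_as_preemption(SM_PREEMPT, rt_mask));
        return 0;
}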
+
 /*
  * __schedule() is the main scheduler function.
  *
@@ -5815,7 +6162,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  *
  * WARNING: must be called with preemption disabled!
  */
-static void __sched notrace __schedule(bool preempt)
+static void __sched notrace __schedule(unsigned int sched_mode)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
@@ -5828,13 +6175,13 @@ static void __sched notrace __schedule(bool preempt)
        rq = cpu_rq(cpu);
        prev = rq->curr;
 
-       schedule_debug(prev, preempt);
+       schedule_debug(prev, !!sched_mode);
 
        if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
                hrtick_clear(rq);
 
        local_irq_disable();
-       rcu_note_context_switch(preempt);
+       rcu_note_context_switch(!!sched_mode);
 
        /*
         * Make sure that signal_pending_state()->signal_pending() below
@@ -5868,7 +6215,7 @@ static void __sched notrace __schedule(bool preempt)
         *  - ptrace_{,un}freeze_traced() can change ->state underneath us.
         */
        prev_state = READ_ONCE(prev->__state);
-       if (!preempt && prev_state) {
+       if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
                if (signal_pending_state(prev_state, prev)) {
                        WRITE_ONCE(prev->__state, TASK_RUNNING);
                } else {
@@ -5934,7 +6281,7 @@ static void __sched notrace __schedule(bool preempt)
                migrate_disable_switch(rq, prev);
                psi_sched_switch(prev, next, !task_on_rq_queued(prev));
 
-               trace_sched_switch(preempt, prev, next);
+               trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next);
 
                /* Also unlocks the rq: */
                rq = context_switch(rq, prev, next, &rf);
@@ -5955,7 +6302,7 @@ void __noreturn do_task_dead(void)
        /* Tell freezer to ignore us: */
        current->flags |= PF_NOFREEZE;
 
-       __schedule(false);
+       __schedule(SM_NONE);
        BUG();
 
        /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
@@ -6016,7 +6363,7 @@ asmlinkage __visible void __sched schedule(void)
        sched_submit_work(tsk);
        do {
                preempt_disable();
-               __schedule(false);
+               __schedule(SM_NONE);
                sched_preempt_enable_no_resched();
        } while (need_resched());
        sched_update_worker(tsk);
@@ -6044,7 +6391,7 @@ void __sched schedule_idle(void)
         */
        WARN_ON_ONCE(current->__state);
        do {
-               __schedule(false);
+               __schedule(SM_NONE);
        } while (need_resched());
 }
 
@@ -6079,6 +6426,18 @@ void __sched schedule_preempt_disabled(void)
        preempt_disable();
 }
 
+#ifdef CONFIG_PREEMPT_RT
+void __sched notrace schedule_rtlock(void)
+{
+       do {
+               preempt_disable();
+               __schedule(SM_RTLOCK_WAIT);
+               sched_preempt_enable_no_resched();
+       } while (need_resched());
+}
+NOKPROBE_SYMBOL(schedule_rtlock);
+#endif
+
 static void __sched notrace preempt_schedule_common(void)
 {
        do {
@@ -6097,7 +6456,7 @@ static void __sched notrace preempt_schedule_common(void)
                 */
                preempt_disable_notrace();
                preempt_latency_start(1);
-               __schedule(true);
+               __schedule(SM_PREEMPT);
                preempt_latency_stop(1);
                preempt_enable_no_resched_notrace();
 
@@ -6176,7 +6535,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
                 * an infinite recursion.
                 */
                prev_ctx = exception_enter();
-               __schedule(true);
+               __schedule(SM_PREEMPT);
                exception_exit(prev_ctx);
 
                preempt_latency_stop(1);
@@ -6325,7 +6684,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
        do {
                preempt_disable();
                local_irq_enable();
-               __schedule(true);
+               __schedule(SM_PREEMPT);
                local_irq_disable();
                sched_preempt_enable_no_resched();
        } while (need_resched());
@@ -6341,6 +6700,18 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
 }
 EXPORT_SYMBOL(default_wake_function);
 
+static void __setscheduler_prio(struct task_struct *p, int prio)
+{
+       if (dl_prio(prio))
+               p->sched_class = &dl_sched_class;
+       else if (rt_prio(prio))
+               p->sched_class = &rt_sched_class;
+       else
+               p->sched_class = &fair_sched_class;
+
+       p->prio = prio;
+}
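__setscheduler_prio() centralises the priority-to-class mapping that rt_mutex_setprio() and __sched_setscheduler() previously duplicated: negative priorities select the deadline class, 0..MAX_RT_PRIO-1 the RT class, and everything else CFS. A standalone sketch of that selection, assuming MAX_RT_PRIO == 100 and printing class names rather than dereferencing them:

#include <stdio.h>

#define MAX_RT_PRIO 100                 /* assumed kernel value */

static const char *class_for_prio(int prio)
{
        if (prio < 0)
                return "dl_sched_class";
        if (prio < MAX_RT_PRIO)
                return "rt_sched_class";
        return "fair_sched_class";
}

int main(void)
{
        int samples[] = { -1, 0, 98, 99, 100, 120, 139 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("prio %4d -> %s\n", samples[i], class_for_prio(samples[i]));
        return 0;
}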
+
 #ifdef CONFIG_RT_MUTEXES
 
 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
@@ -6456,22 +6827,19 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
                } else {
                        p->dl.pi_se = &p->dl;
                }
-               p->sched_class = &dl_sched_class;
        } else if (rt_prio(prio)) {
                if (dl_prio(oldprio))
                        p->dl.pi_se = &p->dl;
                if (oldprio < prio)
                        queue_flag |= ENQUEUE_HEAD;
-               p->sched_class = &rt_sched_class;
        } else {
                if (dl_prio(oldprio))
                        p->dl.pi_se = &p->dl;
                if (rt_prio(oldprio))
                        p->rt.timeout = 0;
-               p->sched_class = &fair_sched_class;
        }
 
-       p->prio = prio;
+       __setscheduler_prio(p, prio);
 
        if (queued)
                enqueue_task(rq, p, queue_flag);
@@ -6824,35 +7192,6 @@ static void __setscheduler_params(struct task_struct *p,
        set_load_weight(p, true);
 }
 
-/* Actually do priority change: must hold pi & rq lock. */
-static void __setscheduler(struct rq *rq, struct task_struct *p,
-                          const struct sched_attr *attr, bool keep_boost)
-{
-       /*
-        * If params can't change scheduling class changes aren't allowed
-        * either.
-        */
-       if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
-               return;
-
-       __setscheduler_params(p, attr);
-
-       /*
-        * Keep a potential priority boosting if called from
-        * sched_setscheduler().
-        */
-       p->prio = normal_prio(p);
-       if (keep_boost)
-               p->prio = rt_effective_prio(p, p->prio);
-
-       if (dl_prio(p->prio))
-               p->sched_class = &dl_sched_class;
-       else if (rt_prio(p->prio))
-               p->sched_class = &rt_sched_class;
-       else
-               p->sched_class = &fair_sched_class;
-}
-
 /*
  * Check the target process has a UID that matches the current process's:
  */
@@ -6873,10 +7212,8 @@ static int __sched_setscheduler(struct task_struct *p,
                                const struct sched_attr *attr,
                                bool user, bool pi)
 {
-       int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
-                     MAX_RT_PRIO - 1 - attr->sched_priority;
-       int retval, oldprio, oldpolicy = -1, queued, running;
-       int new_effective_prio, policy = attr->sched_policy;
+       int oldpolicy = -1, policy = attr->sched_policy;
+       int retval, oldprio, newprio, queued, running;
        const struct sched_class *prev_class;
        struct callback_head *head;
        struct rq_flags rf;
@@ -7074,6 +7411,7 @@ change:
        p->sched_reset_on_fork = reset_on_fork;
        oldprio = p->prio;
 
+       newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
        if (pi) {
                /*
                 * Take priority boosted tasks into account. If the new
@@ -7082,8 +7420,8 @@ change:
                 * the runqueue. This will be done when the task deboost
                 * itself.
                 */
-               new_effective_prio = rt_effective_prio(p, newprio);
-               if (new_effective_prio == oldprio)
+               newprio = rt_effective_prio(p, newprio);
+               if (newprio == oldprio)
                        queue_flags &= ~DEQUEUE_MOVE;
        }
 
@@ -7096,7 +7434,10 @@ change:
 
        prev_class = p->sched_class;
 
-       __setscheduler(rq, p, attr, pi);
+       if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
+               __setscheduler_params(p, attr);
+               __setscheduler_prio(p, newprio);
+       }
        __setscheduler_uclamp(p, attr);
 
        if (queued) {
@@ -7320,6 +7661,16 @@ err_size:
        return -E2BIG;
 }
 
+static void get_params(struct task_struct *p, struct sched_attr *attr)
+{
+       if (task_has_dl_policy(p))
+               __getparam_dl(p, attr);
+       else if (task_has_rt_policy(p))
+               attr->sched_priority = p->rt_priority;
+       else
+               attr->sched_nice = task_nice(p);
+}
+
 /**
  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
  * @pid: the pid in question.
@@ -7381,6 +7732,8 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
        rcu_read_unlock();
 
        if (likely(p)) {
+               if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
+                       get_params(p, &attr);
                retval = sched_setattr(p, &attr);
                put_task_struct(p);
        }
@@ -7529,12 +7882,8 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
        kattr.sched_policy = p->policy;
        if (p->sched_reset_on_fork)
                kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
-       if (task_has_dl_policy(p))
-               __getparam_dl(p, &kattr);
-       else if (task_has_rt_policy(p))
-               kattr.sched_priority = p->rt_priority;
-       else
-               kattr.sched_nice = task_nice(p);
+       get_params(p, &kattr);
+       kattr.sched_flags &= SCHED_FLAG_ALL;
 
 #ifdef CONFIG_UCLAMP_TASK
        /*
@@ -7555,9 +7904,76 @@ out_unlock:
        return retval;
 }
 
-long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+#ifdef CONFIG_SMP
+int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
+{
+       int ret = 0;
+
+       /*
+        * If the task isn't a deadline task or admission control is
+        * disabled then we don't care about affinity changes.
+        */
+       if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
+               return 0;
+
+       /*
+        * Since bandwidth control happens on root_domain basis,
+        * if admission test is enabled, we only admit -deadline
+        * tasks allowed to run on all the CPUs in the task's
+        * root_domain.
+        */
+       rcu_read_lock();
+       if (!cpumask_subset(task_rq(p)->rd->span, mask))
+               ret = -EBUSY;
+       rcu_read_unlock();
+       return ret;
+}
+#endif
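dl_task_check_affinity() encodes the admission-control rule that a -deadline task must remain runnable on every CPU of its root_domain, i.e. the root_domain span has to be a subset of the requested mask. A tiny bitmask illustration of that subset direction, with plain unsigned longs standing in for struct cpumask:

#include <stdio.h>

static int is_subset(unsigned long sub, unsigned long super)
{
        return (sub & ~super) == 0;     /* every bit of sub is also in super */
}

int main(void)
{
        unsigned long rd_span = 0xful;  /* root_domain spans CPUs 0-3 */

        /* Requesting CPUs 0-1 only: the span is not covered -> admission fails (-EBUSY). */
        printf("mask 0x3: admitted=%d\n", is_subset(rd_span, 0x3ul));
        /* Requesting CPUs 0-3 (or a superset): admission control is satisfied. */
        printf("mask 0xf: admitted=%d\n", is_subset(rd_span, 0xful));
        return 0;
}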
+
+static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 {
+       int retval;
        cpumask_var_t cpus_allowed, new_mask;
+
+       if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
+               return -ENOMEM;
+
+       if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+               retval = -ENOMEM;
+               goto out_free_cpus_allowed;
+       }
+
+       cpuset_cpus_allowed(p, cpus_allowed);
+       cpumask_and(new_mask, mask, cpus_allowed);
+
+       retval = dl_task_check_affinity(p, new_mask);
+       if (retval)
+               goto out_free_new_mask;
+again:
+       retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
+       if (retval)
+               goto out_free_new_mask;
+
+       cpuset_cpus_allowed(p, cpus_allowed);
+       if (!cpumask_subset(new_mask, cpus_allowed)) {
+               /*
+                * We must have raced with a concurrent cpuset update.
+                * Just reset the cpumask to the cpuset's cpus_allowed.
+                */
+               cpumask_copy(new_mask, cpus_allowed);
+               goto again;
+       }
+
+out_free_new_mask:
+       free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+       free_cpumask_var(cpus_allowed);
+       return retval;
+}
+
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{
        struct task_struct *p;
        int retval;
 
@@ -7577,68 +7993,22 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
                retval = -EINVAL;
                goto out_put_task;
        }
-       if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
-               retval = -ENOMEM;
-               goto out_put_task;
-       }
-       if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
-               retval = -ENOMEM;
-               goto out_free_cpus_allowed;
-       }
-       retval = -EPERM;
+
        if (!check_same_owner(p)) {
                rcu_read_lock();
                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
                        rcu_read_unlock();
-                       goto out_free_new_mask;
+                       retval = -EPERM;
+                       goto out_put_task;
                }
                rcu_read_unlock();
        }
 
        retval = security_task_setscheduler(p);
        if (retval)
-               goto out_free_new_mask;
-
-
-       cpuset_cpus_allowed(p, cpus_allowed);
-       cpumask_and(new_mask, in_mask, cpus_allowed);
-
-       /*
-        * Since bandwidth control happens on root_domain basis,
-        * if admission test is enabled, we only admit -deadline
-        * tasks allowed to run on all the CPUs in the task's
-        * root_domain.
-        */
-#ifdef CONFIG_SMP
-       if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
-               rcu_read_lock();
-               if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
-                       retval = -EBUSY;
-                       rcu_read_unlock();
-                       goto out_free_new_mask;
-               }
-               rcu_read_unlock();
-       }
-#endif
-again:
-       retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
+               goto out_put_task;
 
-       if (!retval) {
-               cpuset_cpus_allowed(p, cpus_allowed);
-               if (!cpumask_subset(new_mask, cpus_allowed)) {
-                       /*
-                        * We must have raced with a concurrent cpuset
-                        * update. Just reset the cpus_allowed to the
-                        * cpuset's cpus_allowed
-                        */
-                       cpumask_copy(new_mask, cpus_allowed);
-                       goto again;
-               }
-       }
-out_free_new_mask:
-       free_cpumask_var(new_mask);
-out_free_cpus_allowed:
-       free_cpumask_var(cpus_allowed);
+       retval = __sched_setaffinity(p, in_mask);
 out_put_task:
        put_task_struct(p);
        return retval;
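
For reference, a minimal userspace sketch (illustrative only) of the path reworked above: the sched_setaffinity(2) system call lands in the kernel's sched_setaffinity(), which now delegates cpumask allocation, the cpuset intersection and the deadline admission check to __sched_setaffinity().

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(0, &set);               /* pin the calling task to CPU 0 */

        /* Enters the kernel path above via the sched_setaffinity() syscall. */
        if (sched_setaffinity(0, sizeof(set), &set))
                perror("sched_setaffinity");
        return 0;
}
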
@@ -7781,6 +8151,17 @@ int __sched __cond_resched(void)
                preempt_schedule_common();
                return 1;
        }
+       /*
+        * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
+        * whether the current CPU is in an RCU read-side critical section,
+        * so the tick can report quiescent states even for CPUs looping
+        * in kernel context.  In contrast, in non-preemptible kernels,
+        * RCU readers leave no in-memory hints, which means that CPU-bound
+        * processes executing in kernel context might never report an
+        * RCU quiescent state.  Therefore, the following code causes
+        * cond_resched() to report a quiescent state, but only when RCU
+        * is in urgent need of one.
+        */
 #ifndef CONFIG_PREEMPT_RCU
        rcu_all_qs();
 #endif
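
The comment above explains why cond_resched() reports RCU quiescent states on non-preemptible kernels. A hypothetical kernel-side sketch of the kind of CPU-bound loop that relies on this (the helper name is made up):

/*
 * Hypothetical sketch: a long-running loop that yields the CPU and, on
 * !CONFIG_PREEMPT_RCU kernels, lets __cond_resched() report an RCU
 * quiescent state via rcu_all_qs() when RCU urgently needs one.
 */
static void example_scrub_pages(struct page **pages, unsigned long nr)
{
        unsigned long i;

        for (i = 0; i < nr; i++) {
                clear_highpage(pages[i]);
                cond_resched();
        }
}
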
@@ -8727,6 +9108,8 @@ int sched_cpu_deactivate(unsigned int cpu)
         */
        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
                static_branch_dec_cpuslocked(&sched_smt_present);
+
+       sched_core_cpu_deactivate(cpu);
 #endif
 
        if (!sched_smp_initialized)
@@ -8831,6 +9214,7 @@ int sched_cpu_dying(unsigned int cpu)
        calc_load_migrate(rq);
        update_max_interval();
        hrtick_clear(rq);
+       sched_core_cpu_dying(cpu);
        return 0;
 }
 #endif
@@ -9042,7 +9426,7 @@ void __init sched_init(void)
                atomic_set(&rq->nr_iowait, 0);
 
 #ifdef CONFIG_SCHED_CORE
-               rq->core = NULL;
+               rq->core = rq;
                rq->core_pick = NULL;
                rq->core_enabled = 0;
                rq->core_tree = RB_ROOT;
@@ -9824,7 +10208,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
         * Prevent race between setting of cfs_rq->runtime_enabled and
         * unthrottle_offline_cfs_rqs().
         */
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&cfs_constraints_mutex);
        ret = __cfs_schedulable(tg, period, quota);
        if (ret)
@@ -9868,7 +10252,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
                cfs_bandwidth_usage_dec();
 out_unlock:
        mutex_unlock(&cfs_constraints_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return ret;
 }
@@ -10119,6 +10503,20 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
+                              struct cftype *cft)
+{
+       return css_tg(css)->idle;
+}
+
+static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
+                               struct cftype *cft, s64 idle)
+{
+       return sched_group_set_idle(css_tg(css), idle);
+}
+#endif
+
 static struct cftype cpu_legacy_files[] = {
 #ifdef CONFIG_FAIR_GROUP_SCHED
        {
@@ -10126,6 +10524,11 @@ static struct cftype cpu_legacy_files[] = {
                .read_u64 = cpu_shares_read_u64,
                .write_u64 = cpu_shares_write_u64,
        },
+       {
+               .name = "idle",
+               .read_s64 = cpu_idle_read_s64,
+               .write_s64 = cpu_idle_write_s64,
+       },
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
        {
@@ -10333,6 +10736,12 @@ static struct cftype cpu_files[] = {
                .read_s64 = cpu_weight_nice_read_s64,
                .write_s64 = cpu_weight_nice_write_s64,
        },
+       {
+               .name = "idle",
+               .flags = CFTYPE_NOT_ON_ROOT,
+               .read_s64 = cpu_idle_read_s64,
+               .write_s64 = cpu_idle_write_s64,
+       },
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
        {
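
The new cpu.idle files added above (legacy and unified hierarchy) are backed by sched_group_set_idle(). A hedged userspace sketch of flipping a cgroup to SCHED_IDLE; the cgroup path is an example, not a fixed location:

#include <stdio.h>

int main(void)
{
        /* Handled in the kernel by cpu_idle_write_s64() -> sched_group_set_idle(). */
        FILE *f = fopen("/sys/fs/cgroup/batch/cpu.idle", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fputs("1\n", f);
        return fclose(f) ? 1 : 0;
}
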
index 5712461..e7af188 100644 (file)
@@ -537,9 +537,17 @@ static struct attribute *sugov_attrs[] = {
 };
 ATTRIBUTE_GROUPS(sugov);
 
+static void sugov_tunables_free(struct kobject *kobj)
+{
+       struct gov_attr_set *attr_set = container_of(kobj, struct gov_attr_set, kobj);
+
+       kfree(to_sugov_tunables(attr_set));
+}
+
 static struct kobj_type sugov_tunables_ktype = {
        .default_groups = sugov_groups,
        .sysfs_ops = &governor_sysfs_ops,
+       .release = &sugov_tunables_free,
 };
 
 /********************** cpufreq governor interface *********************/
@@ -639,12 +647,10 @@ static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_polic
        return tunables;
 }
 
-static void sugov_tunables_free(struct sugov_tunables *tunables)
+static void sugov_clear_global_tunables(void)
 {
        if (!have_governor_per_policy())
                global_tunables = NULL;
-
-       kfree(tunables);
 }
 
 static int sugov_init(struct cpufreq_policy *policy)
@@ -707,7 +713,7 @@ out:
 fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
-       sugov_tunables_free(tunables);
+       sugov_clear_global_tunables();
 
 stop_kthread:
        sugov_kthread_stop(sg_policy);
@@ -734,7 +740,7 @@ static void sugov_exit(struct cpufreq_policy *policy)
        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
-               sugov_tunables_free(tunables);
+               sugov_clear_global_tunables();
 
        mutex_unlock(&global_tunables_lock);
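
The schedutil change above frees the tunables from the kobj_type ->release() callback so the object outlives any sysfs reader still holding a kobject reference; clearing the global pointer is all that remains at exit time. A generic sketch of that pattern, with made-up names:

struct example_tunables {
        struct gov_attr_set attr_set;
        unsigned int rate_limit_us;
};

static void example_tunables_release(struct kobject *kobj)
{
        struct gov_attr_set *attr_set = container_of(kobj, struct gov_attr_set, kobj);

        /* The last kobject reference is gone; freeing is safe now. */
        kfree(container_of(attr_set, struct example_tunables, attr_set));
}

static struct kobj_type example_tunables_ktype = {
        .sysfs_ops = &governor_sysfs_ops,
        .release   = example_tunables_release,
};
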
 
index aaacd6c..e943146 100644 (file)
@@ -1733,6 +1733,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
         */
        raw_spin_rq_lock(rq);
        if (p->dl.dl_non_contending) {
+               update_rq_clock(rq);
                sub_running_bw(&p->dl, &rq->dl);
                p->dl.dl_non_contending = 0;
                /*
@@ -2741,7 +2742,7 @@ void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
        dl_se->dl_runtime = attr->sched_runtime;
        dl_se->dl_deadline = attr->sched_deadline;
        dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
-       dl_se->flags = attr->sched_flags;
+       dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
        dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
 }
@@ -2754,7 +2755,8 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
        attr->sched_runtime = dl_se->dl_runtime;
        attr->sched_deadline = dl_se->dl_deadline;
        attr->sched_period = dl_se->dl_period;
-       attr->sched_flags = dl_se->flags;
+       attr->sched_flags &= ~SCHED_DL_FLAGS;
+       attr->sched_flags |= dl_se->flags;
 }
 
 /*
@@ -2851,7 +2853,7 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
        if (dl_se->dl_runtime != attr->sched_runtime ||
            dl_se->dl_deadline != attr->sched_deadline ||
            dl_se->dl_period != attr->sched_period ||
-           dl_se->flags != attr->sched_flags)
+           dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
                return true;
 
        return false;
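
SCHED_DL_FLAGS (added to kernel/sched/sched.h later in this series) masks sched_attr.sched_flags down to the deadline-relevant bits before they are stored or compared. A hedged userspace sketch of setting one of those bits; header availability varies and there is no glibc wrapper, so a raw syscall is used:

#include <linux/sched.h>
#include <linux/sched/types.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        struct sched_attr attr = {
                .size           = sizeof(attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_flags    = SCHED_FLAG_RECLAIM,   /* kept by SCHED_DL_FLAGS */
                .sched_runtime  = 10 * 1000 * 1000,     /* 10 ms */
                .sched_deadline = 30 * 1000 * 1000,     /* 30 ms */
                .sched_period   = 30 * 1000 * 1000,     /* 30 ms */
        };

        if (syscall(SYS_sched_setattr, 0, &attr, 0))
                perror("sched_setattr");
        return 0;
}
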
index 0c5ec27..4971622 100644 (file)
@@ -388,6 +388,13 @@ void update_sched_domain_debugfs(void)
 {
        int cpu, i;
 
+       /*
+        * This can unfortunately be invoked before sched_debug_init() creates
+        * the debug directory. Don't touch sd_sysctl_cpus until then.
+        */
+       if (!debugfs_sched)
+               return;
+
        if (!cpumask_available(sd_sysctl_cpus)) {
                if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
                        return;
@@ -600,6 +607,9 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
+       SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
+       SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
+                       cfs_rq->idle_h_nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
index 44c4520..ff69f24 100644 (file)
@@ -431,6 +431,23 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
        }
 }
 
+static int tg_is_idle(struct task_group *tg)
+{
+       return tg->idle > 0;
+}
+
+static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
+{
+       return cfs_rq->idle > 0;
+}
+
+static int se_is_idle(struct sched_entity *se)
+{
+       if (entity_is_task(se))
+               return task_has_idle_policy(task_of(se));
+       return cfs_rq_is_idle(group_cfs_rq(se));
+}
+
 #else  /* !CONFIG_FAIR_GROUP_SCHED */
 
 #define for_each_sched_entity(se) \
@@ -468,6 +485,21 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 {
 }
 
+static inline int tg_is_idle(struct task_group *tg)
+{
+       return 0;
+}
+
+static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
+{
+       return 0;
+}
+
+static int se_is_idle(struct sched_entity *se)
+{
+       return 0;
+}
+
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static __always_inline
@@ -1486,7 +1518,7 @@ static inline bool is_core_idle(int cpu)
                if (cpu == sibling)
                        continue;
 
-               if (!idle_cpu(cpu))
+               if (!idle_cpu(sibling))
                        return false;
        }
 #endif
@@ -4841,6 +4873,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
                dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
 
+               if (cfs_rq_is_idle(group_cfs_rq(se)))
+                       idle_task_delta = cfs_rq->h_nr_running;
+
                qcfs_rq->h_nr_running -= task_delta;
                qcfs_rq->idle_h_nr_running -= idle_task_delta;
 
@@ -4860,6 +4895,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
                update_load_avg(qcfs_rq, se, 0);
                se_update_runnable(se);
 
+               if (cfs_rq_is_idle(group_cfs_rq(se)))
+                       idle_task_delta = cfs_rq->h_nr_running;
+
                qcfs_rq->h_nr_running -= task_delta;
                qcfs_rq->idle_h_nr_running -= idle_task_delta;
        }
@@ -4904,39 +4942,45 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        task_delta = cfs_rq->h_nr_running;
        idle_task_delta = cfs_rq->idle_h_nr_running;
        for_each_sched_entity(se) {
+               struct cfs_rq *qcfs_rq = cfs_rq_of(se);
+
                if (se->on_rq)
                        break;
-               cfs_rq = cfs_rq_of(se);
-               enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+               enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
+
+               if (cfs_rq_is_idle(group_cfs_rq(se)))
+                       idle_task_delta = cfs_rq->h_nr_running;
 
-               cfs_rq->h_nr_running += task_delta;
-               cfs_rq->idle_h_nr_running += idle_task_delta;
+               qcfs_rq->h_nr_running += task_delta;
+               qcfs_rq->idle_h_nr_running += idle_task_delta;
 
                /* end evaluation on encountering a throttled cfs_rq */
-               if (cfs_rq_throttled(cfs_rq))
+               if (cfs_rq_throttled(qcfs_rq))
                        goto unthrottle_throttle;
        }
 
        for_each_sched_entity(se) {
-               cfs_rq = cfs_rq_of(se);
+               struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
-               update_load_avg(cfs_rq, se, UPDATE_TG);
+               update_load_avg(qcfs_rq, se, UPDATE_TG);
                se_update_runnable(se);
 
-               cfs_rq->h_nr_running += task_delta;
-               cfs_rq->idle_h_nr_running += idle_task_delta;
+               if (cfs_rq_is_idle(group_cfs_rq(se)))
+                       idle_task_delta = cfs_rq->h_nr_running;
 
+               qcfs_rq->h_nr_running += task_delta;
+               qcfs_rq->idle_h_nr_running += idle_task_delta;
 
                /* end evaluation on encountering a throttled cfs_rq */
-               if (cfs_rq_throttled(cfs_rq))
+               if (cfs_rq_throttled(qcfs_rq))
                        goto unthrottle_throttle;
 
                /*
                 * One parent has been throttled and cfs_rq removed from the
                 * list. Add it back to not break the leaf list.
                 */
-               if (throttled_hierarchy(cfs_rq))
-                       list_add_leaf_cfs_rq(cfs_rq);
+               if (throttled_hierarchy(qcfs_rq))
+                       list_add_leaf_cfs_rq(qcfs_rq);
        }
 
        /* At this point se is NULL and we are at root level*/
@@ -4949,9 +4993,9 @@ unthrottle_throttle:
         * assertion below.
         */
        for_each_sched_entity(se) {
-               cfs_rq = cfs_rq_of(se);
+               struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
-               if (list_add_leaf_cfs_rq(cfs_rq))
+               if (list_add_leaf_cfs_rq(qcfs_rq))
                        break;
        }
 
@@ -5574,6 +5618,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                cfs_rq->h_nr_running++;
                cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
+               if (cfs_rq_is_idle(cfs_rq))
+                       idle_h_nr_running = 1;
+
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto enqueue_throttle;
@@ -5591,6 +5638,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                cfs_rq->h_nr_running++;
                cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
+               if (cfs_rq_is_idle(cfs_rq))
+                       idle_h_nr_running = 1;
+
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto enqueue_throttle;
@@ -5668,6 +5718,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                cfs_rq->h_nr_running--;
                cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
+               if (cfs_rq_is_idle(cfs_rq))
+                       idle_h_nr_running = 1;
+
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto dequeue_throttle;
@@ -5697,6 +5750,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                cfs_rq->h_nr_running--;
                cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
+               if (cfs_rq_is_idle(cfs_rq))
+                       idle_h_nr_running = 1;
+
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto dequeue_throttle;
@@ -6249,7 +6305,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
                time = cpu_clock(this);
        }
 
-       for_each_cpu_wrap(cpu, cpus, target) {
+       for_each_cpu_wrap(cpu, cpus, target + 1) {
                if (has_idle_core) {
                        i = select_idle_core(p, cpu, cpus, &idle_cpu);
                        if ((unsigned int)i < nr_cpumask_bits)
@@ -6376,6 +6432,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 
        /* Check a recently used CPU as a potential idle candidate: */
        recent_used_cpu = p->recent_used_cpu;
+       p->recent_used_cpu = prev;
        if (recent_used_cpu != prev &&
            recent_used_cpu != target &&
            cpus_share_cache(recent_used_cpu, target) &&
@@ -6902,9 +6959,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
        } else if (wake_flags & WF_TTWU) { /* XXX always ? */
                /* Fast path */
                new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
-
-               if (want_affine)
-                       current->recent_used_cpu = cpu;
        }
        rcu_read_unlock();
 
@@ -7041,24 +7095,22 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-       if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
-               return;
-
        for_each_sched_entity(se) {
                if (SCHED_WARN_ON(!se->on_rq))
                        return;
+               if (se_is_idle(se))
+                       return;
                cfs_rq_of(se)->last = se;
        }
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-       if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
-               return;
-
        for_each_sched_entity(se) {
                if (SCHED_WARN_ON(!se->on_rq))
                        return;
+               if (se_is_idle(se))
+                       return;
                cfs_rq_of(se)->next = se;
        }
 }
@@ -7079,6 +7131,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        int scale = cfs_rq->nr_running >= sched_nr_latency;
        int next_buddy_marked = 0;
+       int cse_is_idle, pse_is_idle;
 
        if (unlikely(se == pse))
                return;
@@ -7123,8 +7176,21 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
                return;
 
        find_matching_se(&se, &pse);
-       update_curr(cfs_rq_of(se));
        BUG_ON(!pse);
+
+       cse_is_idle = se_is_idle(se);
+       pse_is_idle = se_is_idle(pse);
+
+       /*
+        * Preempt an idle group in favor of a non-idle group (and don't preempt
+        * in the inverse case).
+        */
+       if (cse_is_idle && !pse_is_idle)
+               goto preempt;
+       if (cse_is_idle != pse_is_idle)
+               return;
+
+       update_curr(cfs_rq_of(se));
        if (wakeup_preempt_entity(se, pse) == 1) {
                /*
                 * Bias pick_next to pick the sched entity that is
@@ -10217,9 +10283,11 @@ static inline int on_null_domain(struct rq *rq)
 static inline int find_new_ilb(void)
 {
        int ilb;
+       const struct cpumask *hk_mask;
+
+       hk_mask = housekeeping_cpumask(HK_FLAG_MISC);
 
-       for_each_cpu_and(ilb, nohz.idle_cpus_mask,
-                             housekeeping_cpumask(HK_FLAG_MISC)) {
+       for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) {
 
                if (ilb == smp_processor_id())
                        continue;
@@ -11416,10 +11484,12 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 
 static DEFINE_MUTEX(shares_mutex);
 
-int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
        int i;
 
+       lockdep_assert_held(&shares_mutex);
+
        /*
         * We can't change the weight of the root cgroup.
         */
@@ -11428,9 +11498,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 
        shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
 
-       mutex_lock(&shares_mutex);
        if (tg->shares == shares)
-               goto done;
+               return 0;
 
        tg->shares = shares;
        for_each_possible_cpu(i) {
@@ -11448,10 +11517,88 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
                rq_unlock_irqrestore(rq, &rf);
        }
 
-done:
+       return 0;
+}
+
+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+{
+       int ret;
+
+       mutex_lock(&shares_mutex);
+       if (tg_is_idle(tg))
+               ret = -EINVAL;
+       else
+               ret = __sched_group_set_shares(tg, shares);
+       mutex_unlock(&shares_mutex);
+
+       return ret;
+}
+
+int sched_group_set_idle(struct task_group *tg, long idle)
+{
+       int i;
+
+       if (tg == &root_task_group)
+               return -EINVAL;
+
+       if (idle < 0 || idle > 1)
+               return -EINVAL;
+
+       mutex_lock(&shares_mutex);
+
+       if (tg->idle == idle) {
+               mutex_unlock(&shares_mutex);
+               return 0;
+       }
+
+       tg->idle = idle;
+
+       for_each_possible_cpu(i) {
+               struct rq *rq = cpu_rq(i);
+               struct sched_entity *se = tg->se[i];
+               struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i];
+               bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
+               long idle_task_delta;
+               struct rq_flags rf;
+
+               rq_lock_irqsave(rq, &rf);
+
+               grp_cfs_rq->idle = idle;
+               if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
+                       goto next_cpu;
+
+               idle_task_delta = grp_cfs_rq->h_nr_running -
+                                 grp_cfs_rq->idle_h_nr_running;
+               if (!cfs_rq_is_idle(grp_cfs_rq))
+                       idle_task_delta *= -1;
+
+               for_each_sched_entity(se) {
+                       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+                       if (!se->on_rq)
+                               break;
+
+                       cfs_rq->idle_h_nr_running += idle_task_delta;
+
+                       /* Already accounted at parent level and above. */
+                       if (cfs_rq_is_idle(cfs_rq))
+                               break;
+               }
+
+next_cpu:
+               rq_unlock_irqrestore(rq, &rf);
+       }
+
+       /* Idle groups have minimum weight. */
+       if (tg_is_idle(tg))
+               __sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO));
+       else
+               __sched_group_set_shares(tg, NICE_0_LOAD);
+
        mutex_unlock(&shares_mutex);
        return 0;
 }
+
 #else /* CONFIG_FAIR_GROUP_SCHED */
 
 void free_fair_sched_group(struct task_group *tg) { }
index 14a41a2..3d3e579 100644 (file)
@@ -227,6 +227,8 @@ static inline void update_avg(u64 *avg, u64 sample)
  */
 #define SCHED_FLAG_SUGOV       0x10000000
 
+#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
+
 static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
 {
 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
@@ -394,6 +396,9 @@ struct task_group {
        struct cfs_rq           **cfs_rq;
        unsigned long           shares;
 
+       /* A positive value indicates that this is a SCHED_IDLE group. */
+       int                     idle;
+
 #ifdef CONFIG_SMP
        /*
         * load_avg can be heavily contended at clock tick time, so put
@@ -503,6 +508,8 @@ extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
+extern int sched_group_set_idle(struct task_group *tg, long idle);
+
 #ifdef CONFIG_SMP
 extern void set_task_rq_fair(struct sched_entity *se,
                             struct cfs_rq *prev, struct cfs_rq *next);
@@ -599,6 +606,9 @@ struct cfs_rq {
        struct list_head        leaf_cfs_rq_list;
        struct task_group       *tg;    /* group that "owns" this runqueue */
 
+       /* Locally cached copy of our task_group's idle value */
+       int                     idle;
+
 #ifdef CONFIG_CFS_BANDWIDTH
        int                     runtime_enabled;
        s64                     runtime_remaining;
@@ -1093,7 +1103,7 @@ struct rq {
        unsigned int            core_sched_seq;
        struct rb_root          core_tree;
 
-       /* shared state */
+       /* shared state -- careful with sched_core_cpu_deactivate() */
        unsigned int            core_task_seq;
        unsigned int            core_pick_seq;
        unsigned long           core_cookie;
@@ -2234,6 +2244,7 @@ extern struct task_struct *pick_next_task_idle(struct rq *rq);
 #define SCA_CHECK              0x01
 #define SCA_MIGRATE_DISABLE    0x02
 #define SCA_MIGRATE_ENABLE     0x04
+#define SCA_USER               0x08
 
 #ifdef CONFIG_SMP
 
@@ -2255,6 +2266,9 @@ static inline struct task_struct *get_push_task(struct rq *rq)
        if (p->nr_cpus_allowed == 1)
                return NULL;
 
+       if (p->migration_disabled)
+               return NULL;
+
        rq->push_busy = true;
        return get_task_struct(p);
 }
@@ -2385,6 +2399,21 @@ extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 extern const_debug unsigned int sysctl_sched_nr_migrate;
 extern const_debug unsigned int sysctl_sched_migration_cost;
 
+#ifdef CONFIG_SCHED_DEBUG
+extern unsigned int sysctl_sched_latency;
+extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern int sysctl_resched_latency_warn_ms;
+extern int sysctl_resched_latency_warn_once;
+
+extern unsigned int sysctl_sched_tunable_scaling;
+
+extern unsigned int sysctl_numa_balancing_scan_delay;
+extern unsigned int sysctl_numa_balancing_scan_period_min;
+extern unsigned int sysctl_numa_balancing_scan_period_max;
+extern unsigned int sysctl_numa_balancing_scan_size;
+#endif
+
 #ifdef CONFIG_SCHED_HRTICK
 
 /*
index b77ad49..4e8698e 100644 (file)
@@ -1482,6 +1482,8 @@ int                               sched_max_numa_distance;
 static int                     *sched_domains_numa_distance;
 static struct cpumask          ***sched_domains_numa_masks;
 int __read_mostly              node_reclaim_distance = RECLAIM_DISTANCE;
+
+static unsigned long __read_mostly *sched_numa_onlined_nodes;
 #endif
 
 /*
@@ -1833,6 +1835,16 @@ void sched_init_numa(void)
                        sched_domains_numa_masks[i][j] = mask;
 
                        for_each_node(k) {
+                               /*
+                                * offline nodes, so defer building the node
+                                * masks until the node is brought up.
+                                * masks to its bringup.
+                                * This relies on all unique distance values
+                                * still being visible at init time.
+                                */
+                               if (!node_online(j))
+                                       continue;
+
                                if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
                                        sched_numa_warn("Node-distance not symmetric");
 
@@ -1886,6 +1898,53 @@ void sched_init_numa(void)
        sched_max_numa_distance = sched_domains_numa_distance[nr_levels - 1];
 
        init_numa_topology_type();
+
+       sched_numa_onlined_nodes = bitmap_alloc(nr_node_ids, GFP_KERNEL);
+       if (!sched_numa_onlined_nodes)
+               return;
+
+       bitmap_zero(sched_numa_onlined_nodes, nr_node_ids);
+       for_each_online_node(i)
+               bitmap_set(sched_numa_onlined_nodes, i, 1);
+}
+
+static void __sched_domains_numa_masks_set(unsigned int node)
+{
+       int i, j;
+
+       /*
+        * NUMA masks are not built for offline nodes in sched_init_numa().
+        * Thus, when a CPU of a never-onlined-before node gets plugged in,
+        * adding that new CPU to the right NUMA masks is not sufficient: the
+        * masks of that CPU's node must also be updated.
+        */
+       if (test_bit(node, sched_numa_onlined_nodes))
+               return;
+
+       bitmap_set(sched_numa_onlined_nodes, node, 1);
+
+       for (i = 0; i < sched_domains_numa_levels; i++) {
+               for (j = 0; j < nr_node_ids; j++) {
+                       if (!node_online(j) || node == j)
+                               continue;
+
+                       if (node_distance(j, node) > sched_domains_numa_distance[i])
+                               continue;
+
+                       /* Add remote nodes in our masks */
+                       cpumask_or(sched_domains_numa_masks[i][node],
+                                  sched_domains_numa_masks[i][node],
+                                  sched_domains_numa_masks[0][j]);
+               }
+       }
+
+       /*
+        * A new node has been brought up, potentially changing the topology
+        * classification.
+        *
+        * Note that this is racy vs any use of sched_numa_topology_type :/
+        */
+       init_numa_topology_type();
 }
 
 void sched_domains_numa_masks_set(unsigned int cpu)
@@ -1893,8 +1952,14 @@ void sched_domains_numa_masks_set(unsigned int cpu)
        int node = cpu_to_node(cpu);
        int i, j;
 
+       __sched_domains_numa_masks_set(node);
+
        for (i = 0; i < sched_domains_numa_levels; i++) {
                for (j = 0; j < nr_node_ids; j++) {
+                       if (!node_online(j))
+                               continue;
+
+                       /* Set ourselves in the remote node's masks */
                        if (node_distance(j, node) <= sched_domains_numa_distance[i])
                                cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
                }
index 057e17f..6469eca 100644 (file)
@@ -602,7 +602,7 @@ static inline void seccomp_sync_threads(unsigned long flags)
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);
                atomic_set(&thread->seccomp.filter_count,
-                          atomic_read(&thread->seccomp.filter_count));
+                          atomic_read(&caller->seccomp.filter_count));
 
                /*
                 * Don't let an unprivileged task work around
index a3229ad..52b6abe 100644 (file)
@@ -1413,6 +1413,21 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
        return sighand;
 }
 
+#ifdef CONFIG_LOCKDEP
+void lockdep_assert_task_sighand_held(struct task_struct *task)
+{
+       struct sighand_struct *sighand;
+
+       rcu_read_lock();
+       sighand = rcu_dereference(task->sighand);
+       if (sighand)
+               lockdep_assert_held(&sighand->siglock);
+       else
+               WARN_ON_ONCE(1);
+       rcu_read_unlock();
+}
+#endif
+
 /*
  * send signal info to all the members of a group
  */
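
The new lockdep_assert_task_sighand_held() lets code document, at the point of use, that the target task's sighand->siglock is held (it is used later in this series by the posix CPU timer code). A hypothetical caller, for illustration only:

static void example_touch_group_state(struct task_struct *tsk)
{
        lockdep_assert_task_sighand_held(tsk);
        /* ... safe to touch tsk->signal state serialized by siglock ... */
}
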
index 52bf159..f43ede0 100644 (file)
@@ -764,7 +764,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 EXPORT_SYMBOL(smp_call_function_single);
 
 /**
- * smp_call_function_single_async(): Run an asynchronous function on a
+ * smp_call_function_single_async() - Run an asynchronous function on a
  *                              specific CPU.
  * @cpu: The CPU to run on.
  * @csd: Pre-allocated and setup data structure
@@ -783,6 +783,8 @@ EXPORT_SYMBOL(smp_call_function_single);
  *
  * NOTE: Be careful, there is unfortunately no current debugging facility to
  * validate the correctness of this serialization.
+ *
+ * Return: %0 on success or negative errno value on error
  */
 int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
 {
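
A hedged kernel-side sketch of the asynchronous call documented above; all names are made up, and the per-CPU csd must stay valid (and not be reused) until its callback has run:

static void example_ipi_func(void *info)
{
        pr_info("async call running on CPU %d\n", smp_processor_id());
}

static DEFINE_PER_CPU(call_single_data_t, example_csd);

static int __init example_csd_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                INIT_CSD(per_cpu_ptr(&example_csd, cpu), example_ipi_func, NULL);
        return 0;
}

static int example_kick(int cpu)
{
        /* Returns -EBUSY if the previous async call on this csd is still pending. */
        return smp_call_function_single_async(cpu, per_cpu_ptr(&example_csd, cpu));
}
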
@@ -974,7 +976,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
  * @mask: The set of cpus to run on (only runs on online subset).
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
- * @flags: Bitmask that controls the operation. If %SCF_WAIT is set, wait
+ * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait
  *        (atomically) until function has completed on other CPUs. If
  *        %SCF_RUN_LOCAL is set, the function will also be run locally
  *        if the local CPU is set in the @cpumask.
@@ -1180,7 +1182,13 @@ void wake_up_all_idle_cpus(void)
 EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
 
 /**
- * smp_call_on_cpu - Call a function on a specific cpu
+ * struct smp_call_on_cpu_struct - Call a function on a specific CPU
+ * @work: &work_struct
+ * @done: &completion to signal
+ * @func: function to call
+ * @data: function's data argument
+ * @ret: return value from @func
+ * @cpu: target CPU (%-1 for any CPU)
  *
  * Used to call a function on a specific cpu and wait for it to return.
  * Optionally make sure the call is done on a specified physical cpu via vcpu
index cf6acab..f6bc0bc 100644 (file)
@@ -291,7 +291,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
        unsigned int cpu;
        int ret = 0;
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smpboot_threads_lock);
        for_each_online_cpu(cpu) {
                ret = __smpboot_create_thread(plug_thread, cpu);
@@ -304,7 +304,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
        list_add(&plug_thread->list, &hotplug_threads);
 out:
        mutex_unlock(&smpboot_threads_lock);
-       put_online_cpus();
+       cpus_read_unlock();
        return ret;
 }
 EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
@@ -317,12 +317,12 @@ EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
  */
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
 {
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smpboot_threads_lock);
        list_del(&plug_thread->list);
        smpboot_destroy_threads(plug_thread);
        mutex_unlock(&smpboot_threads_lock);
-       put_online_cpus();
+       cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
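
These hunks, like the ones in tg_set_cfs_bandwidth() and clocksource_verify_percpu() elsewhere in this series, simply rename get_online_cpus()/put_online_cpus() to cpus_read_lock()/cpus_read_unlock(); the semantics are unchanged. A minimal sketch of the pattern:

static void example_walk_online_cpus(void)
{
        unsigned int cpu;

        cpus_read_lock();               /* hold off CPU hotplug while walking */
        for_each_online_cpu(cpu)
                pr_info("CPU %u is online\n", cpu);
        cpus_read_unlock();
}
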
 
index f3a0121..322b65d 100644 (file)
@@ -422,7 +422,7 @@ static inline void invoke_softirq(void)
        if (ksoftirqd_running(local_softirq_pending()))
                return;
 
-       if (!force_irqthreads || !__this_cpu_read(ksoftirqd)) {
+       if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
index ef1a78f..72c7639 100644 (file)
@@ -480,7 +480,8 @@ static int set_user(struct cred *new)
         * failure to the execve() stage.
         */
        if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
-                       new_user != INIT_USER)
+                       new_user != INIT_USER &&
+                       !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
                current->flags |= PF_NPROC_EXCEEDED;
        else
                current->flags &= ~PF_NPROC_EXCEEDED;
index 272f4a2..25e49b4 100644 (file)
@@ -536,6 +536,21 @@ static void proc_put_char(void **buf, size_t *size, char c)
        }
 }
 
+static int do_proc_dobool_conv(bool *negp, unsigned long *lvalp,
+                               int *valp,
+                               int write, void *data)
+{
+       if (write) {
+               *(bool *)valp = *lvalp;
+       } else {
+               int val = *(bool *)valp;
+
+               *lvalp = (unsigned long)val;
+               *negp = false;
+       }
+       return 0;
+}
+
 static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
                                 int *valp,
                                 int write, void *data)
@@ -798,6 +813,26 @@ static int do_proc_douintvec(struct ctl_table *table, int write,
                                   buffer, lenp, ppos, conv, data);
 }
 
+/**
+ * proc_dobool - read/write a bool
+ * @table: the sysctl table
+ * @write: %TRUE if this is a write to the sysctl file
+ * @buffer: the user buffer
+ * @lenp: the size of the user buffer
+ * @ppos: file position
+ *
+ * Reads/writes one bool value from/to the user buffer,
+ * treated as an ASCII string (any non-zero integer written maps to true).
+ *
+ * Returns 0 on success.
+ */
+int proc_dobool(struct ctl_table *table, int write, void *buffer,
+               size_t *lenp, loff_t *ppos)
+{
+       return do_proc_dointvec(table, write, buffer, lenp, ppos,
+                               do_proc_dobool_conv, NULL);
+}
+
 /**
  * proc_dointvec - read a vector of integers
  * @table: the sysctl table
@@ -1630,6 +1665,12 @@ int proc_dostring(struct ctl_table *table, int write,
        return -ENOSYS;
 }
 
+int proc_dobool(struct ctl_table *table, int write,
+               void *buffer, size_t *lenp, loff_t *ppos)
+{
+       return -ENOSYS;
+}
+
 int proc_dointvec(struct ctl_table *table, int write,
                  void *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -3425,6 +3466,7 @@ int __init sysctl_init(void)
  * No sense putting this after each symbol definition, twice,
  * exception granted :-)
  */
+EXPORT_SYMBOL(proc_dobool);
 EXPORT_SYMBOL(proc_dointvec);
 EXPORT_SYMBOL(proc_douintvec);
 EXPORT_SYMBOL(proc_dointvec_jiffies);
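
A hedged sketch of wiring a bool up to the new proc_dobool() handler; the variable and table names are illustrative:

static bool example_feature_enabled;

static struct ctl_table example_sysctl_table[] = {
        {
                .procname       = "example_feature_enabled",
                .data           = &example_feature_enabled,
                .maxlen         = sizeof(bool),
                .mode           = 0644,
                .proc_handler   = proc_dobool,
        },
        { }
};

/* Typically registered with register_sysctl("kernel", example_sysctl_table). */
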
index 01df123..df922f4 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/prandom.h>
 #include <linux/cpu.h>
 
+#include "tick-internal.h"
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");
 
@@ -34,9 +36,6 @@ static u64 wdtest_jiffies_read(struct clocksource *cs)
        return (u64)jiffies;
 }
 
-/* Assume HZ > 100. */
-#define JIFFIES_SHIFT  8
-
 static struct clocksource clocksource_wdtest_jiffies = {
        .name                   = "wdtest-jiffies",
        .rating                 = 1, /* lowest valid rating*/
index b89c76e..b8a14d2 100644 (file)
@@ -306,12 +306,12 @@ void clocksource_verify_percpu(struct clocksource *cs)
                return;
        cpumask_clear(&cpus_ahead);
        cpumask_clear(&cpus_behind);
-       get_online_cpus();
+       cpus_read_lock();
        preempt_disable();
        clocksource_verify_choose_cpus();
        if (cpumask_weight(&cpus_chosen) == 0) {
                preempt_enable();
-               put_online_cpus();
+               cpus_read_unlock();
                pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
                return;
        }
@@ -337,7 +337,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
                        cs_nsec_min = cs_nsec;
        }
        preempt_enable();
-       put_online_cpus();
+       cpus_read_unlock();
        if (!cpumask_empty(&cpus_ahead))
                pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
                        cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
index 4a66725..0ea8702 100644 (file)
@@ -652,21 +652,10 @@ static inline int hrtimer_hres_active(void)
        return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
 }
 
-/*
- * Reprogram the event source with checking both queues for the
- * next event
- * Called with interrupts disabled and base->lock held
- */
-static void
-hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
+static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+                               struct hrtimer *next_timer,
+                               ktime_t expires_next)
 {
-       ktime_t expires_next;
-
-       expires_next = hrtimer_update_next_event(cpu_base);
-
-       if (skip_equal && expires_next == cpu_base->expires_next)
-               return;
-
        cpu_base->expires_next = expires_next;
 
        /*
@@ -689,7 +678,25 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
        if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
                return;
 
-       tick_program_event(cpu_base->expires_next, 1);
+       tick_program_event(expires_next, 1);
+}
+
+/*
+ * Reprogram the event source with checking both queues for the
+ * next event
+ * Called with interrupts disabled and base->lock held
+ */
+static void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
+{
+       ktime_t expires_next;
+
+       expires_next = hrtimer_update_next_event(cpu_base);
+
+       if (skip_equal && expires_next == cpu_base->expires_next)
+               return;
+
+       __hrtimer_reprogram(cpu_base, cpu_base->next_timer, expires_next);
 }
 
 /* High resolution timer related functions */
@@ -720,23 +727,7 @@ static inline int hrtimer_is_hres_enabled(void)
        return hrtimer_hres_enabled;
 }
 
-/*
- * Retrigger next event is called after clock was set
- *
- * Called with interrupts disabled via on_each_cpu()
- */
-static void retrigger_next_event(void *arg)
-{
-       struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
-
-       if (!__hrtimer_hres_active(base))
-               return;
-
-       raw_spin_lock(&base->lock);
-       hrtimer_update_base(base);
-       hrtimer_force_reprogram(base, 0);
-       raw_spin_unlock(&base->lock);
-}
+static void retrigger_next_event(void *arg);
 
 /*
  * Switch to high resolution mode
@@ -758,29 +749,54 @@ static void hrtimer_switch_to_hres(void)
        retrigger_next_event(NULL);
 }
 
-static void clock_was_set_work(struct work_struct *work)
-{
-       clock_was_set();
-}
+#else
 
-static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+static inline int hrtimer_is_hres_enabled(void) { return 0; }
+static inline void hrtimer_switch_to_hres(void) { }
 
+#endif /* CONFIG_HIGH_RES_TIMERS */
 /*
- * Called from timekeeping and resume code to reprogram the hrtimer
- * interrupt device on all cpus.
+ * Retrigger next event is called after clock was set with interrupts
+ * disabled through an SMP function call or directly from low level
+ * resume code.
+ *
+ * This is only invoked when:
+ *     - CONFIG_HIGH_RES_TIMERS is enabled.
+ *     - CONFIG_NOHZ_COMMON is enabled.
+ *
+ * In all other cases this function is empty and, because the call sites
+ * are optimized out, it vanishes as well, i.e. there is no need for lots
+ * of #ifdeffery.
  */
-void clock_was_set_delayed(void)
+static void retrigger_next_event(void *arg)
 {
-       schedule_work(&hrtimer_work);
-}
-
-#else
+       struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
-static inline int hrtimer_is_hres_enabled(void) { return 0; }
-static inline void hrtimer_switch_to_hres(void) { }
-static inline void retrigger_next_event(void *arg) { }
+       /*
+        * When high resolution mode or nohz is active, then the offsets of
+        * CLOCK_REALTIME/TAI/BOOTTIME have to be updated. Otherwise the
+        * next tick will take care of that.
+        *
+        * If high resolution mode is active then the next expiring timer
+        * must be reevaluated and the clock event device reprogrammed if
+        * necessary.
+        *
+        * In the NOHZ case the update of the offset and the reevaluation
+        * of the next expiring timer is enough. The return from the SMP
+        * function call will take care of the reprogramming in case the
+        * CPU was in a NOHZ idle sleep.
+        */
+       if (!__hrtimer_hres_active(base) && !tick_nohz_active)
+               return;
 
-#endif /* CONFIG_HIGH_RES_TIMERS */
+       raw_spin_lock(&base->lock);
+       hrtimer_update_base(base);
+       if (__hrtimer_hres_active(base))
+               hrtimer_force_reprogram(base, 0);
+       else
+               hrtimer_update_next_event(base);
+       raw_spin_unlock(&base->lock);
+}
 
 /*
  * When a timer is enqueued and expires earlier than the already enqueued
@@ -835,75 +851,161 @@ static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
        if (base->cpu_base != cpu_base)
                return;
 
+       if (expires >= cpu_base->expires_next)
+               return;
+
        /*
-        * If the hrtimer interrupt is running, then it will
-        * reevaluate the clock bases and reprogram the clock event
-        * device. The callbacks are always executed in hard interrupt
-        * context so we don't need an extra check for a running
-        * callback.
+        * If the hrtimer interrupt is running, then it will reevaluate the
+        * clock bases and reprogram the clock event device.
         */
        if (cpu_base->in_hrtirq)
                return;
 
-       if (expires >= cpu_base->expires_next)
-               return;
-
-       /* Update the pointer to the next expiring timer */
        cpu_base->next_timer = timer;
-       cpu_base->expires_next = expires;
+
+       __hrtimer_reprogram(cpu_base, timer, expires);
+}
+
+static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base,
+                            unsigned int active)
+{
+       struct hrtimer_clock_base *base;
+       unsigned int seq;
+       ktime_t expires;
 
        /*
-        * If hres is not active, hardware does not have to be
-        * programmed yet.
+        * Update the base offsets unconditionally so that the following
+        * check for whether the SMP function call is required works.
         *
-        * If a hang was detected in the last timer interrupt then we
-        * do not schedule a timer which is earlier than the expiry
-        * which we enforced in the hang detection. We want the system
-        * to make progress.
+        * The update is safe even when the remote CPU is in the hrtimer
+        * interrupt or the hrtimer soft interrupt and expiring affected
+        * bases. Either it will see the update before handling a base or
+        * it will see it when it finishes the processing and reevaluates
+        * the next expiring timer.
         */
-       if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
-               return;
+       seq = cpu_base->clock_was_set_seq;
+       hrtimer_update_base(cpu_base);
+
+       /*
+        * If the sequence did not change over the update then the
+        * remote CPU already handled it.
+        */
+       if (seq == cpu_base->clock_was_set_seq)
+               return false;
+
+       /*
+        * If the remote CPU is currently handling an hrtimer interrupt, it
+        * will reevaluate the first expiring timer of all clock bases
+        * before reprogramming. Nothing to do here.
+        */
+       if (cpu_base->in_hrtirq)
+               return false;
 
        /*
-        * Program the timer hardware. We enforce the expiry for
-        * events which are already in the past.
+        * Walk the affected clock bases and check whether the first expiring
+        * timer in a clock base is moving ahead of the first expiring timer of
+        * @cpu_base. If so, the IPI must be invoked because per CPU clock
+        * event devices cannot be remotely reprogrammed.
         */
-       tick_program_event(expires, 1);
+       active &= cpu_base->active_bases;
+
+       for_each_active_base(base, cpu_base, active) {
+               struct timerqueue_node *next;
+
+               next = timerqueue_getnext(&base->active);
+               expires = ktime_sub(next->expires, base->offset);
+               if (expires < cpu_base->expires_next)
+                       return true;
+
+               /* Extra check for softirq clock bases */
+               if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT)
+                       continue;
+               if (cpu_base->softirq_activated)
+                       continue;
+               if (expires < cpu_base->softirq_expires_next)
+                       return true;
+       }
+       return false;
 }
 
 /*
- * Clock realtime was set
- *
- * Change the offset of the realtime clock vs. the monotonic
- * clock.
+ * Clock was set. This might affect CLOCK_REALTIME, CLOCK_TAI and
+ * CLOCK_BOOTTIME (for late sleep time injection).
  *
- * We might have to reprogram the high resolution timer interrupt. On
- * SMP we call the architecture specific code to retrigger _all_ high
- * resolution timer interrupts. On UP we just disable interrupts and
- * call the high resolution interrupt code.
+ * This requires updating the offsets for these clocks
+ * vs. CLOCK_MONOTONIC. When high resolution timers are enabled, this
+ * also requires eventually reprogramming the per CPU clock event devices
+ * when the change moves an affected timer ahead of the first expiring
+ * timer on that CPU. Obviously remote per CPU clock event devices cannot
+ * be reprogrammed. The other reason why an IPI has to be sent is when the
+ * system is in !HIGH_RES and NOHZ mode. The NOHZ mode updates the offsets
+ * in the tick, which obviously might be stopped, so the IPI has to wake
+ * the remote CPU, which might be sleeping in idle, to get this sorted.
  */
-void clock_was_set(void)
+void clock_was_set(unsigned int bases)
 {
-#ifdef CONFIG_HIGH_RES_TIMERS
-       /* Retrigger the CPU local events everywhere */
-       on_each_cpu(retrigger_next_event, NULL, 1);
-#endif
+       struct hrtimer_cpu_base *cpu_base = raw_cpu_ptr(&hrtimer_bases);
+       cpumask_var_t mask;
+       int cpu;
+
+       if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active)
+               goto out_timerfd;
+
+       if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+               on_each_cpu(retrigger_next_event, NULL, 1);
+               goto out_timerfd;
+       }
+
+       /* Avoid interrupting CPUs if possible */
+       cpus_read_lock();
+       for_each_online_cpu(cpu) {
+               unsigned long flags;
+
+               cpu_base = &per_cpu(hrtimer_bases, cpu);
+               raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+               if (update_needs_ipi(cpu_base, bases))
+                       cpumask_set_cpu(cpu, mask);
+
+               raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+       }
+
+       preempt_disable();
+       smp_call_function_many(mask, retrigger_next_event, NULL, 1);
+       preempt_enable();
+       cpus_read_unlock();
+       free_cpumask_var(mask);
+
+out_timerfd:
        timerfd_clock_was_set();
 }
 
+static void clock_was_set_work(struct work_struct *work)
+{
+       clock_was_set(CLOCK_SET_WALL);
+}
+
+static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+
+/*
+ * Called from timekeeping code to reprogram the hrtimer interrupt device
+ * on all cpus and to notify timerfd.
+ */
+void clock_was_set_delayed(void)
+{
+       schedule_work(&hrtimer_work);
+}
+
 /*
- * During resume we might have to reprogram the high resolution timer
- * interrupt on all online CPUs.  However, all other CPUs will be
- * stopped with IRQs interrupts disabled so the clock_was_set() call
- * must be deferred.
+ * Called during resume either directly via timekeeping_resume()
+ * or in the case of s2idle from tick_unfreeze() to ensure that the
+ * hrtimers are up to date.
  */
-void hrtimers_resume(void)
+void hrtimers_resume_local(void)
 {
        lockdep_assert_irqs_disabled();
        /* Retrigger on the local CPU */
        retrigger_next_event(NULL);
-       /* And schedule a retrigger for all others */
-       clock_was_set_delayed();
 }
 
 /*
@@ -1030,12 +1132,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
  * remove hrtimer, called with base lock held
  */
 static inline int
-remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
+remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
+              bool restart, bool keep_local)
 {
        u8 state = timer->state;
 
        if (state & HRTIMER_STATE_ENQUEUED) {
-               int reprogram;
+               bool reprogram;
 
                /*
                 * Remove the timer and force reprogramming when high
@@ -1048,8 +1151,16 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
                debug_deactivate(timer);
                reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
 
+               /*
+                * If the timer is not restarted then reprogramming is
+                * required if the timer is local. If it is local and about
+                * to be restarted, avoid programming it twice (on removal
+                * and a moment later when it's requeued).
+                */
                if (!restart)
                        state = HRTIMER_STATE_INACTIVE;
+               else
+                       reprogram &= !keep_local;
 
                __remove_hrtimer(timer, base, state, reprogram);
                return 1;
@@ -1103,9 +1214,31 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                                    struct hrtimer_clock_base *base)
 {
        struct hrtimer_clock_base *new_base;
+       bool force_local, first;
 
-       /* Remove an active timer from the queue: */
-       remove_hrtimer(timer, base, true);
+       /*
+        * If the timer is on the local cpu base and is the first expiring
+        * timer then this might end up reprogramming the hardware twice
+        * (on removal and on enqueue). To avoid that, skip the reprogram
+        * on removal, keep the timer local to the current CPU
+        * and enforce reprogramming after it is queued no matter whether
+        * it is the new first expiring timer again or not.
+        */
+       force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
+       force_local &= base->cpu_base->next_timer == timer;
+
+       /*
+        * Remove an active timer from the queue. In case it is not queued
+        * on the current CPU, make sure that remove_hrtimer() updates the
+        * remote data correctly.
+        *
+        * If it's on the current CPU and the first expiring timer, then
+        * skip reprogramming, keep the timer local and enforce
+        * reprogramming later if it was the first expiring timer.  This
+        * avoids programming the underlying clock event twice (once at
+        * removal and once after enqueue).
+        */
+       remove_hrtimer(timer, base, true, force_local);
 
        if (mode & HRTIMER_MODE_REL)
                tim = ktime_add_safe(tim, base->get_time());
@@ -1115,9 +1248,24 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
        /* Switch the timer base, if necessary: */
-       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+       if (!force_local) {
+               new_base = switch_hrtimer_base(timer, base,
+                                              mode & HRTIMER_MODE_PINNED);
+       } else {
+               new_base = base;
+       }
 
-       return enqueue_hrtimer(timer, new_base, mode);
+       first = enqueue_hrtimer(timer, new_base, mode);
+       if (!force_local)
+               return first;
+
+       /*
+        * Timer was forced to stay on the current CPU to avoid
+        * reprogramming on removal and enqueue. Force reprogram the
+        * hardware by evaluating the new first expiring timer.
+        */
+       hrtimer_force_reprogram(new_base->cpu_base, 1);
+       return 0;
 }
 
 /**
@@ -1183,7 +1331,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
        base = lock_hrtimer_base(timer, &flags);
 
        if (!hrtimer_callback_running(timer))
-               ret = remove_hrtimer(timer, base, false);
+               ret = remove_hrtimer(timer, base, false, false);
 
        unlock_hrtimer_base(timer, &flags);
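
With this rework clock_was_set() takes a bitmask of affected clock bases, letting it skip CPUs whose earliest expiry does not move. A hypothetical caller in timekeeping code, for illustration only:

static void example_after_realtime_step(void)
{
        /* CLOCK_SET_WALL is the wall-clock base mask used by clock_was_set_work() above. */
        clock_was_set(CLOCK_SET_WALL);
}
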
 
index 01935aa..bc4db9e 100644 (file)
 #include <linux/init.h>
 
 #include "timekeeping.h"
+#include "tick-internal.h"
 
 
-/* Since jiffies uses a simple TICK_NSEC multiplier
- * conversion, the .shift value could be zero. However
- * this would make NTP adjustments impossible as they are
- * in units of 1/2^.shift. Thus we use JIFFIES_SHIFT to
- * shift both the nominator and denominator the same
- * amount, and give ntp adjustments in units of 1/2^8
- *
- * The value 8 is somewhat carefully chosen, as anything
- * larger can result in overflows. TICK_NSEC grows as HZ
- * shrinks, so values greater than 8 overflow 32bits when
- * HZ=100.
- */
-#if HZ < 34
-#define JIFFIES_SHIFT  6
-#elif HZ < 67
-#define JIFFIES_SHIFT  7
-#else
-#define JIFFIES_SHIFT  8
-#endif
-
 static u64 jiffies_read(struct clocksource *cs)
 {
        return (u64) jiffies;
index 517be7f..ee73686 100644 (file)
@@ -291,6 +291,8 @@ static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
 
+       lockdep_assert_task_sighand_held(tsk);
+
        /* Check if cputimer isn't running. This is accessed without locking. */
        if (!READ_ONCE(pct->timers_active)) {
                struct task_cputime sum;
@@ -405,6 +407,55 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
        return 0;
 }
 
+static struct posix_cputimer_base *timer_base(struct k_itimer *timer,
+                                             struct task_struct *tsk)
+{
+       int clkidx = CPUCLOCK_WHICH(timer->it_clock);
+
+       if (CPUCLOCK_PERTHREAD(timer->it_clock))
+               return tsk->posix_cputimers.bases + clkidx;
+       else
+               return tsk->signal->posix_cputimers.bases + clkidx;
+}
+
+/*
+ * Force recalculating the base earliest expiration on the next tick.
+ * This will also re-evaluate the need to keep around the process wide
+ * cputime counter and tick dependency and eventually shut these down
+ * if necessary.
+ */
+static void trigger_base_recalc_expires(struct k_itimer *timer,
+                                       struct task_struct *tsk)
+{
+       struct posix_cputimer_base *base = timer_base(timer, tsk);
+
+       base->nextevt = 0;
+}
+
+/*
+ * Dequeue the timer and reset the base if it was its earliest expiration.
+ * It makes sure the next tick recalculates the base next expiration so we
+ * don't keep the costly process wide cputime counter around for a random
+ * amount of time, along with the tick dependency.
+ *
+ * If another timer gets queued between this and the next tick, its
+ * expiration will update the base next event if necessary on the next
+ * tick.
+ */
+static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
+{
+       struct cpu_timer *ctmr = &timer->it.cpu;
+       struct posix_cputimer_base *base;
+
+       if (!cpu_timer_dequeue(ctmr))
+               return;
+
+       base = timer_base(timer, p);
+       if (cpu_timer_getexpires(ctmr) == base->nextevt)
+               trigger_base_recalc_expires(timer, p);
+}
+
+
 /*
  * Clean up a CPU-clock timer that is about to be destroyed.
  * This is called from timer deletion with the timer already locked.
@@ -439,7 +490,7 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
                if (timer->it.cpu.firing)
                        ret = TIMER_RETRY;
                else
-                       cpu_timer_dequeue(ctmr);
+                       disarm_timer(timer, p);
 
                unlock_task_sighand(p, &flags);
        }
@@ -498,15 +549,9 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
  */
 static void arm_timer(struct k_itimer *timer, struct task_struct *p)
 {
-       int clkidx = CPUCLOCK_WHICH(timer->it_clock);
+       struct posix_cputimer_base *base = timer_base(timer, p);
        struct cpu_timer *ctmr = &timer->it.cpu;
        u64 newexp = cpu_timer_getexpires(ctmr);
-       struct posix_cputimer_base *base;
-
-       if (CPUCLOCK_PERTHREAD(timer->it_clock))
-               base = p->posix_cputimers.bases + clkidx;
-       else
-               base = p->signal->posix_cputimers.bases + clkidx;
 
        if (!cpu_timer_enqueue(&base->tqhead, ctmr))
                return;
@@ -703,16 +748,29 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;
 
-       if (new_expires != 0 && !(val < new_expires)) {
+       if (val >= new_expires) {
+               if (new_expires != 0) {
+                       /*
+                        * The designated time already passed, so we notify
+                        * immediately, even if the thread never runs to
+                        * accumulate more time on this clock.
+                        */
+                       cpu_timer_fire(timer);
+               }
+
                /*
-                * The designated time already passed, so we notify
-                * immediately, even if the thread never runs to
-                * accumulate more time on this clock.
+                * Make sure we don't keep around the process wide cputime
+                * counter or the tick dependency if they are not necessary.
                 */
-               cpu_timer_fire(timer);
-       }
+               sighand = lock_task_sighand(p, &flags);
+               if (!sighand)
+                       goto out;
+
+               if (!cpu_timer_queued(ctmr))
+                       trigger_base_recalc_expires(timer, p);
 
-       ret = 0;
+               unlock_task_sighand(p, &flags);
+       }
  out:
        rcu_read_unlock();
        if (old)
@@ -1346,8 +1404,6 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
                        }
                }
 
-               if (!*newval)
-                       return;
                *newval += now;
        }
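
The disarm/rearm helpers added above reset base->nextevt to zero to force a recalculation. The sketch below shows why that is sufficient; the helper name is hypothetical and the comparison is simplified, the real checks live in the expiry paths of kernel/time/posix-cpu-timers.c.

    /* Hypothetical, simplified view of the tick-side check: */
    static bool base_needs_recalc(struct posix_cputimer_base *base, u64 sample)
    {
            /*
             * nextevt == 0 compares as "already expired" against any
             * sample, so the next tick walks the timer queue, recomputes
             * the real earliest expiry and, if nothing is queued, drops
             * the process wide cputime counter and the tick dependency.
             */
            return sample >= base->nextevt;
    }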
 
index dd5697d..3913222 100644 (file)
@@ -336,7 +336,7 @@ void posixtimer_rearm(struct kernel_siginfo *info)
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
        enum pid_type type;
-       int ret = -1;
+       int ret;
        /*
         * FIXME: if ->sigq is queued we can race with
         * dequeue_signal()->posixtimer_rearm().
index d663249..4678935 100644 (file)
@@ -470,6 +470,13 @@ void tick_resume_local(void)
                else
                        tick_resume_oneshot();
        }
+
+       /*
+        * Ensure that hrtimers are up to date and the clockevents device
+        * is reprogrammed correctly when high resolution timers are
+        * enabled.
+        */
+       hrtimers_resume_local();
 }
 
 /**
index 6a742a2..649f2b4 100644 (file)
@@ -165,3 +165,35 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
 
 extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
 void timer_clear_idle(void);
+
+#define CLOCK_SET_WALL                                                 \
+       (BIT(HRTIMER_BASE_REALTIME) | BIT(HRTIMER_BASE_REALTIME_SOFT) | \
+        BIT(HRTIMER_BASE_TAI) | BIT(HRTIMER_BASE_TAI_SOFT))
+
+#define CLOCK_SET_BOOT                                                 \
+       (BIT(HRTIMER_BASE_BOOTTIME) | BIT(HRTIMER_BASE_BOOTTIME_SOFT))
+
+void clock_was_set(unsigned int bases);
+void clock_was_set_delayed(void);
+
+void hrtimers_resume_local(void);
+
+/* Since jiffies uses a simple TICK_NSEC multiplier
+ * conversion, the .shift value could be zero. However
+ * this would make NTP adjustments impossible as they are
+ * in units of 1/2^.shift. Thus we use JIFFIES_SHIFT to
+ * shift both the numerator and denominator by the same
+ * amount, and give NTP adjustments in units of 1/2^8
+ *
+ * The value 8 is somewhat carefully chosen, as anything
+ * larger can result in overflows. TICK_NSEC grows as HZ
+ * shrinks, so values greater than 8 overflow 32bits when
+ * HZ=100.
+ */
+#if HZ < 34
+#define JIFFIES_SHIFT  6
+#elif HZ < 67
+#define JIFFIES_SHIFT  7
+#else
+#define JIFFIES_SHIFT  8
+#endif
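
A worked example makes the overflow limit described above concrete; the numbers assume HZ=100, i.e. TICK_NSEC = 10,000,000 ns.

    /*
     *   TICK_NSEC << 8 = 10,000,000 * 256 = 2,560,000,000  -> fits in 32 bits
     *   TICK_NSEC << 9 = 10,000,000 * 512 = 5,120,000,000  -> overflows 2^32
     *
     * Lower HZ values mean a larger TICK_NSEC, which is why the #if ladder
     * above drops to JIFFIES_SHIFT values of 7 and 6 for small HZ.
     */
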
index 8a364aa..b348749 100644 (file)
@@ -1323,8 +1323,8 @@ out:
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
-       /* signal hrtimers about time change */
-       clock_was_set();
+       /* Signal hrtimers about time change */
+       clock_was_set(CLOCK_SET_WALL);
 
        if (!ret)
                audit_tk_injoffset(ts_delta);
@@ -1371,8 +1371,8 @@ error: /* even if we error out, we forwarded the time, so call update */
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
-       /* signal hrtimers about time change */
-       clock_was_set();
+       /* Signal hrtimers about time change */
+       clock_was_set(CLOCK_SET_WALL);
 
        return ret;
 }
@@ -1746,8 +1746,8 @@ void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
-       /* signal hrtimers about time change */
-       clock_was_set();
+       /* Signal hrtimers about time change */
+       clock_was_set(CLOCK_SET_WALL | CLOCK_SET_BOOT);
 }
 #endif
 
@@ -1810,8 +1810,10 @@ void timekeeping_resume(void)
 
        touch_softlockup_watchdog();
 
+       /* Resume the clockevent device(s) and hrtimers */
        tick_resume();
-       hrtimers_resume();
+       /* Notify timerfd as resume is equivalent to clock_was_set() */
+       timerfd_resume();
 }
 
 int timekeeping_suspend(void)
@@ -2125,7 +2127,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
  * timekeeping_advance - Updates the timekeeper to the current time and
  * current NTP tick length
  */
-static void timekeeping_advance(enum timekeeping_adv_mode mode)
+static bool timekeeping_advance(enum timekeeping_adv_mode mode)
 {
        struct timekeeper *real_tk = &tk_core.timekeeper;
        struct timekeeper *tk = &shadow_timekeeper;
@@ -2196,9 +2198,8 @@ static void timekeeping_advance(enum timekeeping_adv_mode mode)
        write_seqcount_end(&tk_core.seq);
 out:
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
-       if (clock_set)
-               /* Have to call _delayed version, since in irq context*/
-               clock_was_set_delayed();
+
+       return !!clock_set;
 }
 
 /**
@@ -2207,7 +2208,8 @@ out:
  */
 void update_wall_time(void)
 {
-       timekeeping_advance(TK_ADV_TICK);
+       if (timekeeping_advance(TK_ADV_TICK))
+               clock_was_set_delayed();
 }
 
 /**
@@ -2387,8 +2389,9 @@ int do_adjtimex(struct __kernel_timex *txc)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        struct audit_ntp_data ad;
-       unsigned long flags;
+       bool clock_set = false;
        struct timespec64 ts;
+       unsigned long flags;
        s32 orig_tai, tai;
        int ret;
 
@@ -2423,6 +2426,7 @@ int do_adjtimex(struct __kernel_timex *txc)
        if (tai != orig_tai) {
                __timekeeping_set_tai_offset(tk, tai);
                timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
+               clock_set = true;
        }
        tk_update_leap_state(tk);
 
@@ -2433,10 +2437,10 @@ int do_adjtimex(struct __kernel_timex *txc)
 
        /* Update the multiplier immediately if frequency was set directly */
        if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
-               timekeeping_advance(TK_ADV_FREQ);
+               clock_set |= timekeeping_advance(TK_ADV_FREQ);
 
-       if (tai != orig_tai)
-               clock_was_set();
+       if (clock_set)
+               clock_was_set(CLOCK_REALTIME);
 
        ntp_notify_cmos_timer();
 
index 9eb11c2..e3d2c23 100644 (file)
@@ -1265,8 +1265,10 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
 static void timer_sync_wait_running(struct timer_base *base)
 {
        if (atomic_read(&base->timer_waiters)) {
+               raw_spin_unlock_irq(&base->lock);
                spin_unlock(&base->expiry_lock);
                spin_lock(&base->expiry_lock);
+               raw_spin_lock_irq(&base->lock);
        }
 }
 
@@ -1457,14 +1459,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
                if (timer->flags & TIMER_IRQSAFE) {
                        raw_spin_unlock(&base->lock);
                        call_timer_fn(timer, fn, baseclk);
-                       base->running_timer = NULL;
                        raw_spin_lock(&base->lock);
+                       base->running_timer = NULL;
                } else {
                        raw_spin_unlock_irq(&base->lock);
                        call_timer_fn(timer, fn, baseclk);
+                       raw_spin_lock_irq(&base->lock);
                        base->running_timer = NULL;
                        timer_sync_wait_running(base);
-                       raw_spin_lock_irq(&base->lock);
                }
        }
 }
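
For context on why base->lock is dropped and re-taken around the expiry_lock cycle: a task cancelling a running timer waits by taking expiry_lock, so the softirq side has to release both locks for that waiter to make progress. The waiter-side shape below is a hedged sketch; the exact helper name and locking flavour in kernel/time/timer.c may differ.

    /*
     *   atomic_inc(&base->timer_waiters);
     *   spin_lock(&base->expiry_lock);     // blocks until the softirq
     *   spin_unlock(&base->expiry_lock);   //  cycles the lock above
     *   atomic_dec(&base->timer_waiters);
     */
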
index 0a315c3..bb8f411 100644 (file)
@@ -521,11 +521,11 @@ static void torture_shuffle_tasks(void)
        struct shuffle_task *stp;
 
        cpumask_setall(shuffle_tmp_mask);
-       get_online_cpus();
+       cpus_read_lock();
 
        /* No point in shuffling if there is only one online CPU (ex: UP) */
        if (num_online_cpus() == 1) {
-               put_online_cpus();
+               cpus_read_unlock();
                return;
        }
 
@@ -541,7 +541,7 @@ static void torture_shuffle_tasks(void)
                set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
        mutex_unlock(&shuffle_task_mutex);
 
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
index d567b17..3ee23f4 100644 (file)
@@ -219,6 +219,11 @@ config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        depends on DYNAMIC_FTRACE_WITH_REGS
        depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 
+config DYNAMIC_FTRACE_WITH_ARGS
+       def_bool y
+       depends on DYNAMIC_FTRACE
+       depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
 config FUNCTION_PROFILER
        bool "Kernel function profiler"
        depends on FUNCTION_TRACER
index b4916ef..fdd1407 100644 (file)
@@ -990,28 +990,29 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_get_numa_node_id_proto;
        case BPF_FUNC_perf_event_read:
                return &bpf_perf_event_read_proto;
-       case BPF_FUNC_probe_write_user:
-               return bpf_get_probe_write_proto();
        case BPF_FUNC_current_task_under_cgroup:
                return &bpf_current_task_under_cgroup_proto;
        case BPF_FUNC_get_prandom_u32:
                return &bpf_get_prandom_u32_proto;
+       case BPF_FUNC_probe_write_user:
+               return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
+                      NULL : bpf_get_probe_write_proto();
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_str_proto;
 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        case BPF_FUNC_probe_read:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_compat_proto;
        case BPF_FUNC_probe_read_str:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_compat_str_proto;
 #endif
 #ifdef CONFIG_CGROUPS
index 7b180f6..7efbc8a 100644 (file)
@@ -3100,6 +3100,7 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 
 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 {
+       bool init_nop = ftrace_need_init_nop();
        struct ftrace_page *pg;
        struct dyn_ftrace *p;
        u64 start, stop;
@@ -3138,8 +3139,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
                         * Do the initial record conversion from mcount jump
                         * to the NOP instructions.
                         */
-                       if (!__is_defined(CC_USING_NOP_MCOUNT) &&
-                           !ftrace_nop_initialize(mod, p))
+                       if (init_nop && !ftrace_nop_initialize(mod, p))
                                break;
 
                        update_cnt++;
index c59dd35..a1adb29 100644 (file)
@@ -2897,14 +2897,26 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
 
 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 {
+       enum event_trigger_type tt = ETT_NONE;
+       struct trace_event_file *file = fbuffer->trace_file;
+
+       if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
+                       fbuffer->entry, &tt))
+               goto discard;
+
        if (static_key_false(&tracepoint_printk_key.key))
                output_printk(fbuffer);
 
        if (static_branch_unlikely(&trace_event_exports_enabled))
                ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
-       event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
-                                   fbuffer->event, fbuffer->entry,
-                                   fbuffer->trace_ctx, fbuffer->regs);
+
+       trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
+                       fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
+
+discard:
+       if (tt)
+               event_triggers_post_call(file, tt);
+
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
@@ -9135,8 +9147,10 @@ static int trace_array_create_dir(struct trace_array *tr)
                return -EINVAL;
 
        ret = event_trace_add_tracer(tr->dir, tr);
-       if (ret)
+       if (ret) {
                tracefs_remove(tr->dir);
+               return ret;
+       }
 
        init_tracer_tracefs(tr, tr->dir);
        __update_tracer_options(tr);
index a180abf..4a0e693 100644 (file)
@@ -1389,38 +1389,6 @@ event_trigger_unlock_commit(struct trace_event_file *file,
                event_triggers_post_call(file, tt);
 }
 
-/**
- * event_trigger_unlock_commit_regs - handle triggers and finish event commit
- * @file: The file pointer associated with the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @trace_ctx: The tracing context flags.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * if the event is soft disabled and should be discarded.
- *
- * Same as event_trigger_unlock_commit() but calls
- * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
- */
-static inline void
-event_trigger_unlock_commit_regs(struct trace_event_file *file,
-                                struct trace_buffer *buffer,
-                                struct ring_buffer_event *event,
-                                void *entry, unsigned int trace_ctx,
-                                struct pt_regs *regs)
-{
-       enum event_trigger_type tt = ETT_NONE;
-
-       if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-               trace_buffer_unlock_commit_regs(file->tr, buffer, event,
-                                               trace_ctx, regs);
-
-       if (tt)
-               event_triggers_post_call(file, tt);
-}
-
 #define FILTER_PRED_INVALID    ((unsigned short)-1)
 #define FILTER_PRED_IS_RIGHT   (1 << 15)
 #define FILTER_PRED_FOLD       (1 << 15)
index 34325f4..a48aa2a 100644 (file)
@@ -65,7 +65,8 @@
        C(INVALID_SORT_MODIFIER,"Invalid sort modifier"),               \
        C(EMPTY_SORT_FIELD,     "Empty sort field"),                    \
        C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"),      \
-       C(INVALID_SORT_FIELD,   "Sort field must be a key or a val"),
+       C(INVALID_SORT_FIELD,   "Sort field must be a key or a val"),   \
+       C(INVALID_STR_OPERAND,  "String type can not be an operand in expression"),
 
 #undef C
 #define C(a, b)                HIST_ERR_##a
@@ -2156,6 +2157,13 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
                ret = PTR_ERR(operand1);
                goto free;
        }
+       if (operand1->flags & HIST_FIELD_FL_STRING) {
+               /* String type can not be the operand of unary operator. */
+               hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+               destroy_hist_field(operand1, 0);
+               ret = -EINVAL;
+               goto free;
+       }
 
        expr->flags |= operand1->flags &
                (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
@@ -2257,6 +2265,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
                operand1 = NULL;
                goto free;
        }
+       if (operand1->flags & HIST_FIELD_FL_STRING) {
+               hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
+               ret = -EINVAL;
+               goto free;
+       }
 
        /* rest of string could be another expression e.g. b+c in a+b+c */
        operand_flags = 0;
@@ -2266,6 +2279,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
                operand2 = NULL;
                goto free;
        }
+       if (operand2->flags & HIST_FIELD_FL_STRING) {
+               hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+               ret = -EINVAL;
+               goto free;
+       }
 
        ret = check_expr_operands(file->tr, operand1, operand2);
        if (ret)
@@ -2287,6 +2305,10 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
 
        expr->operands[0] = operand1;
        expr->operands[1] = operand2;
+
+       /* The operand sizes should be the same, so just pick one */
+       expr->size = operand1->size;
+
        expr->operator = field_op;
        expr->name = expr_str(expr, 0);
        expr->type = kstrdup(operand1->type, GFP_KERNEL);
@@ -3408,6 +3430,8 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
                        event = data->match_data.event;
                }
 
+               if (!event)
+                       goto free;
                /*
                 * At this point, we're looking at a field on another
                 * event.  Because we can't modify a hist trigger on
index a6c0cda..14f46aa 100644 (file)
@@ -327,7 +327,7 @@ static void move_to_next_cpu(void)
 
        get_online_cpus();
        cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
-       next_cpu = cpumask_next(smp_processor_id(), current_mask);
+       next_cpu = cpumask_next(raw_smp_processor_id(), current_mask);
        put_online_cpus();
 
        if (next_cpu >= nr_cpu_ids)
index a7e3c24..b61eefe 100644 (file)
@@ -253,10 +253,40 @@ static struct osnoise_data {
  */
 static bool osnoise_busy;
 
+#ifdef CONFIG_PREEMPT_RT
 /*
  * Print the osnoise header info.
  */
 static void print_osnoise_headers(struct seq_file *s)
+{
+       if (osnoise_data.tainted)
+               seq_puts(s, "# osnoise is tainted!\n");
+
+       seq_puts(s, "#                                _-------=> irqs-off\n");
+       seq_puts(s, "#                               / _------=> need-resched\n");
+       seq_puts(s, "#                              | / _-----=> need-resched-lazy\n");
+       seq_puts(s, "#                              || / _----=> hardirq/softirq\n");
+       seq_puts(s, "#                              ||| / _---=> preempt-depth\n");
+       seq_puts(s, "#                              |||| / _--=> preempt-lazy-depth\n");
+       seq_puts(s, "#                              ||||| / _-=> migrate-disable\n");
+
+       seq_puts(s, "#                              |||||| /          ");
+       seq_puts(s, "                                     MAX\n");
+
+       seq_puts(s, "#                              ||||| /                         ");
+       seq_puts(s, "                    SINGLE      Interference counters:\n");
+
+       seq_puts(s, "#                              |||||||               RUNTIME   ");
+       seq_puts(s, "   NOISE  %% OF CPU  NOISE    +-----------------------------+\n");
+
+       seq_puts(s, "#           TASK-PID      CPU# |||||||   TIMESTAMP    IN US    ");
+       seq_puts(s, "   IN US  AVAILABLE  IN US     HW    NMI    IRQ   SIRQ THREAD\n");
+
+       seq_puts(s, "#              | |         |   |||||||      |           |      ");
+       seq_puts(s, "       |    |            |      |      |      |      |      |\n");
+}
+#else /* CONFIG_PREEMPT_RT */
+static void print_osnoise_headers(struct seq_file *s)
 {
        if (osnoise_data.tainted)
                seq_puts(s, "# osnoise is tainted!\n");
@@ -279,6 +309,7 @@ static void print_osnoise_headers(struct seq_file *s)
        seq_puts(s, "#              | |         |   ||||      |           |      ");
        seq_puts(s, "       |    |            |      |      |      |      |      |\n");
 }
+#endif /* CONFIG_PREEMPT_RT */
 
 /*
  * osnoise_taint - report an osnoise error.
@@ -323,6 +354,24 @@ static void trace_osnoise_sample(struct osnoise_sample *sample)
 /*
  * Print the timerlat header info.
  */
+#ifdef CONFIG_PREEMPT_RT
+static void print_timerlat_headers(struct seq_file *s)
+{
+       seq_puts(s, "#                                _-------=> irqs-off\n");
+       seq_puts(s, "#                               / _------=> need-resched\n");
+       seq_puts(s, "#                              | / _-----=> need-resched-lazy\n");
+       seq_puts(s, "#                              || / _----=> hardirq/softirq\n");
+       seq_puts(s, "#                              ||| / _---=> preempt-depth\n");
+       seq_puts(s, "#                              |||| / _--=> preempt-lazy-depth\n");
+       seq_puts(s, "#                              ||||| / _-=> migrate-disable\n");
+       seq_puts(s, "#                              |||||| /\n");
+       seq_puts(s, "#                              |||||||             ACTIVATION\n");
+       seq_puts(s, "#           TASK-PID      CPU# |||||||   TIMESTAMP    ID     ");
+       seq_puts(s, "       CONTEXT                LATENCY\n");
+       seq_puts(s, "#              | |         |   |||||||      |         |      ");
+       seq_puts(s, "            |                       |\n");
+}
+#else /* CONFIG_PREEMPT_RT */
 static void print_timerlat_headers(struct seq_file *s)
 {
        seq_puts(s, "#                                _-----=> irqs-off\n");
@@ -336,6 +385,7 @@ static void print_timerlat_headers(struct seq_file *s)
        seq_puts(s, "#              | |         |   ||||      |         |      ");
        seq_puts(s, "            |                       |\n");
 }
+#endif /* CONFIG_PREEMPT_RT */
 
 /*
  * Record a timerlat_sample into the tracer buffer.
@@ -1025,9 +1075,13 @@ diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *
 /*
  * osnoise_stop_tracing - Stop tracing and the tracer.
  */
-static void osnoise_stop_tracing(void)
+static __always_inline void osnoise_stop_tracing(void)
 {
        struct trace_array *tr = osnoise_trace;
+
+       trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
+                       "stop tracing hit on cpu %d\n", smp_processor_id());
+
        tracer_tracing_off(tr);
 }
 
index fc32821..efd14c7 100644 (file)
 #include <linux/sched/task.h>
 #include <linux/static_key.h>
 
+enum tp_func_state {
+       TP_FUNC_0,
+       TP_FUNC_1,
+       TP_FUNC_2,
+       TP_FUNC_N,
+};
+
 extern tracepoint_ptr_t __start___tracepoints_ptrs[];
 extern tracepoint_ptr_t __stop___tracepoints_ptrs[];
 
 DEFINE_SRCU(tracepoint_srcu);
 EXPORT_SYMBOL_GPL(tracepoint_srcu);
 
+enum tp_transition_sync {
+       TP_TRANSITION_SYNC_1_0_1,
+       TP_TRANSITION_SYNC_N_2_1,
+
+       _NR_TP_TRANSITION_SYNC,
+};
+
+struct tp_transition_snapshot {
+       unsigned long rcu;
+       unsigned long srcu;
+       bool ongoing;
+};
+
+/* Protected by tracepoints_mutex */
+static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];
+
+static void tp_rcu_get_state(enum tp_transition_sync sync)
+{
+       struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
+
+       /* Keep the latest get_state snapshot. */
+       snapshot->rcu = get_state_synchronize_rcu();
+       snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
+       snapshot->ongoing = true;
+}
+
+static void tp_rcu_cond_sync(enum tp_transition_sync sync)
+{
+       struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
+
+       if (!snapshot->ongoing)
+               return;
+       cond_synchronize_rcu(snapshot->rcu);
+       if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
+               synchronize_srcu(&tracepoint_srcu);
+       snapshot->ongoing = false;
+}
+
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
 
@@ -246,26 +291,29 @@ static void *func_remove(struct tracepoint_func **funcs,
        return old;
 }
 
-static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs, bool sync)
+/*
+ * Count the number of functions (enum tp_func_state) in a tp_funcs array.
+ */
+static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
+{
+       if (!tp_funcs)
+               return TP_FUNC_0;
+       if (!tp_funcs[1].func)
+               return TP_FUNC_1;
+       if (!tp_funcs[2].func)
+               return TP_FUNC_2;
+       return TP_FUNC_N;       /* 3 or more */
+}
+
+static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
 {
        void *func = tp->iterator;
 
        /* Synthetic events do not have static call sites */
        if (!tp->static_call_key)
                return;
-
-       if (!tp_funcs[1].func) {
+       if (nr_func_state(tp_funcs) == TP_FUNC_1)
                func = tp_funcs[0].func;
-               /*
-                * If going from the iterator back to a single caller,
-                * we need to synchronize with __DO_TRACE to make sure
-                * that the data passed to the callback is the one that
-                * belongs to that callback.
-                */
-               if (sync)
-                       tracepoint_synchronize_unregister();
-       }
-
        __static_call_update(tp->static_call_key, tp->static_call_tramp, func);
 }
 
@@ -299,9 +347,41 @@ static int tracepoint_add_func(struct tracepoint *tp,
         * a pointer to it.  This array is referenced by __DO_TRACE from
         * include/linux/tracepoint.h using rcu_dereference_sched().
         */
-       tracepoint_update_call(tp, tp_funcs, false);
-       rcu_assign_pointer(tp->funcs, tp_funcs);
-       static_key_enable(&tp->key);
+       switch (nr_func_state(tp_funcs)) {
+       case TP_FUNC_1:         /* 0->1 */
+               /*
+                * Make sure new static func never uses old data after a
+                * 1->0->1 transition sequence.
+                */
+               tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
+               /* Set static call to first function */
+               tracepoint_update_call(tp, tp_funcs);
+               /* Both iterator and static call handle NULL tp->funcs */
+               rcu_assign_pointer(tp->funcs, tp_funcs);
+               static_key_enable(&tp->key);
+               break;
+       case TP_FUNC_2:         /* 1->2 */
+               /* Set iterator static call */
+               tracepoint_update_call(tp, tp_funcs);
+               /*
+                * Iterator callback installed before updating tp->funcs.
+                * Requires ordering between RCU assign/dereference and
+                * static call update/call.
+                */
+               fallthrough;
+       case TP_FUNC_N:         /* N->N+1 (N>1) */
+               rcu_assign_pointer(tp->funcs, tp_funcs);
+               /*
+                * Make sure static func never uses incorrect data after a
+                * N->...->2->1 (N>1) transition sequence.
+                */
+               if (tp_funcs[0].data != old[0].data)
+                       tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
 
        release_probes(old);
        return 0;
@@ -328,17 +408,52 @@ static int tracepoint_remove_func(struct tracepoint *tp,
                /* Failed allocating new tp_funcs, replaced func with stub */
                return 0;
 
-       if (!tp_funcs) {
+       switch (nr_func_state(tp_funcs)) {
+       case TP_FUNC_0:         /* 1->0 */
                /* Removed last function */
                if (tp->unregfunc && static_key_enabled(&tp->key))
                        tp->unregfunc();
 
                static_key_disable(&tp->key);
+               /* Set iterator static call */
+               tracepoint_update_call(tp, tp_funcs);
+               /* Both iterator and static call handle NULL tp->funcs */
+               rcu_assign_pointer(tp->funcs, NULL);
+               /*
+                * Make sure new static func never uses old data after a
+                * 1->0->1 transition sequence.
+                */
+               tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
+               break;
+       case TP_FUNC_1:         /* 2->1 */
                rcu_assign_pointer(tp->funcs, tp_funcs);
-       } else {
+               /*
+                * Make sure static func never uses incorrect data after a
+                * N->...->2->1 (N>2) transition sequence. If the first
+                * element's data has changed, then force the synchronization
+                * to prevent current readers that have loaded the old data
+                * from calling the new function.
+                */
+               if (tp_funcs[0].data != old[0].data)
+                       tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+               tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
+               /* Set static call to first function */
+               tracepoint_update_call(tp, tp_funcs);
+               break;
+       case TP_FUNC_2:         /* N->N-1 (N>2) */
+               fallthrough;
+       case TP_FUNC_N:
                rcu_assign_pointer(tp->funcs, tp_funcs);
-               tracepoint_update_call(tp, tp_funcs,
-                                      tp_funcs[0].func != old[0].func);
+               /*
+                * Make sure static func never uses incorrect data after a
+                * N->...->2->1 (N>2) transition sequence.
+                */
+               if (tp_funcs[0].data != old[0].data)
+                       tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
        }
        release_probes(old);
        return 0;
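
The two switch statements above encode a small state machine over the number of registered probes. A summary of the transitions, derived from the hunks in this file and illustrative rather than normative:

    /*
     *   register:   0 -> 1    wait out a pending 1->0 grace period
     *                         (tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1)),
     *                         then point the static call at the single probe
     *               1 -> 2    switch the static call to the iterator first,
     *                         then publish the new array
     *               N -> N+1  publish the new array; snapshot RCU state if
     *                         the first element's data changed
     *
     *   unregister: 1 -> 0    disable the key, switch back to the iterator,
     *                         clear tp->funcs, snapshot RCU state for a
     *                         later 0 -> 1 transition
     *               2 -> 1    publish the array, wait for readers that may
     *                         still hold the old first element's data, then
     *                         switch the static call to the remaining probe
     *               N -> N-1  publish the array; snapshot RCU state if the
     *                         first element's data changed
     */
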
index 87799e2..bb51849 100644 (file)
@@ -58,14 +58,17 @@ static struct ctl_table_root set_root = {
        .permissions = set_permissions,
 };
 
-#define UCOUNT_ENTRY(name)                             \
-       {                                               \
-               .procname       = name,                 \
-               .maxlen         = sizeof(int),          \
-               .mode           = 0644,                 \
-               .proc_handler   = proc_dointvec_minmax, \
-               .extra1         = SYSCTL_ZERO,          \
-               .extra2         = SYSCTL_INT_MAX,       \
+static long ue_zero = 0;
+static long ue_int_max = INT_MAX;
+
+#define UCOUNT_ENTRY(name)                                     \
+       {                                                       \
+               .procname       = name,                         \
+               .maxlen         = sizeof(long),                 \
+               .mode           = 0644,                         \
+               .proc_handler   = proc_doulongvec_minmax,       \
+               .extra1         = &ue_zero,                     \
+               .extra2         = &ue_int_max,                  \
        }
 static struct ctl_table user_table[] = {
        UCOUNT_ENTRY("max_user_namespaces"),
@@ -160,6 +163,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 {
        struct hlist_head *hashent = ucounts_hashentry(ns, uid);
        struct ucounts *ucounts, *new;
+       long overflow;
 
        spin_lock_irq(&ucounts_lock);
        ucounts = find_ucounts(ns, uid, hashent);
@@ -184,8 +188,12 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
                        return new;
                }
        }
+       overflow = atomic_add_negative(1, &ucounts->count);
        spin_unlock_irq(&ucounts_lock);
-       ucounts = get_ucounts(ucounts);
+       if (overflow) {
+               put_ucounts(ucounts);
+               return NULL;
+       }
        return ucounts;
 }
 
@@ -193,8 +201,7 @@ void put_ucounts(struct ucounts *ucounts)
 {
        unsigned long flags;
 
-       if (atomic_dec_and_test(&ucounts->count)) {
-               spin_lock_irqsave(&ucounts_lock, flags);
+       if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
                hlist_del_init(&ucounts->node);
                spin_unlock_irqrestore(&ucounts_lock, flags);
                kfree(ucounts);
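
The alloc path above uses atomic_add_negative() as a saturation guard: once the reference count has been pushed past INT_MAX, the new reference is refused instead of silently wrapping. A minimal sketch of the same pattern on a plain atomic_t; the helper name is hypothetical, not the ucounts API.

    static bool counter_get(atomic_t *count)
    {
            /* true once the increment makes the counter negative (overflow) */
            if (atomic_add_negative(1, count)) {
                    atomic_dec(count);      /* undo and refuse the reference */
                    return false;
            }
            return true;
    }
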
index 5ddd575..12b805d 100644 (file)
@@ -1235,7 +1235,7 @@ config PROVE_LOCKING
        depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
        select LOCKDEP
        select DEBUG_SPINLOCK
-       select DEBUG_MUTEXES
+       select DEBUG_MUTEXES if !PREEMPT_RT
        select DEBUG_RT_MUTEXES if RT_MUTEXES
        select DEBUG_RWSEMS
        select DEBUG_WW_MUTEX_SLOWPATH
@@ -1299,7 +1299,7 @@ config LOCK_STAT
        depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
        select LOCKDEP
        select DEBUG_SPINLOCK
-       select DEBUG_MUTEXES
+       select DEBUG_MUTEXES if !PREEMPT_RT
        select DEBUG_RT_MUTEXES if RT_MUTEXES
        select DEBUG_LOCK_ALLOC
        default n
@@ -1335,7 +1335,7 @@ config DEBUG_SPINLOCK
 
 config DEBUG_MUTEXES
        bool "Mutex debugging: basic checks"
-       depends on DEBUG_KERNEL
+       depends on DEBUG_KERNEL && !PREEMPT_RT
        help
         This feature allows mutex semantics violations to be detected and
         reported.
@@ -1345,7 +1345,8 @@ config DEBUG_WW_MUTEX_SLOWPATH
        depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
        select DEBUG_LOCK_ALLOC
        select DEBUG_SPINLOCK
-       select DEBUG_MUTEXES
+       select DEBUG_MUTEXES if !PREEMPT_RT
+       select DEBUG_RT_MUTEXES if PREEMPT_RT
        help
         This feature enables slowpath testing for w/w mutex users by
         injecting additional -EDEADLK wound/backoff cases. Together with
@@ -1368,7 +1369,7 @@ config DEBUG_LOCK_ALLOC
        bool "Lock debugging: detect incorrect freeing of live locks"
        depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
        select DEBUG_SPINLOCK
-       select DEBUG_MUTEXES
+       select DEBUG_MUTEXES if !PREEMPT_RT
        select DEBUG_RT_MUTEXES if RT_MUTEXES
        select LOCKDEP
        help
@@ -1679,33 +1680,6 @@ config DEBUG_WQ_FORCE_RR_CPU
          feature by default.  When enabled, memory and cache locality will
          be impacted.
 
-config DEBUG_BLOCK_EXT_DEVT
-       bool "Force extended block device numbers and spread them"
-       depends on DEBUG_KERNEL
-       depends on BLOCK
-       default n
-       help
-         BIG FAT WARNING: ENABLING THIS OPTION MIGHT BREAK BOOTING ON
-         SOME DISTRIBUTIONS.  DO NOT ENABLE THIS UNLESS YOU KNOW WHAT
-         YOU ARE DOING.  Distros, please enable this and fix whatever
-         is broken.
-
-         Conventionally, block device numbers are allocated from
-         predetermined contiguous area.  However, extended block area
-         may introduce non-contiguous block device numbers.  This
-         option forces most block device numbers to be allocated from
-         the extended space and spreads them to discover kernel or
-         userland code paths which assume predetermined contiguous
-         device number allocation.
-
-         Note that turning on this debug option shuffles all the
-         device numbers for all IDE and SCSI devices including libata
-         ones, so root partition specified using device number
-         directly (via rdev or root=MAJ:MIN) won't work anymore.
-         Textual device names (root=/dev/sdXn) will continue to work.
-
-         Say N if you are unsure.
-
 config CPU_HOTPLUG_STATE_CONTROL
        bool "Enable CPU hotplug state control"
        depends on DEBUG_KERNEL
@@ -1971,6 +1945,13 @@ config FAIL_MMC_REQUEST
          and to test how the mmc host driver handles retries from
          the block device.
 
+config FAIL_SUNRPC
+       bool "Fault-injection capability for SunRPC"
+       depends on FAULT_INJECTION_DEBUG_FS && SUNRPC_DEBUG
+       help
+         Provide fault-injection capability for SunRPC and
+         its consumers.
+
 config FAULT_INJECTION_STACKTRACE_FILTER
        bool "stacktrace filter for fault-injection capabilities"
        depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
index 14c032d..545ccbd 100644 (file)
@@ -128,3 +128,6 @@ config CRYPTO_LIB_CHACHA20POLY1305
 
 config CRYPTO_LIB_SHA256
        tristate
+
+config CRYPTO_LIB_SM4
+       tristate
index 3a43562..73205ed 100644 (file)
@@ -38,6 +38,9 @@ libpoly1305-y                                 += poly1305.o
 obj-$(CONFIG_CRYPTO_LIB_SHA256)                        += libsha256.o
 libsha256-y                                    := sha256.o
 
+obj-$(CONFIG_CRYPTO_LIB_SM4)                   += libsm4.o
+libsm4-y                                       := sm4.o
+
 ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
 libblake2s-y                                   += blake2s-selftest.o
 libchacha20poly1305-y                          += chacha20poly1305-selftest.o
index c64ac8b..4055aa5 100644 (file)
@@ -73,7 +73,7 @@ void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
 }
 EXPORT_SYMBOL(blake2s256_hmac);
 
-static int __init mod_init(void)
+static int __init blake2s_mod_init(void)
 {
        if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
            WARN_ON(!blake2s_selftest()))
@@ -81,12 +81,12 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit blake2s_mod_exit(void)
 {
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(blake2s_mod_init);
+module_exit(blake2s_mod_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("BLAKE2s hash function");
 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
index c2fcdb9..fa6a944 100644 (file)
@@ -354,7 +354,7 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len
 }
 EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace);
 
-static int __init mod_init(void)
+static int __init chacha20poly1305_init(void)
 {
        if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
            WARN_ON(!chacha20poly1305_selftest()))
@@ -362,12 +362,12 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit chacha20poly1305_exit(void)
 {
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(chacha20poly1305_init);
+module_exit(chacha20poly1305_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction");
 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
index fb29739..064b352 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 
-static int __init mod_init(void)
+static int __init curve25519_init(void)
 {
        if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
            WARN_ON(!curve25519_selftest()))
@@ -21,12 +21,12 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit curve25519_exit(void)
 {
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(curve25519_init);
+module_exit(curve25519_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Curve25519 scalar multiplication");
diff --git a/lib/crypto/sm4.c b/lib/crypto/sm4.c
new file mode 100644 (file)
index 0000000..633b59f
--- /dev/null
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4, as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2018 ARM Limited or its affiliates.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <crypto/sm4.h>
+
+static const u32 fk[4] = {
+       0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
+};
+
+static const u32 __cacheline_aligned ck[32] = {
+       0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
+       0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
+       0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
+       0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
+       0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
+       0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
+       0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
+       0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
+};
+
+static const u8 __cacheline_aligned sbox[256] = {
+       0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
+       0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
+       0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
+       0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
+       0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
+       0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
+       0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
+       0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
+       0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
+       0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
+       0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
+       0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
+       0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
+       0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
+       0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
+       0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
+       0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
+       0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
+       0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
+       0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
+       0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
+       0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
+       0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
+       0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
+       0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
+       0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
+       0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
+       0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
+       0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
+       0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
+       0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
+       0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
+};
+
+static inline u32 sm4_t_non_lin_sub(u32 x)
+{
+       u32 out;
+
+       out  = (u32)sbox[x & 0xff];
+       out |= (u32)sbox[(x >> 8) & 0xff] << 8;
+       out |= (u32)sbox[(x >> 16) & 0xff] << 16;
+       out |= (u32)sbox[(x >> 24) & 0xff] << 24;
+
+       return out;
+}
+
+static inline u32 sm4_key_lin_sub(u32 x)
+{
+       return x ^ rol32(x, 13) ^ rol32(x, 23);
+}
+
+static inline u32 sm4_enc_lin_sub(u32 x)
+{
+       return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
+}
+
+static inline u32 sm4_key_sub(u32 x)
+{
+       return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static inline u32 sm4_enc_sub(u32 x)
+{
+       return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk)
+{
+       return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk);
+}
+
+
+/**
+ * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx:       The location where the computed key will be stored.
+ * @in_key:    The supplied key.
+ * @key_len:   The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ */
+int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
+                         unsigned int key_len)
+{
+       u32 rk[4];
+       const u32 *key = (u32 *)in_key;
+       int i;
+
+       if (key_len != SM4_KEY_SIZE)
+               return -EINVAL;
+
+       rk[0] = get_unaligned_be32(&key[0]) ^ fk[0];
+       rk[1] = get_unaligned_be32(&key[1]) ^ fk[1];
+       rk[2] = get_unaligned_be32(&key[2]) ^ fk[2];
+       rk[3] = get_unaligned_be32(&key[3]) ^ fk[3];
+
+       for (i = 0; i < 32; i += 4) {
+               rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]);
+               rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]);
+               rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]);
+               rk[3] ^= sm4_key_sub(rk[0] ^ rk[1] ^ rk[2] ^ ck[i + 3]);
+
+               ctx->rkey_enc[i + 0] = rk[0];
+               ctx->rkey_enc[i + 1] = rk[1];
+               ctx->rkey_enc[i + 2] = rk[2];
+               ctx->rkey_enc[i + 3] = rk[3];
+               ctx->rkey_dec[31 - 0 - i] = rk[0];
+               ctx->rkey_dec[31 - 1 - i] = rk[1];
+               ctx->rkey_dec[31 - 2 - i] = rk[2];
+               ctx->rkey_dec[31 - 3 - i] = rk[3];
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(sm4_expandkey);
+
+/**
+ * sm4_crypt_block - Encrypt or decrypt a single SM4 block
+ * @rk:                The rkey_enc for encrypt or rkey_dec for decrypt
+ * @out:       Buffer to store output data
+ * @in:        Buffer containing the input data
+ */
+void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in)
+{
+       u32 x[4], i;
+
+       x[0] = get_unaligned_be32(in + 0 * 4);
+       x[1] = get_unaligned_be32(in + 1 * 4);
+       x[2] = get_unaligned_be32(in + 2 * 4);
+       x[3] = get_unaligned_be32(in + 3 * 4);
+
+       for (i = 0; i < 32; i += 4) {
+               x[0] = sm4_round(x[0], x[1], x[2], x[3], rk[i + 0]);
+               x[1] = sm4_round(x[1], x[2], x[3], x[0], rk[i + 1]);
+               x[2] = sm4_round(x[2], x[3], x[0], x[1], rk[i + 2]);
+               x[3] = sm4_round(x[3], x[0], x[1], x[2], rk[i + 3]);
+       }
+
+       put_unaligned_be32(x[3 - 0], out + 0 * 4);
+       put_unaligned_be32(x[3 - 1], out + 1 * 4);
+       put_unaligned_be32(x[3 - 2], out + 2 * 4);
+       put_unaligned_be32(x[3 - 3], out + 3 * 4);
+}
+EXPORT_SYMBOL_GPL(sm4_crypt_block);
+
+MODULE_DESCRIPTION("Generic SM4 library");
+MODULE_LICENSE("GPL v2");
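
A minimal usage sketch for the two exported helpers. It assumes SM4_KEY_SIZE, SM4_BLOCK_SIZE and struct sm4_ctx (rkey_enc/rkey_dec) come from <crypto/sm4.h>, as the code above implies; the wrapper function itself is hypothetical.

    #include <crypto/sm4.h>

    static int sm4_encrypt_one_block(const u8 *key, const u8 *in, u8 *out)
    {
            struct sm4_ctx ctx;
            int err;

            err = sm4_expandkey(&ctx, key, SM4_KEY_SIZE);
            if (err)
                    return err;

            /* ctx.rkey_enc encrypts, ctx.rkey_dec decrypts */
            sm4_crypt_block(ctx.rkey_enc, out, in);
            return 0;
    }
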
index 9e14ae0..6946f8e 100644 (file)
@@ -557,7 +557,12 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
        struct debug_obj *obj;
        unsigned long flags;
 
-       fill_pool();
+       /*
+        * On RT enabled kernels the pool refill must happen in preemptible
+        * context:
+        */
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+               fill_pool();
 
        db = get_bucket((unsigned long) addr);
 
index c0d67c5..60be9e2 100644 (file)
@@ -19,7 +19,7 @@
  */
 int devmem_is_allowed(unsigned long pfn)
 {
-       if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+       if (iomem_is_exclusive(PFN_PHYS(pfn)))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
index ced5c15..a1a7dfa 100644 (file)
@@ -241,5 +241,36 @@ int linear_range_get_selector_high(const struct linear_range *r,
 }
 EXPORT_SYMBOL_GPL(linear_range_get_selector_high);
 
+/**
+ * linear_range_get_selector_within - return linear range selector for value
+ * @r:         pointer to linear range where selector is looked from
+ * @val:       value for which the selector is searched
+ * @selector:  address where found selector value is updated
+ *
+ * Return the selector whose range value is the closest match for the
+ * given input value. A value matches if it is equal to or lower than
+ * the given value. If the given value is higher than the maximum range
+ * value, the maximum selector is returned.
+ */
+void linear_range_get_selector_within(const struct linear_range *r,
+                                     unsigned int val, unsigned int *selector)
+{
+       if (r->min > val) {
+               *selector = r->min_sel;
+               return;
+       }
+
+       if (linear_range_get_max_value(r) < val) {
+               *selector = r->max_sel;
+               return;
+       }
+
+       if (r->step == 0)
+               *selector = r->min_sel;
+       else
+               *selector = (val - r->min) / r->step + r->min_sel;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_within);
+
 MODULE_DESCRIPTION("linear-ranges helper");
 MODULE_LICENSE("GPL");
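
A worked example of the new helper, assuming the usual struct linear_range fields (min, min_sel, max_sel, step) from <linux/linear_range.h>; the concrete numbers are illustrative only.

    /*
     *   struct linear_range r = {
     *           .min = 1000, .min_sel = 0, .max_sel = 10, .step = 50,
     *   };
     *   unsigned int sel;
     *
     *   linear_range_get_selector_within(&r, 1120, &sel);
     *   // sel == 2: value 1100 is the closest match not above 1120
     *   linear_range_get_selector_within(&r, 9999, &sel);
     *   // sel == 10: above the range maximum (1500), clamped to max_sel
     */
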
index 9a75ca3..bc81419 100644 (file)
@@ -148,7 +148,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
                return 0;       /* no need to do it */
 
        if (a->d) {
-               p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
+               p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
                if (!p)
                        return -ENOMEM;
                memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
index 8b7d623..59149bf 100644 (file)
@@ -3,10 +3,12 @@
 #include <linux/spinlock.h>
 #include <linux/once.h>
 #include <linux/random.h>
+#include <linux/module.h>
 
 struct once_work {
        struct work_struct work;
        struct static_key_true *key;
+       struct module *module;
 };
 
 static void once_deferred(struct work_struct *w)
@@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
        work = container_of(w, struct once_work, work);
        BUG_ON(!static_key_enabled(work->key));
        static_branch_disable(work->key);
+       module_put(work->module);
        kfree(work);
 }
 
-static void once_disable_jump(struct static_key_true *key)
+static void once_disable_jump(struct static_key_true *key, struct module *mod)
 {
        struct once_work *w;
 
@@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key)
 
        INIT_WORK(&w->work, once_deferred);
        w->key = key;
+       w->module = mod;
+       __module_get(mod);
        schedule_work(&w->work);
 }
 
@@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags)
 EXPORT_SYMBOL(__do_once_start);
 
 void __do_once_done(bool *done, struct static_key_true *once_key,
-                   unsigned long *flags)
+                   unsigned long *flags, struct module *mod)
        __releases(once_lock)
 {
        *done = true;
        spin_unlock_irqrestore(&once_lock, *flags);
-       once_disable_jump(once_key);
+       once_disable_jump(once_key, mod);
 }
 EXPORT_SYMBOL(__do_once_done);
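
The module pointer threaded through here exists because once_disable_jump() defers the static-branch disable to a workqueue, and that work dereferences a static key that may live in module memory; the module therefore has to stay pinned until the work has run. The shape of the pattern, independent of the once machinery:

    /*
     *   __module_get(mod);            // before schedule_work()
     *   schedule_work(&w->work);
     *   ...
     *   module_put(work->module);     // in the work fn, after the last access
     */
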
index 77bd0b1..b2de45a 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 
+#include <asm/unaligned.h>
 #include <asm/byteorder.h>
 #include <asm/word-at-a-time.h>
 #include <asm/page.h>
@@ -935,6 +936,21 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
        const unsigned char *su1, *su2;
        int res = 0;
 
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (count >= sizeof(unsigned long)) {
+               const unsigned long *u1 = cs;
+               const unsigned long *u2 = ct;
+               do {
+                       if (get_unaligned(u1) != get_unaligned(u2))
+                               break;
+                       u1++;
+                       u2++;
+                       count -= sizeof(unsigned long);
+               } while (count >= sizeof(unsigned long));
+               cs = u1;
+               ct = u2;
+       }
+#endif
        for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
                if ((res = *su1 - *su2) != 0)
                        break;
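
The fast path above is only compiled on architectures that select CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS; get_unaligned() keeps the word loads legal for arbitrarily aligned buffers. A rough cost example on a 64-bit build, assuming two 4096-byte buffers that differ only in the last byte:

    /*
     *   before: 4096 byte loads per buffer
     *   after:   512 word loads per buffer, then at most 8 byte compares
     *            to locate the differing byte and produce the signed result
     */
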
index 864554e..906b598 100644 (file)
@@ -485,13 +485,13 @@ static int __init test_lockup_init(void)
                       offsetof(spinlock_t, lock.wait_lock.magic),
                       SPINLOCK_MAGIC) ||
            test_magic(lock_rwlock_ptr,
-                      offsetof(rwlock_t, rtmutex.wait_lock.magic),
+                      offsetof(rwlock_t, rwbase.rtmutex.wait_lock.magic),
                       SPINLOCK_MAGIC) ||
            test_magic(lock_mutex_ptr,
-                      offsetof(struct mutex, lock.wait_lock.magic),
+                      offsetof(struct mutex, rtmutex.wait_lock.magic),
                       SPINLOCK_MAGIC) ||
            test_magic(lock_rwsem_ptr,
-                      offsetof(struct rw_semaphore, rtmutex.wait_lock.magic),
+                      offsetof(struct rw_semaphore, rwbase.rtmutex.wait_lock.magic),
                       SPINLOCK_MAGIC))
                return -EINVAL;
 #else
@@ -502,7 +502,7 @@ static int __init test_lockup_init(void)
                       offsetof(rwlock_t, magic),
                       RWLOCK_MAGIC) ||
            test_magic(lock_mutex_ptr,
-                      offsetof(struct mutex, wait_lock.rlock.magic),
+                      offsetof(struct mutex, wait_lock.magic),
                       SPINLOCK_MAGIC) ||
            test_magic(lock_rwsem_ptr,
                       offsetof(struct rw_semaphore, wait_lock.magic),
index f5561ea..cd06dca 100644 (file)
@@ -807,6 +807,7 @@ struct backing_dev_info *bdi_alloc(int node_id)
        bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
        bdi->ra_pages = VM_READAHEAD_PAGES;
        bdi->io_pages = VM_READAHEAD_PAGES;
+       timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
        return bdi;
 }
 EXPORT_SYMBOL(bdi_alloc);
@@ -928,6 +929,8 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
 
 void bdi_unregister(struct backing_dev_info *bdi)
 {
+       del_timer_sync(&bdi->laptop_mode_wb_timer);
+
        /* make sure nobody finds us on the bdi_list anymore */
        bdi_remove_from_list(bdi);
        wb_shutdown(&bdi->wb);
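
Together with the mm/page-writeback.c hunk further down (which drops the CONFIG_BLOCK guard around laptop_mode_timer_fn()), the two hunks above arm the laptop-mode timer when the bdi is allocated and quiesce it with del_timer_sync() before the bdi is torn down. A small sketch of that alloc/teardown pairing, using hypothetical names:

#include <linux/timer.h>
#include <linux/slab.h>

struct demo_bdi {
	struct timer_list wb_timer;
};

static void demo_timer_fn(struct timer_list *t)
{
	struct demo_bdi *d = from_timer(d, t, wb_timer);

	/* Safe to touch *d: teardown waits for us via del_timer_sync(). */
	(void)d;
}

static struct demo_bdi *demo_alloc(void)
{
	struct demo_bdi *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (d)
		timer_setup(&d->wb_timer, demo_timer_fn, 0);	/* armed later */
	return d;
}

static void demo_unregister(struct demo_bdi *d)
{
	del_timer_sync(&d->wb_timer);	/* no handler may run past this point */
	kfree(d);
}
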
index d1458ec..920e8dc 100644 (file)
@@ -76,8 +76,9 @@
  *      ->swap_lock            (exclusive_swap_page, others)
  *        ->i_pages lock
  *
- *  ->i_mutex
- *    ->i_mmap_rwsem           (truncate->unmap_mapping_range)
+ *  ->i_rwsem
+ *    ->invalidate_lock                (acquired by fs in truncate path)
+ *      ->i_mmap_rwsem         (truncate->unmap_mapping_range)
  *
  *  ->mmap_lock
  *    ->i_mmap_rwsem
  *        ->i_pages lock       (arch-dependent flush_dcache_mmap_lock)
  *
  *  ->mmap_lock
- *    ->lock_page              (access_process_vm)
+ *    ->invalidate_lock                (filemap_fault)
+ *      ->lock_page            (filemap_fault, access_process_vm)
  *
- *  ->i_mutex                  (generic_perform_write)
+ *  ->i_rwsem                  (generic_perform_write)
  *    ->mmap_lock              (fault_in_pages_readable->do_page_fault)
  *
  *  bdi->wb.list_lock
@@ -377,6 +379,32 @@ static int filemap_check_and_keep_errors(struct address_space *mapping)
        return 0;
 }
 
+/**
+ * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
+ * @mapping:   address space structure to write
+ * @wbc:       the writeback_control controlling the writeout
+ *
+ * Call writepages on the mapping using the provided wbc to control the
+ * writeout.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+int filemap_fdatawrite_wbc(struct address_space *mapping,
+                          struct writeback_control *wbc)
+{
+       int ret;
+
+       if (!mapping_can_writeback(mapping) ||
+           !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+               return 0;
+
+       wbc_attach_fdatawrite_inode(wbc, mapping->host);
+       ret = do_writepages(mapping, wbc);
+       wbc_detach_inode(wbc);
+       return ret;
+}
+EXPORT_SYMBOL(filemap_fdatawrite_wbc);
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:   address space structure to write
@@ -397,7 +425,6 @@ static int filemap_check_and_keep_errors(struct address_space *mapping)
 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                loff_t end, int sync_mode)
 {
-       int ret;
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
                .nr_to_write = LONG_MAX,
@@ -405,14 +432,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                .range_end = end,
        };
 
-       if (!mapping_can_writeback(mapping) ||
-           !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
-               return 0;
-
-       wbc_attach_fdatawrite_inode(&wbc, mapping->host);
-       ret = do_writepages(mapping, &wbc);
-       wbc_detach_inode(&wbc);
-       return ret;
+       return filemap_fdatawrite_wbc(mapping, &wbc);
 }
 
 static inline int __filemap_fdatawrite(struct address_space *mapping,
@@ -1007,6 +1027,44 @@ struct page *__page_cache_alloc(gfp_t gfp)
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
 
+/*
+ * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
+ *
+ * Exclusively lock the invalidate_lock of each passed mapping that is not NULL.
+ *
+ * @mapping1: the first mapping to lock
+ * @mapping2: the second mapping to lock
+ */
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+                                struct address_space *mapping2)
+{
+       if (mapping1 > mapping2)
+               swap(mapping1, mapping2);
+       if (mapping1)
+               down_write(&mapping1->invalidate_lock);
+       if (mapping2 && mapping1 != mapping2)
+               down_write_nested(&mapping2->invalidate_lock, 1);
+}
+EXPORT_SYMBOL(filemap_invalidate_lock_two);
+
+/*
+ * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
+ *
+ * Unlock the exclusively held invalidate_lock of each passed mapping that is not NULL.
+ *
+ * @mapping1: the first mapping to unlock
+ * @mapping2: the second mapping to unlock
+ */
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+                                  struct address_space *mapping2)
+{
+       if (mapping1)
+               up_write(&mapping1->invalidate_lock);
+       if (mapping2 && mapping1 != mapping2)
+               up_write(&mapping2->invalidate_lock);
+}
+EXPORT_SYMBOL(filemap_invalidate_unlock_two);
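
filemap_invalidate_lock_two() above avoids an ABBA deadlock by always acquiring the two rwsems in pointer order and annotating the second acquisition for lockdep with down_write_nested(). The same ordering rule in a self-contained userspace form (hypothetical names, NULL handling omitted):

#include <pthread.h>

struct obj {
	pthread_mutex_t lock;
};

/*
 * Always acquire the lower-addressed object first so that two threads
 * locking the same pair, in either argument order, cannot deadlock.
 */
static void lock_pair(struct obj *a, struct obj *b)
{
	if (a > b) {
		struct obj *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	if (b != a)
		pthread_mutex_lock(&b->lock);
}

static void unlock_pair(struct obj *a, struct obj *b)
{
	pthread_mutex_unlock(&a->lock);
	if (b != a)
		pthread_mutex_unlock(&b->lock);
}
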
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
@@ -2368,20 +2426,30 @@ static int filemap_update_page(struct kiocb *iocb,
 {
        int error;
 
+       if (iocb->ki_flags & IOCB_NOWAIT) {
+               if (!filemap_invalidate_trylock_shared(mapping))
+                       return -EAGAIN;
+       } else {
+               filemap_invalidate_lock_shared(mapping);
+       }
+
        if (!trylock_page(page)) {
+               error = -EAGAIN;
                if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
-                       return -EAGAIN;
+                       goto unlock_mapping;
                if (!(iocb->ki_flags & IOCB_WAITQ)) {
+                       filemap_invalidate_unlock_shared(mapping);
                        put_and_wait_on_page_locked(page, TASK_KILLABLE);
                        return AOP_TRUNCATED_PAGE;
                }
                error = __lock_page_async(page, iocb->ki_waitq);
                if (error)
-                       return error;
+                       goto unlock_mapping;
        }
 
+       error = AOP_TRUNCATED_PAGE;
        if (!page->mapping)
-               goto truncated;
+               goto unlock;
 
        error = 0;
        if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, page))
@@ -2392,15 +2460,13 @@ static int filemap_update_page(struct kiocb *iocb,
                goto unlock;
 
        error = filemap_read_page(iocb->ki_filp, mapping, page);
-       if (error == AOP_TRUNCATED_PAGE)
-               put_page(page);
-       return error;
-truncated:
-       unlock_page(page);
-       put_page(page);
-       return AOP_TRUNCATED_PAGE;
+       goto unlock_mapping;
 unlock:
        unlock_page(page);
+unlock_mapping:
+       filemap_invalidate_unlock_shared(mapping);
+       if (error == AOP_TRUNCATED_PAGE)
+               put_page(page);
        return error;
 }
 
@@ -2415,6 +2481,19 @@ static int filemap_create_page(struct file *file,
        if (!page)
                return -ENOMEM;
 
+       /*
+        * Protect against truncate / hole punch. Grabbing invalidate_lock here
+        * assures we cannot instantiate and bring uptodate new pagecache pages
+        * after evicting page cache during truncate and before actually
+        * freeing blocks.  Note that we could release invalidate_lock after
+        * inserting the page into page cache as the locked page would then be
+        * enough to synchronize with hole punching. But there are code paths
+        * such as filemap_update_page() filling in partially uptodate pages or
+        * ->readpages() that need to hold invalidate_lock while mapping blocks
+        * for IO so let's hold the lock here as well to keep locking rules
+        * simple.
+        */
+       filemap_invalidate_lock_shared(mapping);
        error = add_to_page_cache_lru(page, mapping, index,
                        mapping_gfp_constraint(mapping, GFP_KERNEL));
        if (error == -EEXIST)
@@ -2426,9 +2505,11 @@ static int filemap_create_page(struct file *file,
        if (error)
                goto error;
 
+       filemap_invalidate_unlock_shared(mapping);
        pagevec_add(pvec, page);
        return 0;
 error:
+       filemap_invalidate_unlock_shared(mapping);
        put_page(page);
        return error;
 }
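
The comment added in filemap_create_page() above describes the reader side: page-cache instantiation holds invalidate_lock shared. The writer it synchronises against is a filesystem truncate or hole-punch path taking the same lock exclusively. A hedged sketch of that writer side, assuming the filemap_invalidate_lock()/filemap_invalidate_unlock() helpers this series adds in include/linux/fs.h (not shown in this diff) and a hypothetical demo_punch_hole():

#include <linux/fs.h>
#include <linux/mm.h>

static int demo_punch_hole(struct inode *inode, loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;

	/* Excludes all shared holders, e.g. filemap_create_page() above. */
	filemap_invalidate_lock(mapping);
	truncate_pagecache_range(inode, start, end);
	/* ... free the underlying filesystem blocks here ... */
	filemap_invalidate_unlock(mapping);
	return 0;
}
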
@@ -2967,6 +3048,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
        pgoff_t max_off;
        struct page *page;
        vm_fault_t ret = 0;
+       bool mapping_locked = false;
 
        max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        if (unlikely(offset >= max_off))
@@ -2976,25 +3058,39 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
         * Do we have something in the page cache already?
         */
        page = find_get_page(mapping, offset);
-       if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
+       if (likely(page)) {
                /*
-                * We found the page, so try async readahead before
-                * waiting for the lock.
+                * We found the page, so try async readahead before waiting for
+                * the lock.
                 */
-               fpin = do_async_mmap_readahead(vmf, page);
-       } else if (!page) {
+               if (!(vmf->flags & FAULT_FLAG_TRIED))
+                       fpin = do_async_mmap_readahead(vmf, page);
+               if (unlikely(!PageUptodate(page))) {
+                       filemap_invalidate_lock_shared(mapping);
+                       mapping_locked = true;
+               }
+       } else {
                /* No page in the page cache at all */
                count_vm_event(PGMAJFAULT);
                count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
                fpin = do_sync_mmap_readahead(vmf);
 retry_find:
+               /*
+                * See comment in filemap_create_page() why we need
+                * invalidate_lock
+                */
+               if (!mapping_locked) {
+                       filemap_invalidate_lock_shared(mapping);
+                       mapping_locked = true;
+               }
                page = pagecache_get_page(mapping, offset,
                                          FGP_CREAT|FGP_FOR_MMAP,
                                          vmf->gfp_mask);
                if (!page) {
                        if (fpin)
                                goto out_retry;
+                       filemap_invalidate_unlock_shared(mapping);
                        return VM_FAULT_OOM;
                }
        }
@@ -3014,8 +3110,20 @@ retry_find:
         * We have a locked page in the page cache, now we need to check
         * that it's up-to-date. If not, it is going to be due to an error.
         */
-       if (unlikely(!PageUptodate(page)))
+       if (unlikely(!PageUptodate(page))) {
+               /*
+                * The page was in cache and uptodate and now it is not.
+                * Strange but possible since we didn't hold the page lock all
+                * the time. Let's drop everything, get the invalidate lock and
+                * try again.
+                */
+               if (!mapping_locked) {
+                       unlock_page(page);
+                       put_page(page);
+                       goto retry_find;
+               }
                goto page_not_uptodate;
+       }
 
        /*
         * We've made it this far and we had to drop our mmap_lock, now is the
@@ -3026,6 +3134,8 @@ retry_find:
                unlock_page(page);
                goto out_retry;
        }
+       if (mapping_locked)
+               filemap_invalidate_unlock_shared(mapping);
 
        /*
         * Found the page and have a reference on it.
@@ -3056,6 +3166,7 @@ page_not_uptodate:
 
        if (!error || error == AOP_TRUNCATED_PAGE)
                goto retry_find;
+       filemap_invalidate_unlock_shared(mapping);
 
        return VM_FAULT_SIGBUS;
 
@@ -3067,6 +3178,8 @@ out_retry:
         */
        if (page)
                put_page(page);
+       if (mapping_locked)
+               filemap_invalidate_unlock_shared(mapping);
        if (fpin)
                fput(fpin);
        return ret | VM_FAULT_RETRY;
@@ -3437,6 +3550,8 @@ out:
  *
  * If the page does not get brought uptodate, return -EIO.
  *
+ * The function expects mapping->invalidate_lock to be already held.
+ *
  * Return: up to date page on success, ERR_PTR() on failure.
  */
 struct page *read_cache_page(struct address_space *mapping,
@@ -3460,6 +3575,8 @@ EXPORT_SYMBOL(read_cache_page);
  *
  * If the page does not get brought uptodate, return -EIO.
  *
+ * The function expects mapping->invalidate_lock to be already held.
+ *
  * Return: up to date page on success, ERR_PTR() on failure.
  */
 struct page *read_cache_page_gfp(struct address_space *mapping,
@@ -3704,12 +3821,12 @@ EXPORT_SYMBOL(generic_perform_write);
  * modification times and calls proper subroutines depending on whether we
  * do direct IO or a standard buffered write.
  *
- * It expects i_mutex to be grabbed unless we work on a block device or similar
+ * It expects i_rwsem to be grabbed unless we work on a block device or similar
  * object which does not need locking at all.
  *
  * This function does *not* take care of syncing data in case of O_SYNC write.
  * A caller has to handle it. This is mainly due to the fact that we want to
- * avoid syncing under i_mutex.
+ * avoid syncing under i_rwsem.
  *
  * Return:
  * * number of bytes written, even for truncated writes
@@ -3797,7 +3914,7 @@ EXPORT_SYMBOL(__generic_file_write_iter);
  *
  * This is a wrapper around __generic_file_write_iter() to be used by most
  * filesystems. It takes care of syncing the file in case of O_SYNC file
- * and acquires i_mutex as needed.
+ * and acquires i_rwsem as needed.
  * Return:
  * * negative error code if no data has been written at all of
  *   vfs_fsync_range() failed for a synchronous write
index 42b8b1f..b947179 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1558,9 +1558,12 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
                gup_flags |= FOLL_WRITE;
 
        /*
-        * See check_vma_flags(): Will return -EFAULT on incompatible mappings
-        * or with insufficient permissions.
+        * We want to report -EINVAL instead of -EFAULT for any permission
+        * problems or incompatible mappings.
         */
+       if (check_vma_flags(vma, gup_flags))
+               return -EINVAL;
+
        return __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
 }
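
This gup.c hunk, together with the mm/madvise.c hunk below, changes what a MADV_POPULATE_READ / MADV_POPULATE_WRITE caller observes: permission problems and incompatible mappings are reported as EINVAL, while faults that would raise SIGBUS/SIGSEGV surface as EFAULT. A small userspace sketch of the EINVAL case (MADV_POPULATE_READ needs Linux 5.14+ headers; behaviour as described by the comments in the diff):

#include <sys/mman.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	/* Prefaulting a PROT_NONE range lacks read permission: expect EINVAL. */
	if (madvise(p, len, MADV_POPULATE_READ) != 0)
		printf("MADV_POPULATE_READ: %s\n", strerror(errno));
	munmap(p, len);
	return 0;
}
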
index dfc940d..8ea35ba 100644 (file)
@@ -2476,7 +2476,7 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                if (!rc) {
                        /*
                         * This indicates there is an entry in the reserve map
-                        * added by alloc_huge_page.  We know it was added
+                        * not added by alloc_huge_page.  We know it was added
                         * before the alloc_huge_page call, otherwise
                         * HPageRestoreReserve would be set on the page.
                         * Remove the entry so that a subsequent allocation
@@ -4660,7 +4660,9 @@ retry_avoidcopy:
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(&range);
 out_release_all:
-       restore_reserve_on_error(h, vma, haddr, new_page);
+       /* No restore in case of successful pagetable update (Break COW) */
+       if (new_page != old_page)
+               restore_reserve_on_error(h, vma, haddr, new_page);
        put_page(new_page);
 out_release_old:
        put_page(old_page);
@@ -4776,7 +4778,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
        pte_t new_pte;
        spinlock_t *ptl;
        unsigned long haddr = address & huge_page_mask(h);
-       bool new_page = false;
+       bool new_page, new_pagecache_page = false;
 
        /*
         * Currently, we are forced to kill the process in the event the
@@ -4799,6 +4801,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
                goto out;
 
 retry:
+       new_page = false;
        page = find_lock_page(mapping, idx);
        if (!page) {
                /* Check for page in userfault range */
@@ -4842,6 +4845,7 @@ retry:
                                        goto retry;
                                goto out;
                        }
+                       new_pagecache_page = true;
                } else {
                        lock_page(page);
                        if (unlikely(anon_vma_prepare(vma))) {
@@ -4926,7 +4930,9 @@ backout:
        spin_unlock(ptl);
 backout_unlocked:
        unlock_page(page);
-       restore_reserve_on_error(h, vma, haddr, page);
+       /* restore reserve for newly allocated pages not in page cache */
+       if (new_page && !new_pagecache_page)
+               restore_reserve_on_error(h, vma, haddr, page);
        put_page(page);
        goto out;
 }
@@ -5135,6 +5141,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        int ret = -ENOMEM;
        struct page *page;
        int writable;
+       bool new_pagecache_page = false;
 
        if (is_continue) {
                ret = -EFAULT;
@@ -5228,6 +5235,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                ret = huge_add_to_page_cache(page, mapping, idx);
                if (ret)
                        goto out_release_nounlock;
+               new_pagecache_page = true;
        }
 
        ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
@@ -5291,7 +5299,8 @@ out_release_unlock:
        if (vm_shared || is_continue)
                unlock_page(page);
 out_release_nounlock:
-       restore_reserve_on_error(h, dst_vma, dst_addr, page);
+       if (!new_pagecache_page)
+               restore_reserve_on_error(h, dst_vma, dst_addr, page);
        put_page(page);
        goto out;
 }
index 942cbc1..eb6307c 100644 (file)
 #include <linux/tracepoint.h>
 #include <trace/events/printk.h>
 
+#include <asm/kfence.h>
+
 #include "kfence.h"
 
+/* May be overridden by <asm/kfence.h>. */
+#ifndef arch_kfence_test_address
+#define arch_kfence_test_address(addr) (addr)
+#endif
+
 /* Report as observed from console. */
 static struct {
        spinlock_t lock;
@@ -82,6 +89,7 @@ static const char *get_access_type(const struct expect_report *r)
 /* Check observed report matches information in @r. */
 static bool report_matches(const struct expect_report *r)
 {
+       unsigned long addr = (unsigned long)r->addr;
        bool ret = false;
        unsigned long flags;
        typeof(observed.lines) expect;
@@ -131,22 +139,25 @@ static bool report_matches(const struct expect_report *r)
        switch (r->type) {
        case KFENCE_ERROR_OOB:
                cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
+               addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_UAF:
                cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
+               addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_CORRUPTION:
                cur += scnprintf(cur, end - cur, "Corrupted memory at");
                break;
        case KFENCE_ERROR_INVALID:
                cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
+               addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_INVALID_FREE:
                cur += scnprintf(cur, end - cur, "Invalid free of");
                break;
        }
 
-       cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);
+       cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);
 
        spin_lock_irqsave(&observed.lock, flags);
        if (!report_available())
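
The arch_kfence_test_address() hook introduced above lets an architecture canonicalise the access address before the KFENCE test matches it against the report text; the #ifndef fallback keeps it an identity mapping everywhere else. A purely hypothetical override, for an architecture whose pointers carry tag bits above bit 55 (not taken from any real arch header):

/* Hypothetical <asm/kfence.h> snippet. */
#include <linux/bits.h>

#define arch_kfence_test_address(addr)	((addr) & GENMASK_ULL(55, 0))
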
index 228a2fb..73d46d1 100644 (file)
@@ -290,7 +290,7 @@ static void hex_dump_object(struct seq_file *seq,
        warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
        kasan_disable_current();
        warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
-                            HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+                            HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
        kasan_enable_current();
 }
 
@@ -1171,7 +1171,7 @@ static bool update_checksum(struct kmemleak_object *object)
 
        kasan_disable_current();
        kcsan_disable_current();
-       object->checksum = crc32(0, (void *)object->pointer, object->size);
+       object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
        kasan_enable_current();
        kcsan_enable_current();
 
@@ -1246,7 +1246,7 @@ static void scan_block(void *_start, void *_end,
                        break;
 
                kasan_disable_current();
-               pointer = *ptr;
+               pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
                kasan_enable_current();
 
                untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
index 6d3d348..56324a3 100644 (file)
@@ -862,10 +862,12 @@ static long madvise_populate(struct vm_area_struct *vma,
                        switch (pages) {
                        case -EINTR:
                                return -EINTR;
-                       case -EFAULT: /* Incompatible mappings / permissions. */
+                       case -EINVAL: /* Incompatible mappings / permissions. */
                                return -EINVAL;
                        case -EHWPOISON:
                                return -EHWPOISON;
+                       case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
+                               return -EFAULT;
                        default:
                                pr_warn_once("%s: unhandled return value: %ld\n",
                                             __func__, pages);
@@ -910,7 +912,7 @@ static long madvise_remove(struct vm_area_struct *vma,
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
        /*
-        * Filesystem's fallocate may need to take i_mutex.  We need to
+        * Filesystem's fallocate may need to take i_rwsem.  We need to
         * explicitly grab a reference because the vma (and hence the
         * vma's reference to the file) can go away as soon as we drop
         * mmap_lock.
index eb8e87c..702a81d 100644 (file)
@@ -3106,13 +3106,15 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
                stock->cached_pgdat = pgdat;
        } else if (stock->cached_pgdat != pgdat) {
                /* Flush the existing cached vmstat data */
+               struct pglist_data *oldpg = stock->cached_pgdat;
+
                if (stock->nr_slab_reclaimable_b) {
-                       mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B,
+                       mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
                                          stock->nr_slab_reclaimable_b);
                        stock->nr_slab_reclaimable_b = 0;
                }
                if (stock->nr_slab_unreclaimable_b) {
-                       mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B,
+                       mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
                                          stock->nr_slab_unreclaimable_b);
                        stock->nr_slab_unreclaimable_b = 0;
                }
index eefd823..e1f87cf 100644 (file)
@@ -866,7 +866,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
        /*
         * Truncation is a bit tricky. Enable it per file system for now.
         *
-        * Open: to take i_mutex or not for this? Right now we don't.
+        * Open: to take i_rwsem or not for this? Right now we don't.
         */
        ret = truncate_error_page(p, pfn, mapping);
 out:
@@ -1146,7 +1146,7 @@ static int __get_hwpoison_page(struct page *page)
         * unexpected races caused by taking a page refcount.
         */
        if (!HWPoisonHandlable(head))
-               return 0;
+               return -EBUSY;
 
        if (PageTransHuge(head)) {
                /*
@@ -1199,9 +1199,15 @@ try_again:
                        }
                        goto out;
                } else if (ret == -EBUSY) {
-                       /* We raced with freeing huge page to buddy, retry. */
-                       if (pass++ < 3)
+                        * We raced with a (possibly temporary) unhandlable
+                        * page, retry.
+                        * page, retry.
+                        */
+                       if (pass++ < 3) {
+                               shake_page(p, 1);
                                goto try_again;
+                       }
+                       ret = -EIO;
                        goto out;
                }
        }
index 8cb75b2..86c3af7 100644 (file)
@@ -1731,6 +1731,7 @@ failed_removal_isolated:
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
 failed_removal_pcplists_disabled:
+       lru_cache_enable();
        zone_pcp_enable(zone);
 failed_removal:
        pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
index ca54d36..181a113 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1517,12 +1517,6 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
                        if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       /*
-                        * Make sure there are no mandatory locks on the file.
-                        */
-                       if (locks_verify_locked(file))
-                               return -EAGAIN;
-
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        if (!(file->f_mode & FMODE_WRITE))
                                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
index 3a93d40..9d0ad98 100644 (file)
@@ -826,9 +826,6 @@ static int validate_mmap_request(struct file *file,
                            (file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       if (locks_verify_locked(file))
-                               return -EAGAIN;
-
                        if (!(capabilities & NOMMU_MAP_DIRECT))
                                return -ENODEV;
 
index 9f63548..c12f67c 100644 (file)
@@ -2010,7 +2010,6 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
        return ret;
 }
 
-#ifdef CONFIG_BLOCK
 void laptop_mode_timer_fn(struct timer_list *t)
 {
        struct backing_dev_info *backing_dev_info =
@@ -2045,7 +2044,6 @@ void laptop_sync_completion(void)
 
        rcu_read_unlock();
 }
-#endif
 
 /*
  * If ratelimit_pages is too high then we can get into dirty-data overload
index 856b175..eeb3a9c 100644 (file)
@@ -3453,19 +3453,10 @@ void free_unref_page_list(struct list_head *list)
                 * comment in free_unref_page.
                 */
                migratetype = get_pcppage_migratetype(page);
-               if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
-                       if (unlikely(is_migrate_isolate(migratetype))) {
-                               list_del(&page->lru);
-                               free_one_page(page_zone(page), page, pfn, 0,
-                                                       migratetype, FPI_NONE);
-                               continue;
-                       }
-
-                       /*
-                        * Non-isolated types over MIGRATE_PCPTYPES get added
-                        * to the MIGRATE_MOVABLE pcp list.
-                        */
-                       set_pcppage_migratetype(page, MIGRATE_MOVABLE);
+               if (unlikely(is_migrate_isolate(migratetype))) {
+                       list_del(&page->lru);
+                       free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
+                       continue;
                }
 
                set_page_private(page, pfn);
@@ -3475,7 +3466,15 @@ void free_unref_page_list(struct list_head *list)
        list_for_each_entry_safe(page, next, list, lru) {
                pfn = page_private(page);
                set_page_private(page, 0);
+
+               /*
+                * Non-isolated types over MIGRATE_PCPTYPES get added
+                * to the MIGRATE_MOVABLE pcp list.
+                */
                migratetype = get_pcppage_migratetype(page);
+               if (unlikely(migratetype >= MIGRATE_PCPTYPES))
+                       migratetype = MIGRATE_MOVABLE;
+
                trace_mm_page_free_batched(page);
                free_unref_page_commit(page, pfn, migratetype, 0);
 
index d589f14..41b75d7 100644 (file)
@@ -192,6 +192,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
         */
        unsigned int nofs = memalloc_nofs_save();
 
+       filemap_invalidate_lock_shared(mapping);
        /*
         * Preallocate as many pages as we will need.
         */
@@ -236,6 +237,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
         * will then handle the error.
         */
        read_pages(ractl, &page_pool, false);
+       filemap_invalidate_unlock_shared(mapping);
        memalloc_nofs_restore(nofs);
 }
 EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
index b9eb5c1..2d29a57 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
 /*
  * Lock ordering in mm:
  *
- * inode->i_mutex      (while writing or truncating, not reading or faulting)
+ * inode->i_rwsem      (while writing or truncating, not reading or faulting)
  *   mm->mmap_lock
- *     page->flags PG_locked (lock_page)   * (see huegtlbfs below)
- *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
- *         mapping->i_mmap_rwsem
- *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
- *           anon_vma->rwsem
- *             mm->page_table_lock or pte_lock
- *               swap_lock (in swap_duplicate, swap_info_get)
- *                 mmlist_lock (in mmput, drain_mmlist and others)
- *                 mapping->private_lock (in __set_page_dirty_buffers)
- *                   lock_page_memcg move_lock (in __set_page_dirty_buffers)
- *                     i_pages lock (widely used)
- *                       lruvec->lru_lock (in lock_page_lruvec_irq)
- *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
- *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
- *                   sb_lock (within inode_lock in fs/fs-writeback.c)
- *                   i_pages lock (widely used, in set_page_dirty,
- *                             in arch-dependent flush_dcache_mmap_lock,
- *                             within bdi.wb->list_lock in __sync_single_inode)
+ *     mapping->invalidate_lock (in filemap_fault)
+ *       page->flags PG_locked (lock_page)   * (see hugetlbfs below)
+ *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
+ *           mapping->i_mmap_rwsem
+ *             hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
+ *             anon_vma->rwsem
+ *               mm->page_table_lock or pte_lock
+ *                 swap_lock (in swap_duplicate, swap_info_get)
+ *                   mmlist_lock (in mmput, drain_mmlist and others)
+ *                   mapping->private_lock (in __set_page_dirty_buffers)
+ *                     lock_page_memcg move_lock (in __set_page_dirty_buffers)
+ *                       i_pages lock (widely used)
+ *                         lruvec->lru_lock (in lock_page_lruvec_irq)
+ *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
+ *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
+ *                     sb_lock (within inode_lock in fs/fs-writeback.c)
+ *                     i_pages lock (widely used, in set_page_dirty,
+ *                               in arch-dependent flush_dcache_mmap_lock,
+ *                               within bdi.wb->list_lock in __sync_single_inode)
  *
- * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
+ * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
  *   ->tasklist_lock
  *     pte map lock
  *
index 70d9ce2..3107ace 100644 (file)
@@ -96,7 +96,7 @@ static struct vfsmount *shm_mnt;
 
 /*
  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
- * inode->i_private (with i_mutex making sure that it has only one user at
+ * inode->i_private (with i_rwsem making sure that it has only one user at
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
@@ -774,7 +774,7 @@ static int shmem_free_swap(struct address_space *mapping,
  * Determine (in bytes) how many of the shmem object's pages mapped by the
  * given offsets are swapped out.
  *
- * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
+ * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem.
  */
 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
@@ -806,7 +806,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
  * Determine (in bytes) how many of the shmem object's pages mapped by the
  * given vma is swapped out.
  *
- * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
+ * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem.
  */
 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
@@ -1069,7 +1069,7 @@ static int shmem_setattr(struct user_namespace *mnt_userns,
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;
 
-               /* protected by i_mutex */
+               /* protected by i_rwsem */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;
@@ -1696,8 +1696,7 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
-       struct swap_info_struct *si;
-       struct page *page = NULL;
+       struct page *page;
        swp_entry_t swap;
        int error;
 
@@ -1705,12 +1704,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
        swap = radix_to_swp_entry(*pagep);
        *pagep = NULL;
 
-       /* Prevent swapoff from happening to us. */
-       si = get_swap_device(swap);
-       if (!si) {
-               error = EINVAL;
-               goto failed;
-       }
        /* Look it up and read it in.. */
        page = lookup_swap_cache(swap, NULL, 0);
        if (!page) {
@@ -1772,8 +1765,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
        swap_free(swap);
 
        *pagep = page;
-       if (si)
-               put_swap_device(si);
        return 0;
 failed:
        if (!shmem_confirm_swap(mapping, index, swap))
@@ -1784,9 +1775,6 @@ unlock:
                put_page(page);
        }
 
-       if (si)
-               put_swap_device(si);
-
        return error;
 }
 
@@ -2071,7 +2059,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
        /*
         * Trinity finds that probing a hole which tmpfs is punching can
         * prevent the hole-punch from ever completing: which in turn
-        * locks writers out with its hold on i_mutex.  So refrain from
+        * locks writers out with its hold on i_rwsem.  So refrain from
         * faulting pages into the hole while it's being punched.  Although
         * shmem_undo_range() does remove the additions, it may be unable to
         * keep up, as each new page needs its own unmap_mapping_range() call,
@@ -2082,7 +2070,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
         * we just need to make racing faults a rare case.
         *
         * The implementation below would be much simpler if we just used a
-        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * standard mutex or completion: but we cannot take i_rwsem in fault,
         * and bloating every shmem inode for this unlikely case would be sad.
         */
        if (unlikely(inode->i_private)) {
@@ -2482,7 +2470,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t index = pos >> PAGE_SHIFT;
 
-       /* i_mutex is held by caller */
+       /* i_rwsem is held by caller */
        if (unlikely(info->seals & (F_SEAL_GROW |
                                   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
                if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
@@ -2582,7 +2570,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 
                /*
                 * We must evaluate after, since reads (unlike writes)
-                * are called without i_mutex protection against truncate
+                * are called without i_rwsem protection against truncate
                 */
                nr = PAGE_SIZE;
                i_size = i_size_read(inode);
@@ -2652,7 +2640,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
                return -ENXIO;
 
        inode_lock(inode);
-       /* We're holding i_mutex so we can access i_size directly */
+       /* We're holding i_rwsem so we can access i_size directly */
        offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
        if (offset >= 0)
                offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
@@ -2681,7 +2669,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
                DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
-               /* protected by i_mutex */
+               /* protected by i_rwsem */
                if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
                        error = -EPERM;
                        goto out;
index af984e4..f77d8cd 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -576,8 +576,8 @@ static void print_section(char *level, char *text, u8 *addr,
                          unsigned int length)
 {
        metadata_access_enable();
-       print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
-                       16, 1, addr, length, 1);
+       print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
+                       16, 1, kasan_reset_tag((void *)addr), length, 1);
        metadata_access_disable();
 }
 
@@ -1400,12 +1400,13 @@ check_slabs:
 static int __init setup_slub_debug(char *str)
 {
        slab_flags_t flags;
+       slab_flags_t global_flags;
        char *saved_str;
        char *slab_list;
        bool global_slub_debug_changed = false;
        bool slab_list_specified = false;
 
-       slub_debug = DEBUG_DEFAULT_FLAGS;
+       global_flags = DEBUG_DEFAULT_FLAGS;
        if (*str++ != '=' || !*str)
                /*
                 * No options specified. Switch on full debugging.
@@ -1417,7 +1418,7 @@ static int __init setup_slub_debug(char *str)
                str = parse_slub_debug_flags(str, &flags, &slab_list, true);
 
                if (!slab_list) {
-                       slub_debug = flags;
+                       global_flags = flags;
                        global_slub_debug_changed = true;
                } else {
                        slab_list_specified = true;
@@ -1426,16 +1427,18 @@ static int __init setup_slub_debug(char *str)
 
        /*
         * For backwards compatibility, a single list of flags with list of
-        * slabs means debugging is only enabled for those slabs, so the global
-        * slub_debug should be 0. We can extended that to multiple lists as
+        * slabs means debugging is only changed for those slabs, so the global
+        * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
+        * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
         * long as there is no option specifying flags without a slab list.
         */
        if (slab_list_specified) {
                if (!global_slub_debug_changed)
-                       slub_debug = 0;
+                       global_flags = slub_debug;
                slub_debug_string = saved_str;
        }
 out:
+       slub_debug = global_flags;
        if (slub_debug != 0 || slub_debug_string)
                static_branch_enable(&slub_debug_enabled);
        else
@@ -3236,12 +3239,12 @@ struct detached_freelist {
        struct kmem_cache *s;
 };
 
-static inline void free_nonslab_page(struct page *page)
+static inline void free_nonslab_page(struct page *page, void *object)
 {
        unsigned int order = compound_order(page);
 
        VM_BUG_ON_PAGE(!PageCompound(page), page);
-       kfree_hook(page_address(page));
+       kfree_hook(object);
        mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
        __free_pages(page, order);
 }
@@ -3282,7 +3285,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
        if (!s) {
                /* Handle kalloc'ed objects */
                if (unlikely(!PageSlab(page))) {
-                       free_nonslab_page(page);
+                       free_nonslab_page(page, object);
                        p[size] = NULL; /* mark object processed */
                        return size;
                }
@@ -4258,7 +4261,7 @@ void kfree(const void *x)
 
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
-               free_nonslab_page(page);
+               free_nonslab_page(page, object);
                return;
        }
        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
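
The setup_slub_debug() change above affects how the slub_debug= boot parameter composes global and per-cache settings: naming specific caches no longer clears the global default that CONFIG_SLUB_DEBUG_ON may have selected. A few illustrative command lines (standard slub_debug flag letters; behaviour as described by the updated comment):

slub_debug                        # full debugging for every cache
slub_debug=FZ                     # sanity checks + red zoning, globally
slub_debug=FZ,dentry,kmalloc-64   # FZ only for the named caches; the global
                                  # default is now left untouched instead of
                                  # being forced to 0
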
index a66f3e0..16f706c 100644 (file)
@@ -70,9 +70,9 @@ void disable_swap_slots_cache_lock(void)
        swap_slot_cache_enabled = false;
        if (swap_slot_cache_initialized) {
                /* serialize with cpu hotplug operations */
-               get_online_cpus();
+               cpus_read_lock();
                __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
-               put_online_cpus();
+               cpus_read_unlock();
        }
 }
 
index c56aa9a..bc7cee6 100644 (file)
@@ -628,13 +628,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
        if (!mask)
                goto skip;
 
-       /* Test swap type to make sure the dereference is safe */
-       if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
-               struct inode *inode = si->swap_file->f_mapping->host;
-               if (inode_read_congested(inode))
-                       goto skip;
-       }
-
        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
index 234ddd8..44ad5e5 100644 (file)
@@ -412,7 +412,8 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
  * @mapping: mapping to truncate
  * @lstart: offset from which to truncate
  *
- * Called under (and serialised by) inode->i_mutex.
+ * Called under (and serialised by) inode->i_rwsem and
+ * mapping->invalidate_lock.
  *
  * Note: When this function returns, there can be a page in the process of
  * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
@@ -429,7 +430,7 @@ EXPORT_SYMBOL(truncate_inode_pages);
  * truncate_inode_pages_final - truncate *all* pages before inode dies
  * @mapping: mapping to truncate
  *
- * Called under (and serialized by) inode->i_mutex.
+ * Called under (and serialized by) inode->i_rwsem.
  *
  * Filesystems have to use this in the .evict_inode path to inform the
  * VM that this is the final truncate and the inode is going away.
@@ -748,7 +749,7 @@ EXPORT_SYMBOL(truncate_pagecache);
  * setattr function when ATTR_SIZE is passed in.
  *
  * Must be called with a lock serializing truncates and writes (generally
- * i_mutex but e.g. xfs uses a different lock) and before all filesystem
+ * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
  * specific block truncation has been performed.
  */
 void truncate_setsize(struct inode *inode, loff_t newsize)
@@ -777,7 +778,7 @@ EXPORT_SYMBOL(truncate_setsize);
  *
  * The function must be called after i_size is updated so that page fault
  * coming after we unlock the page will already see the new i_size.
- * The function must be called while we still hold i_mutex - this not only
+ * The function must be called while we still hold i_rwsem - this not only
  * makes sure i_size is stable but also that userspace cannot observe new
  * i_size value before we are prepared to store mmap writes at new inode size.
  */
index 4620df6..eeae2f6 100644 (file)
@@ -100,9 +100,12 @@ struct scan_control {
        unsigned int may_swap:1;
 
        /*
-        * Cgroups are not reclaimed below their configured memory.low,
-        * unless we threaten to OOM. If any cgroups are skipped due to
-        * memory.low and nothing was reclaimed, go back for memory.low.
+        * Cgroup memory below memory.low is protected as long as we
+        * don't threaten to OOM. If any cgroup is reclaimed at
+        * reduced force or passed over entirely due to its memory.low
+        * setting (memcg_low_skipped), and nothing is reclaimed as a
+        * result, then go back for one more cycle that reclaims the protected
+        * memory (memcg_low_reclaim) to avert OOM.
         */
        unsigned int memcg_low_reclaim:1;
        unsigned int memcg_low_skipped:1;
@@ -2537,15 +2540,14 @@ out:
        for_each_evictable_lru(lru) {
                int file = is_file_lru(lru);
                unsigned long lruvec_size;
+               unsigned long low, min;
                unsigned long scan;
-               unsigned long protection;
 
                lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
-               protection = mem_cgroup_protection(sc->target_mem_cgroup,
-                                                  memcg,
-                                                  sc->memcg_low_reclaim);
+               mem_cgroup_protection(sc->target_mem_cgroup, memcg,
+                                     &min, &low);
 
-               if (protection) {
+               if (min || low) {
                        /*
                         * Scale a cgroup's reclaim pressure by proportioning
                         * its current usage to its memory.low or memory.min
@@ -2576,6 +2578,15 @@ out:
                         * hard protection.
                         */
                        unsigned long cgroup_size = mem_cgroup_size(memcg);
+                       unsigned long protection;
+
+                       /* memory.low scaling, make sure we retry before OOM */
+                       if (!sc->memcg_low_reclaim && low > min) {
+                               protection = low;
+                               sc->memcg_low_skipped = 1;
+                       } else {
+                               protection = min;
+                       }
 
                        /* Avoid TOCTOU with earlier protection check */
                        cgroup_size = max(cgroup_size, protection);
@@ -4413,11 +4424,13 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
                .may_swap = 1,
                .reclaim_idx = gfp_zone(gfp_mask),
        };
+       unsigned long pflags;
 
        trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
                                           sc.gfp_mask);
 
        cond_resched();
+       psi_memstall_enter(&pflags);
        fs_reclaim_acquire(sc.gfp_mask);
        /*
         * We need to be able to allocate from the reserves for RECLAIM_UNMAP
@@ -4442,6 +4455,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
        current->flags &= ~PF_SWAPWRITE;
        memalloc_noreclaim_restore(noreclaim_flag);
        fs_reclaim_release(sc.gfp_mask);
+       psi_memstall_leave(&pflags);
 
        trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
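
A worked example of the protection selection added above, with made-up numbers: a cgroup with memory.min = 64 MiB and memory.low = 128 MiB is first scanned with protection = 128 MiB (and memcg_low_skipped set, since low > min while memcg_low_reclaim is still clear); if that whole cycle reclaims nothing, the retry runs with memcg_low_reclaim set, protection drops to the 64 MiB memory.min, and proportionally more of the cgroup's LRU is scanned before an OOM kill is considered.
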
 
index b0534e0..a7ed56a 100644 (file)
@@ -129,9 +129,9 @@ static void sum_vm_events(unsigned long *ret)
 */
 void all_vm_events(unsigned long *ret)
 {
-       get_online_cpus();
+       cpus_read_lock();
        sum_vm_events(ret);
-       put_online_cpus();
+       cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);
 
@@ -1948,7 +1948,7 @@ static void vmstat_shepherd(struct work_struct *w)
 {
        int cpu;
 
-       get_online_cpus();
+       cpus_read_lock();
        /* Check processors whose vmstat worker threads have been disabled */
        for_each_online_cpu(cpu) {
                struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
@@ -1958,7 +1958,7 @@ static void vmstat_shepherd(struct work_struct *w)
 
                cond_resched();
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        schedule_delayed_work(&shepherd,
                round_jiffies_relative(sysctl_stat_interval));
@@ -2037,9 +2037,9 @@ void __init init_mm_internals(void)
        if (ret < 0)
                pr_err("vmstat: failed to register 'online' hotplug state\n");
 
-       get_online_cpus();
+       cpus_read_lock();
        init_cpu_node_state();
-       put_online_cpus();
+       cpus_read_unlock();
 
        start_shepherd_timer();
 #endif
index 2560ed2..e1a545c 100644 (file)
@@ -3996,14 +3996,10 @@ EXPORT_SYMBOL(hci_register_dev);
 /* Unregister HCI device */
 void hci_unregister_dev(struct hci_dev *hdev)
 {
-       int id;
-
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
        hci_dev_set_flag(hdev, HCI_UNREGISTER);
 
-       id = hdev->id;
-
        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);
@@ -4038,7 +4034,14 @@ void hci_unregister_dev(struct hci_dev *hdev)
        }
 
        device_del(&hdev->dev);
+       /* Actual cleanup is deferred until hci_cleanup_dev(). */
+       hci_dev_put(hdev);
+}
+EXPORT_SYMBOL(hci_unregister_dev);
 
+/* Cleanup HCI device */
+void hci_cleanup_dev(struct hci_dev *hdev)
+{
        debugfs_remove_recursive(hdev->debugfs);
        kfree_const(hdev->hw_info);
        kfree_const(hdev->fw_info);
@@ -4063,11 +4066,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
        hci_blocked_keys_clear(hdev);
        hci_dev_unlock(hdev);
 
-       hci_dev_put(hdev);
-
-       ida_simple_remove(&hci_index_ida, id);
+       ida_simple_remove(&hci_index_ida, hdev->id);
 }
-EXPORT_SYMBOL(hci_unregister_dev);
 
 /* Suspend HCI device */
 int hci_suspend_dev(struct hci_dev *hdev)
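
The hci_core.c hunks above stop freeing per-device state in hci_unregister_dev() and defer it to hci_cleanup_dev(), which the sysfs release callback (see the net/bluetooth/hci_sysfs.c hunk below) invokes once the last reference is dropped; sockets that still hold the hdev therefore keep valid data after unregister. A generic sketch of that device_del()/release pattern with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_dev {
	struct device dev;	/* dev.release = demo_release, set at init time */
	char *fw_info;
};

/*
 * Called only when the last reference goes away, possibly long after
 * demo_unregister(); final cleanup of per-device state lives here.
 */
static void demo_release(struct device *dev)
{
	struct demo_dev *d = container_of(dev, struct demo_dev, dev);

	kfree(d->fw_info);
	kfree(d);
}

static void demo_unregister(struct demo_dev *d)
{
	device_del(&d->dev);	/* no longer visible to new users ... */
	put_device(&d->dev);	/* ... freed only once demo_release() runs */
}
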
index b04a5a0..f1128c2 100644 (file)
@@ -59,6 +59,17 @@ struct hci_pinfo {
        char              comm[TASK_COMM_LEN];
 };
 
+static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
+{
+       struct hci_dev *hdev = hci_pi(sk)->hdev;
+
+       if (!hdev)
+               return ERR_PTR(-EBADFD);
+       if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+               return ERR_PTR(-EPIPE);
+       return hdev;
+}
+
 void hci_sock_set_flag(struct sock *sk, int nr)
 {
        set_bit(nr, &hci_pi(sk)->flags);
@@ -759,19 +770,13 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
        if (event == HCI_DEV_UNREG) {
                struct sock *sk;
 
-               /* Detach sockets from device */
+               /* Wake up sockets using this dead device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
-                       lock_sock(sk);
                        if (hci_pi(sk)->hdev == hdev) {
-                               hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
-                               sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);
-
-                               hci_dev_put(hdev);
                        }
-                       release_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
@@ -930,10 +935,10 @@ static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
 {
-       struct hci_dev *hdev = hci_pi(sk)->hdev;
+       struct hci_dev *hdev = hci_hdev_from_sock(sk);
 
-       if (!hdev)
-               return -EBADFD;
+       if (IS_ERR(hdev))
+               return PTR_ERR(hdev);
 
        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;
@@ -1103,6 +1108,18 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 
        lock_sock(sk);
 
+       /* Allow detaching from dead device and attaching to alive device, if
+        * the caller wants to re-bind (instead of close) this socket in
+        * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
+        */
+       hdev = hci_pi(sk)->hdev;
+       if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+               hci_pi(sk)->hdev = NULL;
+               sk->sk_state = BT_OPEN;
+               hci_dev_put(hdev);
+       }
+       hdev = NULL;
+
        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
@@ -1379,9 +1396,9 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
 
        lock_sock(sk);
 
-       hdev = hci_pi(sk)->hdev;
-       if (!hdev) {
-               err = -EBADFD;
+       hdev = hci_hdev_from_sock(sk);
+       if (IS_ERR(hdev)) {
+               err = PTR_ERR(hdev);
                goto done;
        }
 
@@ -1743,9 +1760,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                goto done;
        }
 
-       hdev = hci_pi(sk)->hdev;
-       if (!hdev) {
-               err = -EBADFD;
+       hdev = hci_hdev_from_sock(sk);
+       if (IS_ERR(hdev)) {
+               err = PTR_ERR(hdev);
                goto done;
        }
 
index 9874844..b69d88b 100644 (file)
@@ -83,6 +83,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
 static void bt_host_release(struct device *dev)
 {
        struct hci_dev *hdev = to_hci_dev(dev);
+
+       if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+               hci_cleanup_dev(hdev);
        kfree(hdev);
        module_put(THIS_MODULE);
 }
index 1cc75c8..caa16bf 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/vmalloc.h>
 #include <linux/etherdevice.h>
 #include <linux/filter.h>
+#include <linux/rcupdate_trace.h>
 #include <linux/sched/signal.h>
 #include <net/bpf_sk_storage.h>
 #include <net/sock.h>
@@ -951,7 +952,10 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
                        goto out;
                }
        }
+
+       rcu_read_lock_trace();
        retval = bpf_prog_run_pin_on_cpu(prog, ctx);
+       rcu_read_unlock_trace();
 
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
                err = -EFAULT;
index a16191d..5dee309 100644 (file)
@@ -1019,7 +1019,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
 
 static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
                        struct net_bridge_port *p, const unsigned char *addr,
-                       u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[])
+                       u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
+                       struct netlink_ext_ack *extack)
 {
        int err = 0;
 
@@ -1038,6 +1039,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
                rcu_read_unlock();
                local_bh_enable();
        } else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
+               if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "FDB entry towards bridge must be permanent");
+                       return -EINVAL;
+               }
                err = br_fdb_external_learn_add(br, p, addr, vid, true);
        } else {
                spin_lock_bh(&br->hash_lock);
@@ -1110,9 +1116,11 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                }
 
                /* VID was specified, so use it. */
-               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb);
+               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
+                                  extack);
        } else {
-               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb);
+               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
+                                  extack);
                if (err || !vg || !vg->num_vlans)
                        goto out;
 
@@ -1124,7 +1132,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                        if (!br_vlan_should_use(v))
                                continue;
                        err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
-                                          nfea_tb);
+                                          nfea_tb, extack);
                        if (err)
                                goto out;
                }
@@ -1281,6 +1289,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 
                if (swdev_notify)
                        flags |= BIT(BR_FDB_ADDED_BY_USER);
+
+               if (!p)
+                       flags |= BIT(BR_FDB_LOCAL);
+
                fdb = fdb_create(br, p, addr, vid, flags);
                if (!fdb) {
                        err = -ENOMEM;
@@ -1307,6 +1319,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
                if (swdev_notify)
                        set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
 
+               if (!p)
+                       set_bit(BR_FDB_LOCAL, &fdb->flags);
+
                if (modified)
                        fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
        }
index 6e4a323..14cd6ef 100644 (file)
@@ -616,6 +616,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
 
        err = dev_set_allmulti(dev, 1);
        if (err) {
+               br_multicast_del_port(p);
                kfree(p);       /* kobject not yet init'd, manually free */
                goto err1;
        }
@@ -729,6 +730,7 @@ err4:
 err3:
        sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
+       br_multicast_del_port(p);
        kobject_put(&p->kobj);
        dev_set_allmulti(dev, -1);
 err1:
index 8d033a7..fdbed31 100644 (file)
@@ -88,6 +88,12 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
 
                        skb = ip_fraglist_next(&iter);
                }
+
+               if (!err)
+                       return 0;
+
+               kfree_skb_list(iter.frag);
+
                return err;
        }
 slow_path:
index 75431ca..1a45584 100644 (file)
@@ -158,7 +158,7 @@ static void linkwatch_do_dev(struct net_device *dev)
        clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
 
        rfc2863_policy(dev);
-       if (dev->flags & IFF_UP && netif_device_present(dev)) {
+       if (dev->flags & IFF_UP) {
                if (netif_carrier_ok(dev))
                        dev_activate(dev);
                else
@@ -204,7 +204,8 @@ static void __linkwatch_run_queue(int urgent_only)
                dev = list_first_entry(&wrk, struct net_device, link_watch_list);
                list_del_init(&dev->link_watch_list);
 
-               if (urgent_only && !linkwatch_urgent_event(dev)) {
+               if (!netif_device_present(dev) ||
+                   (urgent_only && !linkwatch_urgent_event(dev))) {
                        list_add_tail(&dev->link_watch_list, &lweventlist);
                        continue;
                }
index 5e4eb45..8ab7b40 100644 (file)
@@ -634,7 +634,15 @@ bool page_pool_return_skb_page(struct page *page)
        struct page_pool *pp;
 
        page = compound_head(page);
-       if (unlikely(page->pp_magic != PP_SIGNATURE))
+
+       /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
+        * in order to preserve any existing bits, such as bit 0 for the
+        * head page of compound page and bit 1 for pfmemalloc page, so
+        * mask those bits for freeing side when doing below checking,
+        * and page_is_pfmemalloc() is checked in __page_pool_put_page()
+        * to avoid recycling the pfmemalloc page.
+        */
+       if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
                return false;
 
        pp = page->pp;
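The comment added in the hunk above explains that page->pp_magic carries PP_SIGNATURE OR'ed with unrelated low flag bits, so the recycling check has to mask those bits out before comparing. A tiny sketch of the same check, with made-up constants standing in for the real ones:

#include <stdio.h>

#define SIGNATURE   0x40UL      /* stand-in for PP_SIGNATURE */
#define FLAGS_MASK  0x3UL       /* low bits reused for unrelated flags */

static int has_signature(unsigned long magic)
{
        /* Ignore the flag bits, compare only the signature bits. */
        return (magic & ~FLAGS_MASK) == SIGNATURE;
}

int main(void)
{
        unsigned long magic = SIGNATURE | 0x1UL;   /* flag bit 0 set */

        printf("%d\n", has_signature(magic));      /* prints 1 */
        printf("%d\n", has_signature(0x80UL));     /* prints 0 */
        return 0;
}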
index f6af3e7..662eb1c 100644 (file)
@@ -2608,6 +2608,7 @@ static int do_setlink(const struct sk_buff *skb,
                return err;
 
        if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
+               const char *pat = ifname && ifname[0] ? ifname : NULL;
                struct net *net;
                int new_ifindex;
 
@@ -2623,7 +2624,7 @@ static int do_setlink(const struct sk_buff *skb,
                else
                        new_ifindex = 0;
 
-               err = __dev_change_net_namespace(dev, net, ifname, new_ifindex);
+               err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
                put_net(net);
                if (err)
                        goto errout;
index 9cc9d1e..c5c1d2b 100644 (file)
@@ -41,9 +41,9 @@ extern bool dccp_debug;
 #define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
 #define dccp_debug(fmt, a...)            dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
 #else
-#define dccp_pr_debug(format, a...)
-#define dccp_pr_debug_cat(format, a...)
-#define dccp_debug(format, a...)
+#define dccp_pr_debug(format, a...)      do {} while (0)
+#define dccp_pr_debug_cat(format, a...)          do {} while (0)
+#define dccp_debug(format, a...)         do {} while (0)
 #endif
 
 extern struct inet_hashinfo dccp_hashinfo;
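The dccp hunk above turns the empty debug macro stubs into do {} while (0) so a stubbed-out call still reads as one real statement. A short sketch of the compiler warning this avoids; the macro names are illustrative:

#include <stdio.h>

#define dbg_empty(...)                      /* expands to nothing */
#define dbg_stub(...)   do { } while (0)    /* expands to one statement */

int main(void)
{
        int verbose = 0;

        /* "if (verbose) dbg_empty(...);" leaves an empty if-body behind,
         * which -Wempty-body flags; the do/while stub is a real (empty)
         * statement, so the same call site compiles cleanly.
         */
        if (verbose)
                dbg_stub("debugging enabled\n");

        printf("done\n");
        return 0;
}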
index 532085d..23be8e0 100644 (file)
@@ -2291,8 +2291,8 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
 static void
 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
 {
+       struct switchdev_notifier_fdb_info info = {};
        struct dsa_switch *ds = switchdev_work->ds;
-       struct switchdev_notifier_fdb_info info;
        struct dsa_port *dp;
 
        if (!dsa_is_user_port(ds, switchdev_work->port))
index a45a040..c25f761 100644 (file)
@@ -984,6 +984,11 @@ static const struct proto_ops ieee802154_dgram_ops = {
        .sendpage          = sock_no_sendpage,
 };
 
+static void ieee802154_sock_destruct(struct sock *sk)
+{
+       skb_queue_purge(&sk->sk_receive_queue);
+}
+
 /* Create a socket. Initialise the socket, blank the addresses
  * set the state.
  */
@@ -1024,7 +1029,7 @@ static int ieee802154_create(struct net *net, struct socket *sock,
        sock->ops = ops;
 
        sock_init_data(sock, sk);
-       /* FIXME: sk->sk_destruct */
+       sk->sk_destruct = ieee802154_sock_destruct;
        sk->sk_family = PF_IEEE802154;
 
        /* Checksums on by default */
index 099259f..7fbd0b5 100644 (file)
@@ -465,14 +465,16 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
        if (!doi_def)
                return;
 
-       switch (doi_def->type) {
-       case CIPSO_V4_MAP_TRANS:
-               kfree(doi_def->map.std->lvl.cipso);
-               kfree(doi_def->map.std->lvl.local);
-               kfree(doi_def->map.std->cat.cipso);
-               kfree(doi_def->map.std->cat.local);
-               kfree(doi_def->map.std);
-               break;
+       if (doi_def->map.std) {
+               switch (doi_def->type) {
+               case CIPSO_V4_MAP_TRANS:
+                       kfree(doi_def->map.std->lvl.cipso);
+                       kfree(doi_def->map.std->lvl.local);
+                       kfree(doi_def->map.std->cat.cipso);
+                       kfree(doi_def->map.std->cat.local);
+                       kfree(doi_def->map.std);
+                       break;
+               }
        }
        kfree(doi_def);
 }
index 6b3c558..00576ba 100644 (file)
@@ -803,10 +803,17 @@ static void igmp_gq_timer_expire(struct timer_list *t)
 static void igmp_ifc_timer_expire(struct timer_list *t)
 {
        struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
+       u32 mr_ifc_count;
 
        igmpv3_send_cr(in_dev);
-       if (in_dev->mr_ifc_count) {
-               in_dev->mr_ifc_count--;
+restart:
+       mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count);
+
+       if (mr_ifc_count) {
+               if (cmpxchg(&in_dev->mr_ifc_count,
+                           mr_ifc_count,
+                           mr_ifc_count - 1) != mr_ifc_count)
+                       goto restart;
                igmp_ifc_start_timer(in_dev,
                                     unsolicited_report_interval(in_dev));
        }
@@ -818,7 +825,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
        struct net *net = dev_net(in_dev->dev);
        if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
                return;
-       in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+       WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
        igmp_ifc_start_timer(in_dev, 1);
 }
 
@@ -957,7 +964,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                                in_dev->mr_qri;
                }
                /* cancel the interface change timer */
-               in_dev->mr_ifc_count = 0;
+               WRITE_ONCE(in_dev->mr_ifc_count, 0);
                if (del_timer(&in_dev->mr_ifc_timer))
                        __in_dev_put(in_dev);
                /* clear deleted report items */
@@ -1724,7 +1731,7 @@ void ip_mc_down(struct in_device *in_dev)
                igmp_group_dropped(pmc);
 
 #ifdef CONFIG_IP_MULTICAST
-       in_dev->mr_ifc_count = 0;
+       WRITE_ONCE(in_dev->mr_ifc_count, 0);
        if (del_timer(&in_dev->mr_ifc_timer))
                __in_dev_put(in_dev);
        in_dev->mr_gq_running = 0;
@@ -1941,7 +1948,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                pmc->sfmode = MCAST_INCLUDE;
 #ifdef CONFIG_IP_MULTICAST
                pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-               in_dev->mr_ifc_count = pmc->crcount;
+               WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
                for (psf = pmc->sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
                igmp_ifc_event(pmc->interface);
@@ -2120,7 +2127,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                /* else no filters; keep old mode for reports */
 
                pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-               in_dev->mr_ifc_count = pmc->crcount;
+               WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
                for (psf = pmc->sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
                igmp_ifc_event(in_dev);
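The igmp hunks above stop touching mr_ifc_count directly and instead pair READ_ONCE()/WRITE_ONCE() with a cmpxchg() retry loop, so the timer can decrement the counter safely even if another context rewrites it concurrently. A rough userspace sketch of that decrement-if-non-zero loop using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int ifc_count = 3;

/* Decrement the counter only if it is still non-zero, retrying when some
 * other context changed it between the read and the update; this mirrors
 * the READ_ONCE()/cmpxchg() loop in the hunk above.
 */
static int dec_if_positive(_Atomic unsigned int *ctr)
{
        unsigned int old = atomic_load(ctr);

        while (old != 0) {
                if (atomic_compare_exchange_weak(ctr, &old, old - 1))
                        return 1;       /* we performed the decrement */
                /* old was reloaded by the failed CAS; loop and retry */
        }
        return 0;                       /* already zero, nothing to do */
}

int main(void)
{
        while (dec_if_positive(&ifc_count))
                printf("count now %u\n", atomic_load(&ifc_count));
        return 0;
}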
index 12dca0c..95419b7 100644 (file)
@@ -473,6 +473,8 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 
 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
+       if (csum && skb_checksum_start(skb) < skb->data)
+               return -EINVAL;
        return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
index 99c0694..a6f20ee 100644 (file)
@@ -600,14 +600,14 @@ static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
        return oldest;
 }
 
-static inline u32 fnhe_hashfun(__be32 daddr)
+static u32 fnhe_hashfun(__be32 daddr)
 {
-       static u32 fnhe_hashrnd __read_mostly;
-       u32 hval;
+       static siphash_key_t fnhe_hash_key __read_mostly;
+       u64 hval;
 
-       net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
-       hval = jhash_1word((__force u32)daddr, fnhe_hashrnd);
-       return hash_32(hval, FNHE_HASH_SHIFT);
+       net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
+       hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
+       return hash_64(hval, FNHE_HASH_SHIFT);
 }
 
 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
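This hunk (and the matching IPv6 one further down) replaces jhash seeded with a random word by a keyed 64-bit hash, so a remote sender cannot predict which exception bucket a destination address will land in. A hedged sketch of the shape of that change: the splitmix-style mix below is only a self-contained stand-in for siphash_1u32(), and the fold mirrors hash_64(); real code should use a proper keyed PRF.

#include <stdint.h>
#include <stdio.h>

#define BUCKET_SHIFT 11   /* 2048 buckets, like FNHE_HASH_SHIFT */

/* Stand-in keyed mix; the kernel uses siphash_1u32() here. */
static uint64_t keyed_mix(uint32_t input, uint64_t key)
{
        uint64_t z = key ^ input;

        z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9ULL;
        z = (z ^ (z >> 27)) * 0x94d049bb133111ebULL;
        return z ^ (z >> 31);
}

/* Fold a 64-bit hash down to BUCKET_SHIFT bits, like hash_64(). */
static uint32_t fold64(uint64_t h)
{
        return (uint32_t)((h * 0x61c8864680b583ebULL) >> (64 - BUCKET_SHIFT));
}

int main(void)
{
        /* The kernel fills the key once with random bytes via
         * net_get_random_once(); a constant is used here only to keep
         * the sketch self-contained.
         */
        uint64_t key = 0x0123456789abcdefULL;
        uint32_t daddr = 0x0a000001;    /* 10.0.0.1 */

        printf("bucket %u\n", fold64(keyed_mix(daddr, key)));
        return 0;
}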
index 6ea3dc2..6274462 100644 (file)
@@ -1041,7 +1041,7 @@ static void bbr_init(struct sock *sk)
        bbr->prior_cwnd = 0;
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        bbr->rtt_cnt = 0;
-       bbr->next_rtt_delivered = 0;
+       bbr->next_rtt_delivered = tp->delivered;
        bbr->prev_ca_state = TCP_CA_Open;
        bbr->packet_conservation = 0;
 
index e09147a..fc61cd3 100644 (file)
@@ -298,6 +298,9 @@ int tcp_gro_complete(struct sk_buff *skb)
        if (th->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
+       if (skb->encapsulation)
+               skb->inner_transport_header = skb->transport_header;
+
        return 0;
 }
 EXPORT_SYMBOL(tcp_gro_complete);
index 9dde1e5..1380a6b 100644 (file)
@@ -624,6 +624,10 @@ static int udp_gro_complete_segment(struct sk_buff *skb)
 
        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+
+       if (skb->encapsulation)
+               skb->inner_transport_header = skb->transport_header;
+
        return 0;
 }
 
index 2d650dc..ef75c9b 100644 (file)
@@ -1341,7 +1341,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
        struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
                                lockdep_is_held(&rt->fib6_table->tb6_lock));
 
-       /* paired with smp_rmb() in rt6_get_cookie_safe() */
+       /* paired with smp_rmb() in fib6_get_cookie_safe() */
        smp_wmb();
        while (fn) {
                fn->fn_sernum = sernum;
index bc224f9..7a5e90e 100644 (file)
@@ -629,6 +629,8 @@ drop:
 
 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
+       if (csum && skb_checksum_start(skb) < skb->data)
+               return -EINVAL;
        return iptunnel_handle_offloads(skb,
                                        csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
index b6ddf23..c5e8ecb 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
 #include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <net/net_namespace.h>
 #include <net/snmp.h>
 #include <net/ipv6.h>
@@ -1484,17 +1485,24 @@ static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
 static u32 rt6_exception_hash(const struct in6_addr *dst,
                              const struct in6_addr *src)
 {
-       static u32 seed __read_mostly;
-       u32 val;
+       static siphash_key_t rt6_exception_key __read_mostly;
+       struct {
+               struct in6_addr dst;
+               struct in6_addr src;
+       } __aligned(SIPHASH_ALIGNMENT) combined = {
+               .dst = *dst,
+       };
+       u64 val;
 
-       net_get_random_once(&seed, sizeof(seed));
-       val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed);
+       net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));
 
 #ifdef CONFIG_IPV6_SUBTREES
        if (src)
-               val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val);
+               combined.src = *src;
 #endif
-       return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
+       val = siphash(&combined, sizeof(combined), &rt6_exception_key);
+
+       return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
 }
 
 /* Helper function to find the cached rt in the hash table
index 05f4c3c..fcae76d 100644 (file)
@@ -260,6 +260,8 @@ static void ieee80211_restart_work(struct work_struct *work)
        flush_work(&local->radar_detected_work);
 
        rtnl_lock();
+       /* we might do interface manipulations, so need both */
+       wiphy_lock(local->hw.wiphy);
 
        WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
             "%s called with hardware scan in progress\n", __func__);
index 4452455..7adcbc1 100644 (file)
@@ -885,20 +885,16 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
                return subflow->mp_capable;
        }
 
-       if (mp_opt->dss && mp_opt->use_ack) {
+       if ((mp_opt->dss && mp_opt->use_ack) ||
+           (mp_opt->add_addr && !mp_opt->echo)) {
                /* subflows are fully established as soon as we get any
-                * additional ack.
+                * additional ack, including ADD_ADDR.
                 */
                subflow->fully_established = 1;
                WRITE_ONCE(msk->fully_established, true);
                goto fully_established;
        }
 
-       if (mp_opt->add_addr) {
-               WRITE_ONCE(msk->fully_established, true);
-               return true;
-       }
-
        /* If the first established packet does not contain MP_CAPABLE + data
         * then fallback to TCP. Fallback scenarios requires a reset for
         * MP_JOIN subflows.
index d2591eb..7b37944 100644 (file)
@@ -27,7 +27,6 @@ struct mptcp_pm_addr_entry {
        struct mptcp_addr_info  addr;
        u8                      flags;
        int                     ifindex;
-       struct rcu_head         rcu;
        struct socket           *lsk;
 };
 
@@ -1136,36 +1135,12 @@ next:
        return 0;
 }
 
-struct addr_entry_release_work {
-       struct rcu_work rwork;
-       struct mptcp_pm_addr_entry *entry;
-};
-
-static void mptcp_pm_release_addr_entry(struct work_struct *work)
+/* caller must ensure the RCU grace period is already elapsed */
+static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
 {
-       struct addr_entry_release_work *w;
-       struct mptcp_pm_addr_entry *entry;
-
-       w = container_of(to_rcu_work(work), struct addr_entry_release_work, rwork);
-       entry = w->entry;
-       if (entry) {
-               if (entry->lsk)
-                       sock_release(entry->lsk);
-               kfree(entry);
-       }
-       kfree(w);
-}
-
-static void mptcp_pm_free_addr_entry(struct mptcp_pm_addr_entry *entry)
-{
-       struct addr_entry_release_work *w;
-
-       w = kmalloc(sizeof(*w), GFP_ATOMIC);
-       if (w) {
-               INIT_RCU_WORK(&w->rwork, mptcp_pm_release_addr_entry);
-               w->entry = entry;
-               queue_rcu_work(system_wq, &w->rwork);
-       }
+       if (entry->lsk)
+               sock_release(entry->lsk);
+       kfree(entry);
 }
 
 static int mptcp_nl_remove_id_zero_address(struct net *net,
@@ -1245,7 +1220,8 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
        spin_unlock_bh(&pernet->lock);
 
        mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr);
-       mptcp_pm_free_addr_entry(entry);
+       synchronize_rcu();
+       __mptcp_pm_release_addr_entry(entry);
 
        return ret;
 }
@@ -1298,6 +1274,7 @@ static void mptcp_nl_remove_addrs_list(struct net *net,
        }
 }
 
+/* caller must ensure the RCU grace period is already elapsed */
 static void __flush_addrs(struct list_head *list)
 {
        while (!list_empty(list)) {
@@ -1306,7 +1283,7 @@ static void __flush_addrs(struct list_head *list)
                cur = list_entry(list->next,
                                 struct mptcp_pm_addr_entry, list);
                list_del_rcu(&cur->list);
-               mptcp_pm_free_addr_entry(cur);
+               __mptcp_pm_release_addr_entry(cur);
        }
 }
 
@@ -1330,6 +1307,7 @@ static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
        bitmap_zero(pernet->id_bitmap, MAX_ADDR_ID + 1);
        spin_unlock_bh(&pernet->lock);
        mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
+       synchronize_rcu();
        __flush_addrs(&free_list);
        return 0;
 }
@@ -1940,7 +1918,8 @@ static void __net_exit pm_nl_exit_net(struct list_head *net_list)
                struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
 
                /* net is removed from namespace list, can't race with
-                * other modifiers
+                * other modifiers, also netns core already waited for a
+                * RCU grace period.
                 */
                __flush_addrs(&pernet->local_addr_list);
        }
index d1bef23..dd30c03 100644 (file)
@@ -132,8 +132,11 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
                        return ret;
-               if (ip > ip_to)
+               if (ip > ip_to) {
+                       if (ip_to == 0)
+                               return -IPSET_ERR_HASH_ELEM;
                        swap(ip, ip_to);
+               }
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
@@ -144,6 +147,10 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
+       /* 64bit division is not allowed on 32bit */
+       if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried) {
                ip = ntohl(h->next.ip);
                e.ip = htonl(ip);
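This and the following ipset hunks all add the same guard: widen the 32-bit range bounds to 64 bits before computing how many elements a request would create, so the size check cannot be defeated by wraparound. A small sketch of the difference, with a local constant standing in for IPSET_MAX_RANGE:

#include <stdint.h>
#include <stdio.h>

#define MAX_RANGE (1U << 20)    /* stand-in for IPSET_MAX_RANGE */

int main(void)
{
        uint32_t ip = 0, ip_to = 0xffffffff;

        /* 32-bit arithmetic wraps: 0xffffffff - 0 + 1 == 0, check passes. */
        uint32_t narrow = ip_to - ip + 1;

        /* Widening first keeps the real count: 2^32 elements. */
        uint64_t wide = (uint64_t)ip_to - ip + 1;

        printf("narrow=%u ok=%d\n", narrow, narrow <= MAX_RANGE);
        printf("wide=%llu ok=%d\n",
               (unsigned long long)wide, wide <= MAX_RANGE);
        return 0;
}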
index 18346d1..153de34 100644 (file)
@@ -121,6 +121,8 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
        e.mark &= h->markmask;
+       if (e.mark == 0 && e.ip == 0)
+               return -IPSET_ERR_HASH_ELEM;
 
        if (adt == IPSET_TEST ||
            !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) {
@@ -133,8 +135,11 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
                        return ret;
-               if (ip > ip_to)
+               if (ip > ip_to) {
+                       if (e.mark == 0 && ip_to == 0)
+                               return -IPSET_ERR_HASH_ELEM;
                        swap(ip, ip_to);
+               }
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
@@ -143,6 +148,9 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
                ip_set_mask_from_to(ip, ip_to, cidr);
        }
 
+       if (((u64)ip_to - ip + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        for (; ip <= ip_to; ip++) {
index e1ca111..7303138 100644 (file)
@@ -173,6 +173,9 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
+       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        for (; ip <= ip_to; ip++) {
index ab179e0..334fb1a 100644 (file)
@@ -180,6 +180,9 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
+       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        for (; ip <= ip_to; ip++) {
index 8f075b4..7df94f4 100644 (file)
@@ -253,6 +253,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
+       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        ip2_to = ip2_from;
        if (tb[IPSET_ATTR_IP2_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
index c1a11f0..1422739 100644 (file)
@@ -140,7 +140,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net4_elem e = { .cidr = HOST_MASK };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip = 0, ip_to = 0;
+       u32 ip = 0, ip_to = 0, ipn, n = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -188,6 +188,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (ip + UINT_MAX == ip_to)
                        return -IPSET_ERR_HASH_RANGE;
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+               n++;
+       } while (ipn++ < ip_to);
+
+       if (n > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        do {
index ddd51c2..9810f5b 100644 (file)
@@ -202,7 +202,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip = 0, ip_to = 0;
+       u32 ip = 0, ip_to = 0, ipn, n = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -256,6 +256,14 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip, ip_to, e.cidr);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+               n++;
+       } while (ipn++ < ip_to);
+
+       if (n > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried)
                ip = ntohl(h->next.ip);
index 6532f05..3d09eef 100644 (file)
@@ -168,7 +168,8 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netnet4_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 ip = 0, ip_to = 0;
-       u32 ip2 = 0, ip2_from = 0, ip2_to = 0;
+       u32 ip2 = 0, ip2_from = 0, ip2_to = 0, ipn;
+       u64 n = 0, m = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -244,6 +245,19 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+               n++;
+       } while (ipn++ < ip_to);
+       ipn = ip2_from;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+               m++;
+       } while (ipn++ < ip2_to);
+
+       if (n*m > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip[0]);
index ec1564a..09cf72e 100644 (file)
@@ -158,7 +158,8 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 port, port_to, p = 0, ip = 0, ip_to = 0;
+       u32 port, port_to, p = 0, ip = 0, ip_to = 0, ipn;
+       u64 n = 0;
        bool with_ports = false;
        u8 cidr;
        int ret;
@@ -235,6 +236,14 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &cidr);
+               n++;
+       } while (ipn++ < ip_to);
+
+       if (n*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip);
index 0e91d1e..19bcdb3 100644 (file)
@@ -182,7 +182,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netportnet4_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 ip = 0, ip_to = 0, p = 0, port, port_to;
-       u32 ip2_from = 0, ip2_to = 0, ip2;
+       u32 ip2_from = 0, ip2_to = 0, ip2, ipn;
+       u64 n = 0, m = 0;
        bool with_ports = false;
        int ret;
 
@@ -284,6 +285,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+               n++;
+       } while (ipn++ < ip_to);
+       ipn = ip2_from;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+               m++;
+       } while (ipn++ < ip2_to);
+
+       if (n*m*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip[0]);
index 5c03e51..d31dbcc 100644 (file)
@@ -66,22 +66,17 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
 
 struct conntrack_gc_work {
        struct delayed_work     dwork;
-       u32                     last_bucket;
+       u32                     next_bucket;
        bool                    exiting;
        bool                    early_drop;
-       long                    next_gc_run;
 };
 
 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
 static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
-/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
-#define GC_MAX_BUCKETS_DIV     128u
-/* upper bound of full table scan */
-#define GC_MAX_SCAN_JIFFIES    (16u * HZ)
-/* desired ratio of entries found to be expired */
-#define GC_EVICT_RATIO 50u
+#define GC_SCAN_INTERVAL       (120u * HZ)
+#define GC_SCAN_MAX_DURATION   msecs_to_jiffies(10)
 
 static struct conntrack_gc_work conntrack_gc_work;
 
@@ -1363,17 +1358,13 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
 
 static void gc_worker(struct work_struct *work)
 {
-       unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
-       unsigned int i, goal, buckets = 0, expired_count = 0;
-       unsigned int nf_conntrack_max95 = 0;
+       unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
+       unsigned int i, hashsz, nf_conntrack_max95 = 0;
+       unsigned long next_run = GC_SCAN_INTERVAL;
        struct conntrack_gc_work *gc_work;
-       unsigned int ratio, scanned = 0;
-       unsigned long next_run;
-
        gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
 
-       goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
-       i = gc_work->last_bucket;
+       i = gc_work->next_bucket;
        if (gc_work->early_drop)
                nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
 
@@ -1381,15 +1372,15 @@ static void gc_worker(struct work_struct *work)
                struct nf_conntrack_tuple_hash *h;
                struct hlist_nulls_head *ct_hash;
                struct hlist_nulls_node *n;
-               unsigned int hashsz;
                struct nf_conn *tmp;
 
-               i++;
                rcu_read_lock();
 
                nf_conntrack_get_ht(&ct_hash, &hashsz);
-               if (i >= hashsz)
-                       i = 0;
+               if (i >= hashsz) {
+                       rcu_read_unlock();
+                       break;
+               }
 
                hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
                        struct nf_conntrack_net *cnet;
@@ -1397,7 +1388,6 @@ static void gc_worker(struct work_struct *work)
 
                        tmp = nf_ct_tuplehash_to_ctrack(h);
 
-                       scanned++;
                        if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
                                nf_ct_offload_timeout(tmp);
                                continue;
@@ -1405,7 +1395,6 @@ static void gc_worker(struct work_struct *work)
 
                        if (nf_ct_is_expired(tmp)) {
                                nf_ct_gc_expired(tmp);
-                               expired_count++;
                                continue;
                        }
 
@@ -1438,7 +1427,14 @@ static void gc_worker(struct work_struct *work)
                 */
                rcu_read_unlock();
                cond_resched();
-       } while (++buckets < goal);
+               i++;
+
+               if (time_after(jiffies, end_time) && i < hashsz) {
+                       gc_work->next_bucket = i;
+                       next_run = 0;
+                       break;
+               }
+       } while (i < hashsz);
 
        if (gc_work->exiting)
                return;
@@ -1449,40 +1445,17 @@ static void gc_worker(struct work_struct *work)
         *
         * This worker is only here to reap expired entries when system went
         * idle after a busy period.
-        *
-        * The heuristics below are supposed to balance conflicting goals:
-        *
-        * 1. Minimize time until we notice a stale entry
-        * 2. Maximize scan intervals to not waste cycles
-        *
-        * Normally, expire ratio will be close to 0.
-        *
-        * As soon as a sizeable fraction of the entries have expired
-        * increase scan frequency.
         */
-       ratio = scanned ? expired_count * 100 / scanned : 0;
-       if (ratio > GC_EVICT_RATIO) {
-               gc_work->next_gc_run = min_interval;
-       } else {
-               unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
-
-               BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
-
-               gc_work->next_gc_run += min_interval;
-               if (gc_work->next_gc_run > max)
-                       gc_work->next_gc_run = max;
+       if (next_run) {
+               gc_work->early_drop = false;
+               gc_work->next_bucket = 0;
        }
-
-       next_run = gc_work->next_gc_run;
-       gc_work->last_bucket = i;
-       gc_work->early_drop = false;
        queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
 }
 
 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
 {
        INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
-       gc_work->next_gc_run = HZ;
        gc_work->exiting = false;
 }
 
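The conntrack rework above drops the old ratio-based heuristics in favour of a scan that runs for at most GC_SCAN_MAX_DURATION, records the bucket it stopped at in next_bucket, and requeues itself immediately when it had to stop early. A rough userspace sketch of that time-bounded, resumable scan; the table size, budget, and per-bucket work are illustrative:

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

#define TABLE_SIZE  4096
#define BUDGET_NSEC 2000000L            /* ~2 ms per pass */

static unsigned int next_bucket;        /* resume point between passes */

static long elapsed_nsec(const struct timespec *start)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - start->tv_sec) * 1000000000L +
               (now.tv_nsec - start->tv_nsec);
}

/* Returns 1 once the whole table has been covered, 0 if the time budget
 * ran out first; callers requeue the pass immediately in that case.
 */
static int gc_pass(void)
{
        struct timespec start;
        unsigned int i = next_bucket;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (; i < TABLE_SIZE; i++) {
                /* ... scan bucket i and reap expired entries here ... */
                if (elapsed_nsec(&start) > BUDGET_NSEC && i + 1 < TABLE_SIZE) {
                        next_bucket = i + 1;    /* resume here next time */
                        return 0;
                }
        }
        next_bucket = 0;
        return 1;
}

int main(void)
{
        int passes = 1;

        while (!gc_pass())
                passes++;
        printf("table covered in %d pass(es)\n", passes);
        return 0;
}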
index 3259416..af5115e 100644 (file)
@@ -1478,7 +1478,6 @@ void nf_conntrack_tcp_init_net(struct net *net)
 
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        tn->offload_timeout = 30 * HZ;
-       tn->offload_pickup = 120 * HZ;
 #endif
 }
 
index 698fee4..f8e3c0d 100644 (file)
@@ -271,7 +271,6 @@ void nf_conntrack_udp_init_net(struct net *net)
 
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        un->offload_timeout = 30 * HZ;
-       un->offload_pickup = 30 * HZ;
 #endif
 }
 
index 214d9f9..e84b499 100644 (file)
@@ -575,7 +575,6 @@ enum nf_ct_sysctl_index {
        NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK,
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD,
-       NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP,
 #endif
        NF_SYSCTL_CT_PROTO_TCP_LOOSE,
        NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
@@ -585,7 +584,6 @@ enum nf_ct_sysctl_index {
        NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM,
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD,
-       NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP,
 #endif
        NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP,
        NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6,
@@ -776,12 +774,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP] = {
-               .procname       = "nf_flowtable_tcp_pickup",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
 #endif
        [NF_SYSCTL_CT_PROTO_TCP_LOOSE] = {
                .procname       = "nf_conntrack_tcp_loose",
@@ -832,12 +824,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP] = {
-               .procname       = "nf_flowtable_udp_pickup",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
 #endif
        [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = {
                .procname       = "nf_conntrack_icmp_timeout",
@@ -1018,7 +1004,6 @@ static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
 
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD].data = &tn->offload_timeout;
-       table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP].data = &tn->offload_pickup;
 #endif
 
 }
@@ -1111,7 +1096,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
        table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout;
-       table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP].data = &un->offload_pickup;
 #endif
 
        nf_conntrack_standalone_init_tcp_sysctl(net, table);
index 551976e..8788b51 100644 (file)
@@ -183,7 +183,7 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
        const struct nf_conntrack_l4proto *l4proto;
        struct net *net = nf_ct_net(ct);
        int l4num = nf_ct_protonum(ct);
-       unsigned int timeout;
+       s32 timeout;
 
        l4proto = nf_ct_l4proto_find(l4num);
        if (!l4proto)
@@ -192,15 +192,20 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
        if (l4num == IPPROTO_TCP) {
                struct nf_tcp_net *tn = nf_tcp_pernet(net);
 
-               timeout = tn->offload_pickup;
+               timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+               timeout -= tn->offload_timeout;
        } else if (l4num == IPPROTO_UDP) {
                struct nf_udp_net *tn = nf_udp_pernet(net);
 
-               timeout = tn->offload_pickup;
+               timeout = tn->timeouts[UDP_CT_REPLIED];
+               timeout -= tn->offload_timeout;
        } else {
                return;
        }
 
+       if (timeout < 0)
+               timeout = 0;
+
        if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
                ct->timeout = nfct_time_stamp + timeout;
 }
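With the per-protocol offload_pickup knobs removed, the hunk above recomputes the remaining conntrack timeout as the normal established/replied timeout minus the offload timeout, doing the subtraction in a signed type and clamping at zero. A tiny sketch of why the signed clamp matters; the values are illustrative seconds:

#include <stdint.h>
#include <stdio.h>

/* Compute how much of the normal timeout should remain once an offloaded
 * flow falls back to the slow path: subtract in a signed type and clamp,
 * so a large offload timeout cannot wrap around to a huge unsigned value.
 */
static int32_t remaining_timeout(uint32_t established, uint32_t offload)
{
        int32_t timeout = (int32_t)established - (int32_t)offload;

        return timeout < 0 ? 0 : timeout;
}

int main(void)
{
        printf("%d\n", remaining_timeout(300, 30));  /* 270 */
        printf("%d\n", remaining_timeout(10, 30));   /* 0, not ~4 billion */
        return 0;
}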
index 202f57d..f554e2e 100644 (file)
@@ -89,11 +89,15 @@ static int nfnl_hook_put_nft_chain_info(struct sk_buff *nlskb,
        if (!nest2)
                goto cancel_nest;
 
-       ret = nla_put_string(nlskb, NFTA_CHAIN_TABLE, chain->table->name);
+       ret = nla_put_string(nlskb, NFNLA_CHAIN_TABLE, chain->table->name);
        if (ret)
                goto cancel_nest;
 
-       ret = nla_put_string(nlskb, NFTA_CHAIN_NAME, chain->name);
+       ret = nla_put_string(nlskb, NFNLA_CHAIN_NAME, chain->name);
+       if (ret)
+               goto cancel_nest;
+
+       ret = nla_put_u8(nlskb, NFNLA_CHAIN_FAMILY, chain->table->family);
        if (ret)
                goto cancel_nest;
 
@@ -109,18 +113,19 @@ cancel_nest:
 static int nfnl_hook_dump_one(struct sk_buff *nlskb,
                              const struct nfnl_dump_hook_data *ctx,
                              const struct nf_hook_ops *ops,
-                             unsigned int seq)
+                             int family, unsigned int seq)
 {
        u16 event = nfnl_msg_type(NFNL_SUBSYS_HOOK, NFNL_MSG_HOOK_GET);
        unsigned int portid = NETLINK_CB(nlskb).portid;
        struct nlmsghdr *nlh;
        int ret = -EMSGSIZE;
+       u32 hooknum;
 #ifdef CONFIG_KALLSYMS
        char sym[KSYM_SYMBOL_LEN];
        char *module_name;
 #endif
        nlh = nfnl_msg_put(nlskb, portid, seq, event,
-                          NLM_F_MULTI, ops->pf, NFNETLINK_V0, 0);
+                          NLM_F_MULTI, family, NFNETLINK_V0, 0);
        if (!nlh)
                goto nla_put_failure;
 
@@ -135,6 +140,7 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb,
        if (module_name) {
                char *end;
 
+               *module_name = '\0';
                module_name += 2;
                end = strchr(module_name, ']');
                if (end) {
@@ -151,7 +157,12 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb,
                goto nla_put_failure;
 #endif
 
-       ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(ops->hooknum));
+       if (ops->pf == NFPROTO_INET && ops->hooknum == NF_INET_INGRESS)
+               hooknum = NF_NETDEV_INGRESS;
+       else
+               hooknum = ops->hooknum;
+
+       ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(hooknum));
        if (ret)
                goto nla_put_failure;
 
@@ -259,7 +270,8 @@ static int nfnl_hook_dump(struct sk_buff *nlskb,
        ops = nf_hook_entries_get_hook_ops(e);
 
        for (; i < e->num_hook_entries; i++) {
-               err = nfnl_hook_dump_one(nlskb, ctx, ops[i], cb->seq);
+               err = nfnl_hook_dump_one(nlskb, ctx, ops[i], family,
+                                        cb->nlh->nlmsg_seq);
                if (err)
                        break;
        }
index e586424..9713035 100644 (file)
@@ -293,14 +293,14 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
 }
 
 /**
- * Parse vlan tag from vlan header.
+ * parse_vlan_tag - Parse vlan tag from vlan header.
  * @skb: skb containing frame to parse
  * @key_vh: pointer to parsed vlan tag
  * @untag_vlan: should the vlan header be removed from the frame
  *
- * Returns ERROR on memory error.
- * Returns 0 if it encounters a non-vlan or incomplete packet.
- * Returns 1 after successfully parsing vlan tag.
+ * Return: ERROR on memory error.
+ * %0 if it encounters a non-vlan or incomplete packet.
+ * %1 after successfully parsing vlan tag.
  */
 static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
                          bool untag_vlan)
@@ -532,6 +532,7 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
  *       L3 header
  * @key: output flow key
  *
+ * Return: %0 if successful, otherwise a negative errno value.
  */
 static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
 {
@@ -748,8 +749,6 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
  *
  * The caller must ensure that skb->len >= ETH_HLEN.
  *
- * Returns 0 if successful, otherwise a negative errno value.
- *
  * Initializes @skb header fields as follows:
  *
  *    - skb->mac_header: the L2 header.
@@ -764,6 +763,8 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
  *
  *    - skb->protocol: the type of the data starting at skb->network_header.
  *      Equals to key->eth.type.
+ *
+ * Return: %0 if successful, otherwise a negative errno value.
  */
 static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 {
index 88deb5b..cf2ce58 100644 (file)
@@ -507,6 +507,7 @@ void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
        }
 
        skb->dev = vport->dev;
+       skb->tstamp = 0;
        vport->ops->send(skb);
        return;
 
index 171b7f3..0c30908 100644 (file)
@@ -493,7 +493,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
                goto err;
        }
 
-       if (len != ALIGN(size, 4) + hdrlen)
+       if (!size || len != ALIGN(size, 4) + hdrlen)
                goto err;
 
        if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
index 9b6ffff..28c1b00 100644 (file)
@@ -131,9 +131,9 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
                cpu_relax();
        }
 
-       ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
+       ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
                                &off, PAGE_SIZE);
-       if (unlikely(ret != ibmr->sg_len))
+       if (unlikely(ret != ibmr->sg_dma_len))
                return ret < 0 ? ret : -EINVAL;
 
        if (cmpxchg(&frmr->fr_state,
index 7153c67..2ef4cd2 100644 (file)
@@ -273,6 +273,9 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
                        goto out;
        }
 
+       /* All mirred/redirected skbs should clear previous ct info */
+       nf_reset_ct(skb2);
+
        want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
 
        expects_nh = want_ingress || !m_mac_header_xmit;
index 9515428..28af8b1 100644 (file)
@@ -720,7 +720,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 skip_hash:
        if (flow_override)
                flow_hash = flow_override - 1;
-       else if (use_skbhash)
+       else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS))
                flow_hash = skb->hash;
        if (host_override) {
                dsthost_hash = host_override - 1;
index c1e84d1..c76701a 100644 (file)
@@ -660,6 +660,13 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
        sch_tree_lock(sch);
 
        q->nbands = nbands;
+       for (i = nstrict; i < q->nstrict; i++) {
+               INIT_LIST_HEAD(&q->classes[i].alist);
+               if (q->classes[i].qdisc->q.qlen) {
+                       list_add_tail(&q->classes[i].alist, &q->active);
+                       q->classes[i].deficit = quanta[i];
+               }
+       }
        q->nstrict = nstrict;
        memcpy(q->prio2band, priomap, sizeof(priomap));
 
index d9ac60f..a8dd06c 100644 (file)
@@ -913,7 +913,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 
        /* seqlock has the same scope of busylock, for NOLOCK qdisc */
        spin_lock_init(&sch->seqlock);
-       lockdep_set_class(&sch->busylock,
+       lockdep_set_class(&sch->seqlock,
                          dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 
        seqcount_init(&sch->running);
index 07b30d0..9c79374 100644 (file)
@@ -1739,8 +1739,6 @@ static void taprio_attach(struct Qdisc *sch)
                if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                        qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
                        old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
-                       if (ntx < dev->real_num_tx_queues)
-                               qdisc_hash_add(qdisc, false);
                } else {
                        old = dev_graft_qdisc(qdisc->dev_queue, sch);
                        qdisc_refcount_inc(sch);
index fe74c5f..db6b737 100644 (file)
@@ -857,14 +857,18 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
        memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength);
        cur_key->key = key;
 
-       if (replace) {
-               list_del_init(&shkey->key_list);
-               sctp_auth_shkey_release(shkey);
-               if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
-                       sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+       if (!replace) {
+               list_add(&cur_key->key_list, sh_keys);
+               return 0;
        }
+
+       list_del_init(&shkey->key_list);
+       sctp_auth_shkey_release(shkey);
        list_add(&cur_key->key_list, sh_keys);
 
+       if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
+               sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+
        return 0;
 }
 
index 8983896..c038efc 100644 (file)
@@ -795,7 +795,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
                        reason_code = SMC_CLC_DECL_NOSRVLINK;
                        goto connect_abort;
                }
-               smc->conn.lnk = link;
+               smc_switch_link_and_count(&smc->conn, link);
        }
 
        /* create send buffer and rmb */
index cd0d7c9..c160ff5 100644 (file)
@@ -917,8 +917,8 @@ static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
        return rc;
 }
 
-static void smc_switch_link_and_count(struct smc_connection *conn,
-                                     struct smc_link *to_lnk)
+void smc_switch_link_and_count(struct smc_connection *conn,
+                              struct smc_link *to_lnk)
 {
        atomic_dec(&conn->lnk->conn_cnt);
        conn->lnk = to_lnk;
index 6d6fd13..c043ecd 100644 (file)
@@ -97,6 +97,7 @@ struct smc_link {
        unsigned long           *wr_tx_mask;    /* bit mask of used indexes */
        u32                     wr_tx_cnt;      /* number of WR send buffers */
        wait_queue_head_t       wr_tx_wait;     /* wait for free WR send buf */
+       atomic_t                wr_tx_refcnt;   /* tx refs to link */
 
        struct smc_wr_buf       *wr_rx_bufs;    /* WR recv payload buffers */
        struct ib_recv_wr       *wr_rx_ibs;     /* WR recv meta data */
@@ -109,6 +110,7 @@ struct smc_link {
 
        struct ib_reg_wr        wr_reg;         /* WR register memory region */
        wait_queue_head_t       wr_reg_wait;    /* wait for wr_reg result */
+       atomic_t                wr_reg_refcnt;  /* reg refs to link */
        enum smc_wr_reg_state   wr_reg_state;   /* state of wr_reg request */
 
        u8                      gid[SMC_GID_SIZE];/* gid matching used vlan id*/
@@ -444,6 +446,8 @@ void smc_core_exit(void);
 int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
                   u8 link_idx, struct smc_init_info *ini);
 void smcr_link_clear(struct smc_link *lnk, bool log);
+void smc_switch_link_and_count(struct smc_connection *conn,
+                              struct smc_link *to_lnk);
 int smcr_buf_map_lgr(struct smc_link *lnk);
 int smcr_buf_reg_lgr(struct smc_link *lnk);
 void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
index 273eaf1..2e7560e 100644 (file)
@@ -888,6 +888,7 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
        if (!rc)
                goto out;
 out_clear_lnk:
+       lnk_new->state = SMC_LNK_INACTIVE;
        smcr_link_clear(lnk_new, false);
 out_reject:
        smc_llc_cli_add_link_reject(qentry);
@@ -1184,6 +1185,7 @@ int smc_llc_srv_add_link(struct smc_link *link)
                goto out_err;
        return 0;
 out_err:
+       link_new->state = SMC_LNK_INACTIVE;
        smcr_link_clear(link_new, false);
        return rc;
 }
@@ -1286,10 +1288,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
        del_llc->reason = 0;
        smc_llc_send_message(lnk, &qentry->msg); /* response */
 
-       if (smc_link_downing(&lnk_del->state)) {
-               if (smc_switch_conns(lgr, lnk_del, false))
-                       smc_wr_tx_wait_no_pending_sends(lnk_del);
-       }
+       if (smc_link_downing(&lnk_del->state))
+               smc_switch_conns(lgr, lnk_del, false);
        smcr_link_clear(lnk_del, true);
 
        active_links = smc_llc_active_link_count(lgr);
@@ -1805,8 +1805,6 @@ void smc_llc_link_clear(struct smc_link *link, bool log)
                                    link->smcibdev->ibdev->name, link->ibport);
        complete(&link->llc_testlink_resp);
        cancel_delayed_work_sync(&link->llc_testlink_wrk);
-       smc_wr_wakeup_reg_wait(link);
-       smc_wr_wakeup_tx_wait(link);
 }
 
 /* register a new rtoken at the remote peer (for all links) */
index 289025c..c79361d 100644 (file)
@@ -496,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
 /* Wakeup sndbuf consumers from any context (IRQ or process)
  * since there is more data to transmit; usable snd_wnd as max transmit
  */
-static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
        struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
        struct smc_link *link = conn->lnk;
@@ -550,6 +550,22 @@ out_unlock:
        return rc;
 }
 
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+       struct smc_link *link = conn->lnk;
+       int rc = -ENOLINK;
+
+       if (!link)
+               return rc;
+
+       atomic_inc(&link->wr_tx_refcnt);
+       if (smc_link_usable(link))
+               rc = _smcr_tx_sndbuf_nonempty(conn);
+       if (atomic_dec_and_test(&link->wr_tx_refcnt))
+               wake_up_all(&link->wr_tx_wait);
+       return rc;
+}
+
 static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
        struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
index cbc73a7..a419e9a 100644 (file)
@@ -322,9 +322,12 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
        if (rc)
                return rc;
 
+       atomic_inc(&link->wr_reg_refcnt);
        rc = wait_event_interruptible_timeout(link->wr_reg_wait,
                                              (link->wr_reg_state != POSTED),
                                              SMC_WR_REG_MR_WAIT_TIME);
+       if (atomic_dec_and_test(&link->wr_reg_refcnt))
+               wake_up_all(&link->wr_reg_wait);
        if (!rc) {
                /* timeout - terminate link */
                smcr_link_down_cond_sched(link);
@@ -566,10 +569,15 @@ void smc_wr_free_link(struct smc_link *lnk)
                return;
        ibdev = lnk->smcibdev->ibdev;
 
+       smc_wr_wakeup_reg_wait(lnk);
+       smc_wr_wakeup_tx_wait(lnk);
+
        if (smc_wr_tx_wait_no_pending_sends(lnk))
                memset(lnk->wr_tx_mask, 0,
                       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
                                                sizeof(*lnk->wr_tx_mask));
+       wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
+       wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
 
        if (lnk->wr_rx_dma_addr) {
                ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
@@ -728,7 +736,9 @@ int smc_wr_create_link(struct smc_link *lnk)
        memset(lnk->wr_tx_mask, 0,
               BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
        init_waitqueue_head(&lnk->wr_tx_wait);
+       atomic_set(&lnk->wr_tx_refcnt, 0);
        init_waitqueue_head(&lnk->wr_reg_wait);
+       atomic_set(&lnk->wr_reg_refcnt, 0);
        return rc;
 
 dma_unmap:
index 0b2dad3..532fff5 100644 (file)
@@ -1722,32 +1722,22 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
        return __sys_listen(fd, backlog);
 }
 
-int __sys_accept4_file(struct file *file, unsigned file_flags,
+struct file *do_accept(struct file *file, unsigned file_flags,
                       struct sockaddr __user *upeer_sockaddr,
-                      int __user *upeer_addrlen, int flags,
-                      unsigned long nofile)
+                      int __user *upeer_addrlen, int flags)
 {
        struct socket *sock, *newsock;
        struct file *newfile;
-       int err, len, newfd;
+       int err, len;
        struct sockaddr_storage address;
 
-       if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
-               return -EINVAL;
-
-       if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
-               flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
-
        sock = sock_from_file(file);
-       if (!sock) {
-               err = -ENOTSOCK;
-               goto out;
-       }
+       if (!sock)
+               return ERR_PTR(-ENOTSOCK);
 
-       err = -ENFILE;
        newsock = sock_alloc();
        if (!newsock)
-               goto out;
+               return ERR_PTR(-ENFILE);
 
        newsock->type = sock->type;
        newsock->ops = sock->ops;
@@ -1758,18 +1748,9 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
         */
        __module_get(newsock->ops->owner);
 
-       newfd = __get_unused_fd_flags(flags, nofile);
-       if (unlikely(newfd < 0)) {
-               err = newfd;
-               sock_release(newsock);
-               goto out;
-       }
        newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
-       if (IS_ERR(newfile)) {
-               err = PTR_ERR(newfile);
-               put_unused_fd(newfd);
-               goto out;
-       }
+       if (IS_ERR(newfile))
+               return newfile;
 
        err = security_socket_accept(sock, newsock);
        if (err)
@@ -1794,16 +1775,38 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
        }
 
        /* File flags are not inherited via accept() unlike another OSes. */
-
-       fd_install(newfd, newfile);
-       err = newfd;
-out:
-       return err;
+       return newfile;
 out_fd:
        fput(newfile);
-       put_unused_fd(newfd);
-       goto out;
+       return ERR_PTR(err);
+}
+
+int __sys_accept4_file(struct file *file, unsigned file_flags,
+                      struct sockaddr __user *upeer_sockaddr,
+                      int __user *upeer_addrlen, int flags,
+                      unsigned long nofile)
+{
+       struct file *newfile;
+       int newfd;
 
+       if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+               return -EINVAL;
+
+       if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+               flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+
+       newfd = __get_unused_fd_flags(flags, nofile);
+       if (unlikely(newfd < 0))
+               return newfd;
+
+       newfile = do_accept(file, file_flags, upeer_sockaddr, upeer_addrlen,
+                           flags);
+       if (IS_ERR(newfile)) {
+               put_unused_fd(newfd);
+               return PTR_ERR(newfile);
+       }
+       fd_install(newfd, newfile);
+       return newfd;
 }
 
 /*
index a81be45..3d685fe 100644 (file)
@@ -1980,7 +1980,7 @@ gss_svc_init_net(struct net *net)
                goto out2;
        return 0;
 out2:
-       destroy_use_gss_proxy_proc_entry(net);
+       rsi_cache_destroy_net(net);
 out1:
        rsc_cache_destroy_net(net);
        return rv;
index 56029e3..827bf3a 100644 (file)
@@ -8,14 +8,14 @@
 #include <linux/debugfs.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/clnt.h>
+
 #include "netns.h"
+#include "fail.h"
 
 static struct dentry *topdir;
 static struct dentry *rpc_clnt_dir;
 static struct dentry *rpc_xprt_dir;
 
-unsigned int rpc_inject_disconnect;
-
 static int
 tasks_show(struct seq_file *f, void *v)
 {
@@ -235,8 +235,6 @@ rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
        /* make tasks file */
        debugfs_create_file("info", S_IFREG | 0400, xprt->debugfs, xprt,
                            &xprt_info_fops);
-
-       atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
 }
 
 void
@@ -246,56 +244,30 @@ rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt)
        xprt->debugfs = NULL;
 }
 
-static int
-fault_open(struct inode *inode, struct file *filp)
-{
-       filp->private_data = kmalloc(128, GFP_KERNEL);
-       if (!filp->private_data)
-               return -ENOMEM;
-       return 0;
-}
+#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
+struct fail_sunrpc_attr fail_sunrpc = {
+       .attr                   = FAULT_ATTR_INITIALIZER,
+};
+EXPORT_SYMBOL_GPL(fail_sunrpc);
 
-static int
-fault_release(struct inode *inode, struct file *filp)
+static void fail_sunrpc_init(void)
 {
-       kfree(filp->private_data);
-       return 0;
-}
+       struct dentry *dir;
 
-static ssize_t
-fault_disconnect_read(struct file *filp, char __user *user_buf,
-                     size_t len, loff_t *offset)
-{
-       char *buffer = (char *)filp->private_data;
-       size_t size;
+       dir = fault_create_debugfs_attr("fail_sunrpc", NULL,
+                                       &fail_sunrpc.attr);
 
-       size = sprintf(buffer, "%u\n", rpc_inject_disconnect);
-       return simple_read_from_buffer(user_buf, len, offset, buffer, size);
-}
+       debugfs_create_bool("ignore-client-disconnect", S_IFREG | 0600, dir,
+                           &fail_sunrpc.ignore_client_disconnect);
 
-static ssize_t
-fault_disconnect_write(struct file *filp, const char __user *user_buf,
-                      size_t len, loff_t *offset)
+       debugfs_create_bool("ignore-server-disconnect", S_IFREG | 0600, dir,
+                           &fail_sunrpc.ignore_server_disconnect);
+}
+#else
+static void fail_sunrpc_init(void)
 {
-       char buffer[16];
-
-       if (len >= sizeof(buffer))
-               len = sizeof(buffer) - 1;
-       if (copy_from_user(buffer, user_buf, len))
-               return -EFAULT;
-       buffer[len] = '\0';
-       if (kstrtouint(buffer, 10, &rpc_inject_disconnect))
-               return -EINVAL;
-       return len;
 }
-
-static const struct file_operations fault_disconnect_fops = {
-       .owner          = THIS_MODULE,
-       .open           = fault_open,
-       .read           = fault_disconnect_read,
-       .write          = fault_disconnect_write,
-       .release        = fault_release,
-};
+#endif
 
 void __exit
 sunrpc_debugfs_exit(void)
@@ -309,16 +281,11 @@ sunrpc_debugfs_exit(void)
 void __init
 sunrpc_debugfs_init(void)
 {
-       struct dentry *rpc_fault_dir;
-
        topdir = debugfs_create_dir("sunrpc", NULL);
 
        rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir);
 
        rpc_xprt_dir = debugfs_create_dir("rpc_xprt", topdir);
 
-       rpc_fault_dir = debugfs_create_dir("inject_fault", topdir);
-
-       debugfs_create_file("disconnect", S_IFREG | 0400, rpc_fault_dir, NULL,
-                           &fault_disconnect_fops);
+       fail_sunrpc_init();
 }
diff --git a/net/sunrpc/fail.h b/net/sunrpc/fail.h
new file mode 100644 (file)
index 0000000..69dc30c
--- /dev/null
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021, Oracle. All rights reserved.
+ */
+
+#ifndef _NET_SUNRPC_FAIL_H_
+#define _NET_SUNRPC_FAIL_H_
+
+#include <linux/fault-inject.h>
+
+#if IS_ENABLED(CONFIG_FAULT_INJECTION)
+
+struct fail_sunrpc_attr {
+       struct fault_attr       attr;
+
+       bool                    ignore_client_disconnect;
+
+       bool                    ignore_server_disconnect;
+};
+
+extern struct fail_sunrpc_attr fail_sunrpc;
+
+#endif /* CONFIG_FAULT_INJECTION */
+
+#endif /* _NET_SUNRPC_FAIL_H_ */
index 0de918c..bfcbaf7 100644 (file)
@@ -31,6 +31,8 @@
 
 #include <trace/events/sunrpc.h>
 
+#include "fail.h"
+
 #define RPCDBG_FACILITY        RPCDBG_SVCDSP
 
 static void svc_unregister(const struct svc_serv *serv, struct net *net);
@@ -838,6 +840,27 @@ svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrser
 }
 EXPORT_SYMBOL_GPL(svc_set_num_threads_sync);
 
+/**
+ * svc_rqst_replace_page - Replace one page in rq_pages[]
+ * @rqstp: svc_rqst with pages to replace
+ * @page: replacement page
+ *
+ * When replacing a page in rq_pages, batch the release of the
+ * replaced pages to avoid hammering the page allocator.
+ */
+void svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
+{
+       if (*rqstp->rq_next_page) {
+               if (!pagevec_space(&rqstp->rq_pvec))
+                       __pagevec_release(&rqstp->rq_pvec);
+               pagevec_add(&rqstp->rq_pvec, *rqstp->rq_next_page);
+       }
+
+       get_page(page);
+       *(rqstp->rq_next_page++) = page;
+}
+EXPORT_SYMBOL_GPL(svc_rqst_replace_page);
+
 /*
  * Called from a server thread as it's exiting. Caller must hold the "service
  * mutex" for the service.
@@ -1503,6 +1526,12 @@ svc_process(struct svc_rqst *rqstp)
        struct svc_serv         *serv = rqstp->rq_server;
        u32                     dir;
 
+#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
+       if (!fail_sunrpc.ignore_server_disconnect &&
+           should_fail(&fail_sunrpc.attr, 1))
+               svc_xprt_deferred_close(rqstp->rq_xprt);
+#endif
+
        /*
         * Setup response xdr_buf.
         * Initially it has just one page
@@ -1629,6 +1658,21 @@ u32 svc_max_payload(const struct svc_rqst *rqstp)
 }
 EXPORT_SYMBOL_GPL(svc_max_payload);
 
+/**
+ * svc_proc_name - Return RPC procedure name in string form
+ * @rqstp: svc_rqst to operate on
+ *
+ * Return value:
+ *   Pointer to a NUL-terminated string
+ */
+const char *svc_proc_name(const struct svc_rqst *rqstp)
+{
+       if (rqstp && rqstp->rq_procinfo)
+               return rqstp->rq_procinfo->pc_name;
+       return "unknown";
+}
+
+
 /**
  * svc_encode_result_payload - mark a range of bytes as a result payload
  * @rqstp: svc_rqst to operate on
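
svc_rqst_replace_page(), added above, lets a server-side consumer swap a ready-made page into rq_pages instead of copying into the existing one; each displaced page is queued on rqstp->rq_pvec and the whole batch is released at once (see the pagevec_release() call added to svc_xprt_release() in the next file). A hypothetical caller, with made-up names, might look like:

static void fill_reply_from_cache(struct svc_rqst *rqstp,
                                  struct page **cached, int npages)
{
        int i;

        /* Each call swaps one cached page into rq_pages; the page it
         * displaces goes onto rqstp->rq_pvec for a batched release. */
        for (i = 0; i < npages; i++)
                svc_rqst_replace_page(rqstp, cached[i]);
}
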
index d66a8e4..e1153cb 100644 (file)
@@ -539,6 +539,7 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
        kfree(rqstp->rq_deferred);
        rqstp->rq_deferred = NULL;
 
+       pagevec_release(&rqstp->rq_pvec);
        svc_free_res_pages(rqstp);
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.page_base = 0;
@@ -664,6 +665,8 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
        struct xdr_buf *arg = &rqstp->rq_arg;
        unsigned long pages, filled;
 
+       pagevec_init(&rqstp->rq_pvec);
+
        pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
        if (pages > RPCSVC_MAXPAGES) {
                pr_warn_once("svc: warning: pages=%lu > RPCSVC_MAXPAGES=%lu\n",
@@ -835,7 +838,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
                rqstp->rq_stime = ktime_get();
                rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
-       }
+       } else
+               svc_xprt_received(xprt);
 out:
        trace_svc_handle_xprt(xprt, len);
        return len;
index fb6db09..05abe34 100644 (file)
@@ -56,6 +56,7 @@
 
 #include "sunrpc.h"
 #include "sysfs.h"
+#include "fail.h"
 
 /*
  * Local variables
@@ -855,6 +856,19 @@ xprt_init_autodisconnect(struct timer_list *t)
        queue_work(xprtiod_workqueue, &xprt->task_cleanup);
 }
 
+#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
+static void xprt_inject_disconnect(struct rpc_xprt *xprt)
+{
+       if (!fail_sunrpc.ignore_client_disconnect &&
+           should_fail(&fail_sunrpc.attr, 1))
+               xprt->ops->inject_disconnect(xprt);
+}
+#else
+static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
+{
+}
+#endif
+
 bool xprt_lock_connect(struct rpc_xprt *xprt,
                struct rpc_task *task,
                void *cookie)
index 1e65144..e27433f 100644 (file)
@@ -35,6 +35,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
  * controlling svcxprt_rdma is destroyed.
  */
 struct svc_rdma_rw_ctxt {
+       struct llist_node       rw_node;
        struct list_head        rw_list;
        struct rdma_rw_ctx      rw_ctx;
        unsigned int            rw_nents;
@@ -53,19 +54,19 @@ static struct svc_rdma_rw_ctxt *
 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
 {
        struct svc_rdma_rw_ctxt *ctxt;
+       struct llist_node *node;
 
        spin_lock(&rdma->sc_rw_ctxt_lock);
-
-       ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
-       if (ctxt) {
-               list_del(&ctxt->rw_list);
-               spin_unlock(&rdma->sc_rw_ctxt_lock);
+       node = llist_del_first(&rdma->sc_rw_ctxts);
+       spin_unlock(&rdma->sc_rw_ctxt_lock);
+       if (node) {
+               ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
        } else {
-               spin_unlock(&rdma->sc_rw_ctxt_lock);
                ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
                               GFP_KERNEL);
                if (!ctxt)
                        goto out_noctx;
+
                INIT_LIST_HEAD(&ctxt->rw_list);
        }
 
@@ -83,14 +84,18 @@ out_noctx:
        return NULL;
 }
 
-static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
-                                struct svc_rdma_rw_ctxt *ctxt)
+static void __svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
+                                  struct svc_rdma_rw_ctxt *ctxt,
+                                  struct llist_head *list)
 {
        sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
+       llist_add(&ctxt->rw_node, list);
+}
 
-       spin_lock(&rdma->sc_rw_ctxt_lock);
-       list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
-       spin_unlock(&rdma->sc_rw_ctxt_lock);
+static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
+                                struct svc_rdma_rw_ctxt *ctxt)
+{
+       __svc_rdma_put_rw_ctxt(rdma, ctxt, &rdma->sc_rw_ctxts);
 }
 
 /**
@@ -101,9 +106,10 @@ static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
 {
        struct svc_rdma_rw_ctxt *ctxt;
+       struct llist_node *node;
 
-       while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
-               list_del(&ctxt->rw_list);
+       while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) {
+               ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
                kfree(ctxt);
        }
 }
@@ -171,20 +177,35 @@ static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
        cc->cc_sqecount = 0;
 }
 
+/*
+ * The consumed rw_ctx's are cleaned and placed on a local llist so
+ * that only one atomic llist operation is needed to put them all
+ * back on the free list.
+ */
 static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
                                enum dma_data_direction dir)
 {
        struct svcxprt_rdma *rdma = cc->cc_rdma;
+       struct llist_node *first, *last;
        struct svc_rdma_rw_ctxt *ctxt;
+       LLIST_HEAD(free);
 
+       first = last = NULL;
        while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
                list_del(&ctxt->rw_list);
 
                rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
                                    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
                                    ctxt->rw_nents, dir);
-               svc_rdma_put_rw_ctxt(rdma, ctxt);
+               __svc_rdma_put_rw_ctxt(rdma, ctxt, &free);
+
+               ctxt->rw_node.next = first;
+               first = &ctxt->rw_node;
+               if (!last)
+                       last = first;
        }
+       if (first)
+               llist_add_batch(first, last, &rdma->sc_rw_ctxts);
 }
 
 /* State for sending a Write or Reply chunk.
@@ -248,8 +269,7 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 
        trace_svcrdma_wc_write(wc, &cc->cc_cid);
 
-       atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
-       wake_up(&rdma->sc_send_wait);
+       svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
 
        if (unlikely(wc->status != IB_WC_SUCCESS))
                svc_xprt_deferred_close(&rdma->sc_xprt);
@@ -304,9 +324,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
 
        trace_svcrdma_wc_read(wc, &cc->cc_cid);
 
-       atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
-       wake_up(&rdma->sc_send_wait);
-
+       svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
        cc->cc_status = wc->status;
        complete(&cc->cc_done);
        return;
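
The rw_ctxt conversion above (and the send_ctxt conversion in the next file) both move the free list to an llist: putting a context back is a lock-free llist_add(), while taking one still holds the per-transport spinlock around llist_del_first(), because concurrent deleters on an llist must be serialized. A distilled sketch of the pattern, with illustrative names:

struct ctx {
        struct llist_node node;
        /* ... payload ... */
};

static LLIST_HEAD(free_ctxs);
static DEFINE_SPINLOCK(get_lock);

static void ctx_put(struct ctx *c)
{
        /* Adders may run concurrently without a lock. */
        llist_add(&c->node, &free_ctxs);
}

static struct ctx *ctx_get(void)
{
        struct llist_node *n;

        /* llist_del_first() requires a single deleter at a time. */
        spin_lock(&get_lock);
        n = llist_del_first(&free_ctxs);
        spin_unlock(&get_lock);

        return n ? llist_entry(n, struct ctx, node) : NULL;
}
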
index d6bbafb..599021b 100644 (file)
 
 static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
 
-static inline struct svc_rdma_send_ctxt *
-svc_rdma_next_send_ctxt(struct list_head *list)
-{
-       return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
-                                       sc_list);
-}
-
 static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
                                   struct rpc_rdma_cid *cid)
 {
@@ -182,9 +175,10 @@ fail0:
 void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
 {
        struct svc_rdma_send_ctxt *ctxt;
+       struct llist_node *node;
 
-       while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
-               list_del(&ctxt->sc_list);
+       while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
+               ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
                ib_dma_unmap_single(rdma->sc_pd->device,
                                    ctxt->sc_sges[0].addr,
                                    rdma->sc_max_req_size,
@@ -204,12 +198,13 @@ void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
 struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
 {
        struct svc_rdma_send_ctxt *ctxt;
+       struct llist_node *node;
 
        spin_lock(&rdma->sc_send_lock);
-       ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
-       if (!ctxt)
+       node = llist_del_first(&rdma->sc_send_ctxts);
+       if (!node)
                goto out_empty;
-       list_del(&ctxt->sc_list);
+       ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
        spin_unlock(&rdma->sc_send_lock);
 
 out:
@@ -253,9 +248,21 @@ void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
                                             ctxt->sc_sges[i].length);
        }
 
-       spin_lock(&rdma->sc_send_lock);
-       list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
-       spin_unlock(&rdma->sc_send_lock);
+       llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
+}
+
+/**
+ * svc_rdma_wake_send_waiters - manage Send Queue accounting
+ * @rdma: controlling transport
+ * @avail: Number of additional SQEs that are now available
+ *
+ */
+void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
+{
+       atomic_add(avail, &rdma->sc_sq_avail);
+       smp_mb__after_atomic();
+       if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
+               wake_up(&rdma->sc_send_wait);
 }
 
 /**
@@ -275,11 +282,9 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
 
        trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
 
+       svc_rdma_wake_send_waiters(rdma, 1);
        complete(&ctxt->sc_done);
 
-       atomic_inc(&rdma->sc_sq_avail);
-       wake_up(&rdma->sc_send_wait);
-
        if (unlikely(wc->status != IB_WC_SUCCESS))
                svc_xprt_deferred_close(&rdma->sc_xprt);
 }
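
svc_rdma_wake_send_waiters() above uses the standard waitqueue_active() idiom: the waker publishes the returned credits, issues a full barrier with smp_mb__after_atomic(), and only then tests whether any task is queued; the waiter side is safe because wait_event() enqueues the task (which implies the matching barrier) before re-checking the condition. A generic sketch of the pairing, with illustrative names:

static atomic_t credits;
static DECLARE_WAIT_QUEUE_HEAD(credit_wq);

static void return_credits(int n)
{
        atomic_add(n, &credits);
        /* Order the credit update before the waitqueue_active() check. */
        smp_mb__after_atomic();
        if (waitqueue_active(&credit_wq))
                wake_up(&credit_wq);
}

static void wait_for_credits(int needed)
{
        /* wait_event() adds the task to credit_wq before re-testing the
         * condition, pairing with the barrier in return_credits(). */
        wait_event(credit_wq, atomic_read(&credits) >= needed);
}
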
index d94b775..94b20fb 100644 (file)
@@ -136,9 +136,9 @@ static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
        svc_xprt_init(net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
        INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
        INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
-       INIT_LIST_HEAD(&cma_xprt->sc_send_ctxts);
+       init_llist_head(&cma_xprt->sc_send_ctxts);
        init_llist_head(&cma_xprt->sc_recv_ctxts);
-       INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
+       init_llist_head(&cma_xprt->sc_rw_ctxts);
        init_waitqueue_head(&cma_xprt->sc_send_wait);
 
        spin_lock_init(&cma_xprt->sc_lock);
@@ -545,7 +545,6 @@ static void __svc_rdma_free(struct work_struct *work)
 {
        struct svcxprt_rdma *rdma =
                container_of(work, struct svcxprt_rdma, sc_work);
-       struct svc_xprt *xprt = &rdma->sc_xprt;
 
        /* This blocks until the Completion Queues are empty */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
@@ -553,12 +552,6 @@ static void __svc_rdma_free(struct work_struct *work)
 
        svc_rdma_flush_recv_queues(rdma);
 
-       /* Final put of backchannel client transport */
-       if (xprt->xpt_bc_xprt) {
-               xprt_put(xprt->xpt_bc_xprt);
-               xprt->xpt_bc_xprt = NULL;
-       }
-
        svc_rdma_destroy_rw_ctxts(rdma);
        svc_rdma_send_ctxts_destroy(rdma);
        svc_rdma_recv_ctxts_destroy(rdma);
index e573dce..b7dbdcb 100644 (file)
@@ -3149,24 +3149,6 @@ void cleanup_socket_xprt(void)
        xprt_unregister_transport(&xs_bc_tcp_transport);
 }
 
-static int param_set_uint_minmax(const char *val,
-               const struct kernel_param *kp,
-               unsigned int min, unsigned int max)
-{
-       unsigned int num;
-       int ret;
-
-       if (!val)
-               return -EINVAL;
-       ret = kstrtouint(val, 0, &num);
-       if (ret)
-               return ret;
-       if (num < min || num > max)
-               return -EINVAL;
-       *((unsigned int *)kp->arg) = num;
-       return 0;
-}
-
 static int param_set_portnr(const char *val, const struct kernel_param *kp)
 {
        return param_set_uint_minmax(val, kp,
index cf58684..1b7a487 100644 (file)
@@ -913,7 +913,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                              dnode, l->addr, dport, 0, 0);
        if (!skb)
-               return -ENOMEM;
+               return -ENOBUFS;
        msg_set_dest_droppable(buf_msg(skb), true);
        TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
        skb_queue_tail(&l->wakeupq, skb);
@@ -1031,7 +1031,7 @@ void tipc_link_reset(struct tipc_link *l)
  *
  * Consumes the buffer chain.
  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
- * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS or -ENOMEM
+ * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
  */
 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
@@ -1089,7 +1089,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                        if (!_skb) {
                                kfree_skb(skb);
                                __skb_queue_purge(list);
-                               return -ENOMEM;
+                               return -ENOBUFS;
                        }
                        __skb_queue_tail(transmq, skb);
                        tipc_link_set_skb_retransmit_time(skb, l);
index 75b99b7..8754bd8 100644 (file)
@@ -1518,7 +1518,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 
        if (unlikely(syn && !rc)) {
                tipc_set_sk_state(sk, TIPC_CONNECTING);
-               if (timeout) {
+               if (dlen && timeout) {
                        timeout = msecs_to_jiffies(timeout);
                        tipc_wait_for_connect(sock, &timeout);
                }
index e0c2c99..4f7c99d 100644 (file)
@@ -357,11 +357,14 @@ static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
 
 static void virtio_vsock_reset_sock(struct sock *sk)
 {
-       lock_sock(sk);
+       /* vmci_transport.c doesn't take sk_lock here either.  At least we're
+        * under vsock_table_lock so the sock cannot disappear while we're
+        * executing.
+        */
+
        sk->sk_state = TCP_CLOSE;
        sk->sk_err = ECONNRESET;
        sk_error_report(sk);
-       release_sock(sk);
 }
 
 static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
index 169ba8b..081e7ae 100644 (file)
@@ -1079,6 +1079,9 @@ virtio_transport_recv_connected(struct sock *sk,
                virtio_transport_recv_enqueue(vsk, pkt);
                sk->sk_data_ready(sk);
                return err;
+       case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
+               virtio_transport_send_credit_update(vsk);
+               break;
        case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
                sk->sk_write_space(sk);
                break;
index a20aec9..2bf2693 100644 (file)
@@ -298,8 +298,16 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src)
        len = nlmsg_attrlen(nlh_src, xfrm_msg_min[type]);
 
        nla_for_each_attr(nla, attrs, len, remaining) {
-               int err = xfrm_xlate64_attr(dst, nla);
+               int err;
 
+               switch (type) {
+               case XFRM_MSG_NEWSPDINFO:
+                       err = xfrm_nla_cpy(dst, nla, nla_len(nla));
+                       break;
+               default:
+                       err = xfrm_xlate64_attr(dst, nla);
+                       break;
+               }
                if (err)
                        return err;
        }
@@ -341,7 +349,8 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src
 
 /* Calculates len of translated 64-bit message. */
 static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
-                                           struct nlattr *attrs[XFRMA_MAX+1])
+                                           struct nlattr *attrs[XFRMA_MAX + 1],
+                                           int maxtype)
 {
        size_t len = nlmsg_len(src);
 
@@ -358,10 +367,20 @@ static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
        case XFRM_MSG_POLEXPIRE:
                len += 8;
                break;
+       case XFRM_MSG_NEWSPDINFO:
+               /* attributes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
+               return len;
        default:
                break;
        }
 
+       /* Unexpected for anything but XFRM_MSG_NEWSPDINFO; please
+        * correct both 64=>32-bit and 32=>64-bit translators to copy
+        * new attributes.
+        */
+       if (WARN_ON_ONCE(maxtype))
+               return len;
+
        if (attrs[XFRMA_SA])
                len += 4;
        if (attrs[XFRMA_POLICY])
@@ -440,7 +459,8 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
 
 static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
                        struct nlattr *attrs[XFRMA_MAX+1],
-                       size_t size, u8 type, struct netlink_ext_ack *extack)
+                       size_t size, u8 type, int maxtype,
+                       struct netlink_ext_ack *extack)
 {
        size_t pos;
        int i;
@@ -520,6 +540,25 @@ static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
        }
        pos = dst->nlmsg_len;
 
+       if (maxtype) {
+               /* attributes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
+               WARN_ON_ONCE(src->nlmsg_type != XFRM_MSG_NEWSPDINFO);
+
+               for (i = 1; i <= maxtype; i++) {
+                       int err;
+
+                       if (!attrs[i])
+                               continue;
+
+                       /* just copy - no need for translation */
+                       err = xfrm_attr_cpy32(dst, &pos, attrs[i], size,
+                                       nla_len(attrs[i]), nla_len(attrs[i]));
+                       if (err)
+                               return err;
+               }
+               return 0;
+       }
+
        for (i = 1; i < XFRMA_MAX + 1; i++) {
                int err;
 
@@ -564,7 +603,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
        if (err < 0)
                return ERR_PTR(err);
 
-       len = xfrm_user_rcv_calculate_len64(h32, attrs);
+       len = xfrm_user_rcv_calculate_len64(h32, attrs, maxtype);
        /* The message doesn't need translation */
        if (len == nlmsg_len(h32))
                return NULL;
@@ -574,7 +613,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
        if (!h64)
                return ERR_PTR(-ENOMEM);
 
-       err = xfrm_xlate32(h64, h32, attrs, len, type, extack);
+       err = xfrm_xlate32(h64, h32, attrs, len, type, maxtype, extack);
        if (err < 0) {
                kvfree(h64);
                return ERR_PTR(err);
index 2e8afe0..cb40ff0 100644 (file)
@@ -241,7 +241,7 @@ static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
                        break;
        }
 
-       WARN_ON(!pos);
+       WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));
 
        if (--pos->users)
                return;
index 827d842..7f881f5 100644 (file)
@@ -155,7 +155,6 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
                                                __read_mostly;
 
 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
-static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation;
 
 static struct rhashtable xfrm_policy_inexact_table;
 static const struct rhashtable_params xfrm_pol_inexact_params;
@@ -585,7 +584,7 @@ static void xfrm_bydst_resize(struct net *net, int dir)
                return;
 
        spin_lock_bh(&net->xfrm.xfrm_policy_lock);
-       write_seqcount_begin(&xfrm_policy_hash_generation);
+       write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
 
        odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
                                lockdep_is_held(&net->xfrm.xfrm_policy_lock));
@@ -596,7 +595,7 @@ static void xfrm_bydst_resize(struct net *net, int dir)
        rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
        net->xfrm.policy_bydst[dir].hmask = nhashmask;
 
-       write_seqcount_end(&xfrm_policy_hash_generation);
+       write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
        synchronize_rcu();
@@ -1245,7 +1244,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
        } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
 
        spin_lock_bh(&net->xfrm.xfrm_policy_lock);
-       write_seqcount_begin(&xfrm_policy_hash_generation);
+       write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
 
        /* make sure that we can insert the indirect policies again before
         * we start with destructive action.
@@ -1354,7 +1353,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
 
 out_unlock:
        __xfrm_policy_inexact_flush(net);
-       write_seqcount_end(&xfrm_policy_hash_generation);
+       write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
        mutex_unlock(&hash_resize_mutex);
@@ -2091,15 +2090,12 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
        if (unlikely(!daddr || !saddr))
                return NULL;
 
- retry:
-       sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
        rcu_read_lock();
-
-       chain = policy_hash_direct(net, daddr, saddr, family, dir);
-       if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
-               rcu_read_unlock();
-               goto retry;
-       }
+ retry:
+       do {
+               sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
+               chain = policy_hash_direct(net, daddr, saddr, family, dir);
+       } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
 
        ret = NULL;
        hlist_for_each_entry_rcu(pol, chain, bydst) {
@@ -2130,15 +2126,11 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
        }
 
 skip_inexact:
-       if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
-               rcu_read_unlock();
+       if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
                goto retry;
-       }
 
-       if (ret && !xfrm_pol_hold_rcu(ret)) {
-               rcu_read_unlock();
+       if (ret && !xfrm_pol_hold_rcu(ret))
                goto retry;
-       }
 fail:
        rcu_read_unlock();
 
@@ -4089,6 +4081,7 @@ static int __net_init xfrm_net_init(struct net *net)
        /* Initialize the per-net locks here */
        spin_lock_init(&net->xfrm.xfrm_state_lock);
        spin_lock_init(&net->xfrm.xfrm_policy_lock);
+       seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
        mutex_init(&net->xfrm.xfrm_cfg_mutex);
 
        rv = xfrm_statistics_init(net);
@@ -4133,7 +4126,6 @@ void __init xfrm_init(void)
 {
        register_pernet_subsys(&xfrm_net_ops);
        xfrm_dev_init();
-       seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex);
        xfrm_input_init();
 
 #ifdef CONFIG_XFRM_ESPINTCP
index b47d613..7aff641 100644 (file)
@@ -2811,6 +2811,16 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        err = link->doit(skb, nlh, attrs);
 
+       /* We need to free skb allocated in xfrm_alloc_compat() before
+        * returning from this function, because consume_skb() won't take
+        * care of frag_list since netlink destructor sets
+        * skb->head to NULL (see netlink_skb_destructor()).
+        */
+       if (skb_has_frag_list(skb)) {
+               kfree_skb(skb_shinfo(skb)->frag_list);
+               skb_shinfo(skb)->frag_list = NULL;
+       }
+
 err:
        kvfree(nlh64);
        return err;
index 13a35f7..e61471a 100644 (file)
  *         In most cases, the __assign_str() macro will take the same
  *         parameters as the __string() macro had to declare the string.
  *
+ *   __string_len: This is a helper to a __dynamic_array, but it understands
+ *        that the array has characters in it, and with the combined
+ *         use of __assign_str_len(), it will allocate 'len' + 1 bytes
+ *         in the ring buffer and add a '\0' to the string. This is
+ *         useful if the string being saved has no terminating '\0' byte.
+ *         It requires that the length of the string is known as it acts
+ *         like a memcpy().
+ *
+ *         Declared with:
+ *
+ *         __string_len(foo, bar, len)
+ *
+ *         To assign this string, use the helper macro __assign_str_len().
+ *
+ *         __assign_str_len(foo, bar, len);
+ *
+ *         Then len + 1 is allocated to the ring buffer, and a nul terminating
+ *         byte is added. This is similar to:
+ *
+ *         memcpy(__get_str(foo), bar, len);
+ *         __get_str(foo)[len] = 0;
+ *
+ *        The advantage of using this over __dynamic_array is that it
+ *        takes care of allocating the extra byte on the ring buffer
+ *        for the '\0' terminating byte, and __get_str(foo) can be used
+ *        in the TP_printk().
+ *
  *   __bitmask: This is another kind of __dynamic_array, but it expects
  *         an array of longs, and the number of bits to parse. It takes
  *         two parameters (name, nr_bits), where name is the name of the
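
As a concrete illustration of the __string_len()/__assign_str_len() documentation added above, a hypothetical tracepoint that records a name buffer of known length which may lack a terminating NUL could be declared roughly like this (the event and field names are invented for the example):

TRACE_EVENT(sample_name,
        TP_PROTO(const char *buf, size_t len),
        TP_ARGS(buf, len),
        TP_STRUCT__entry(
                __string_len(name, buf, len)
        ),
        TP_fast_assign(
                __assign_str_len(name, buf, len);
        ),
        TP_printk("name=%s", __get_str(name))
);
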
index 9c7fbd4..0e7bab3 100755 (executable)
@@ -14,9 +14,9 @@ if [ $? -ne 0 ]; then
 fi
 
 cat <<EOF |
-asm-generic/atomic-instrumented.h
-asm-generic/atomic-long.h
-linux/atomic-arch-fallback.h
+linux/atomic/atomic-instrumented.h
+linux/atomic/atomic-long.h
+linux/atomic/atomic-arch-fallback.h
 EOF
 while read header; do
        OLDSUM="$(tail -n 1 ${LINUXDIR}/include/${header})"
index 59c0052..ef76408 100755 (executable)
@@ -1,8 +1,8 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}_acquire(${params})
+arch_${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
-       ${ret} ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+       ${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
        __atomic_acquire_fence();
        return ret;
 }
index a66635b..15caa2e 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_add_negative - add and test if negative
+ * arch_${atomic}_add_negative - add and test if negative
  * @i: integer value to add
  * @v: pointer of type ${atomic}_t
  *
@@ -9,8 +9,8 @@ cat <<EOF
  * result is greater than or equal to zero.
  */
 static __always_inline bool
-${arch}${atomic}_add_negative(${int} i, ${atomic}_t *v)
+arch_${atomic}_add_negative(${int} i, ${atomic}_t *v)
 {
-       return ${arch}${atomic}_add_return(i, v) < 0;
+       return arch_${atomic}_add_return(i, v) < 0;
 }
 EOF
index 2ff598a..9e5159c 100755 (executable)
@@ -1,6 +1,6 @@
 cat << EOF
 /**
- * ${arch}${atomic}_add_unless - add unless the number is already a given value
+ * arch_${atomic}_add_unless - add unless the number is already a given value
  * @v: pointer of type ${atomic}_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -9,8 +9,8 @@ cat << EOF
  * Returns true if the addition was done.
  */
 static __always_inline bool
-${arch}${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-       return ${arch}${atomic}_fetch_add_unless(v, a, u) != u;
+       return arch_${atomic}_fetch_add_unless(v, a, u) != u;
 }
 EOF
index 3f18663..5a42f54 100755 (executable)
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+arch_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
 {
-       ${retstmt}${arch}${atomic}_${pfx}and${sfx}${order}(~i, v);
+       ${retstmt}arch_${atomic}_${pfx}and${sfx}${order}(~i, v);
 }
 EOF
index e2e01f0..8c144c8 100755 (executable)
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+arch_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
 {
-       ${retstmt}${arch}${atomic}_${pfx}sub${sfx}${order}(1, v);
+       ${retstmt}arch_${atomic}_${pfx}sub${sfx}${order}(1, v);
 }
 EOF
index e8a5e49..8549f35 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_dec_and_test - decrement and test
+ * arch_${atomic}_dec_and_test - decrement and test
  * @v: pointer of type ${atomic}_t
  *
  * Atomically decrements @v by 1 and
@@ -8,8 +8,8 @@ cat <<EOF
  * cases.
  */
 static __always_inline bool
-${arch}${atomic}_dec_and_test(${atomic}_t *v)
+arch_${atomic}_dec_and_test(${atomic}_t *v)
 {
-       return ${arch}${atomic}_dec_return(v) == 0;
+       return arch_${atomic}_dec_return(v) == 0;
 }
 EOF
index 527adec..86bdced 100755 (executable)
@@ -1,14 +1,14 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_dec_if_positive(${atomic}_t *v)
+arch_${atomic}_dec_if_positive(${atomic}_t *v)
 {
-       ${int} dec, c = ${arch}${atomic}_read(v);
+       ${int} dec, c = arch_${atomic}_read(v);
 
        do {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, dec));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, dec));
 
        return dec;
 }
index dcab684..c531d5a 100755 (executable)
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline bool
-${arch}${atomic}_dec_unless_positive(${atomic}_t *v)
+arch_${atomic}_dec_unless_positive(${atomic}_t *v)
 {
-       ${int} c = ${arch}${atomic}_read(v);
+       ${int} c = arch_${atomic}_read(v);
 
        do {
                if (unlikely(c > 0))
                        return false;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, c - 1));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, c - 1));
 
        return true;
 }
index 3764fc8..07757d8 100755 (executable)
@@ -1,10 +1,10 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}(${params})
+arch_${atomic}_${pfx}${name}${sfx}(${params})
 {
        ${ret} ret;
        __atomic_pre_full_fence();
-       ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+       ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
        __atomic_post_full_fence();
        return ret;
 }
index 0e0b9ae..68ce13c 100755 (executable)
@@ -1,6 +1,6 @@
 cat << EOF
 /**
- * ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value
+ * arch_${atomic}_fetch_add_unless - add unless the number is already a given value
  * @v: pointer of type ${atomic}_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -9,14 +9,14 @@ cat << EOF
  * Returns original value of @v
  */
 static __always_inline ${int}
-${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-       ${int} c = ${arch}${atomic}_read(v);
+       ${int} c = arch_${atomic}_read(v);
 
        do {
                if (unlikely(c == u))
                        break;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, c + a));
 
        return c;
 }
index 15ec629..3c2c373 100755 (executable)
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
+arch_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
 {
-       ${retstmt}${arch}${atomic}_${pfx}add${sfx}${order}(1, v);
+       ${retstmt}arch_${atomic}_${pfx}add${sfx}${order}(1, v);
 }
 EOF
index cecc832..0cf23fe 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_inc_and_test - increment and test
+ * arch_${atomic}_inc_and_test - increment and test
  * @v: pointer of type ${atomic}_t
  *
  * Atomically increments @v by 1
@@ -8,8 +8,8 @@ cat <<EOF
  * other cases.
  */
 static __always_inline bool
-${arch}${atomic}_inc_and_test(${atomic}_t *v)
+arch_${atomic}_inc_and_test(${atomic}_t *v)
 {
-       return ${arch}${atomic}_inc_return(v) == 0;
+       return arch_${atomic}_inc_return(v) == 0;
 }
 EOF
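
Substituting ${atomic}=atomic and ${int}=int, the template above now emits into linux/atomic/atomic-arch-fallback.h a fallback of roughly this shape:

static __always_inline bool
arch_atomic_inc_and_test(atomic_t *v)
{
        return arch_atomic_inc_return(v) == 0;
}
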
index 50f2d4d..ed8a1f5 100755 (executable)
@@ -1,14 +1,14 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_inc_not_zero - increment unless the number is zero
+ * arch_${atomic}_inc_not_zero - increment unless the number is zero
  * @v: pointer of type ${atomic}_t
  *
  * Atomically increments @v by 1, if @v is non-zero.
  * Returns true if the increment was done.
  */
 static __always_inline bool
-${arch}${atomic}_inc_not_zero(${atomic}_t *v)
+arch_${atomic}_inc_not_zero(${atomic}_t *v)
 {
-       return ${arch}${atomic}_add_unless(v, 1, 0);
+       return arch_${atomic}_add_unless(v, 1, 0);
 }
 EOF
index 87629e0..95d8ce4 100755 (executable)
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline bool
-${arch}${atomic}_inc_unless_negative(${atomic}_t *v)
+arch_${atomic}_inc_unless_negative(${atomic}_t *v)
 {
-       ${int} c = ${arch}${atomic}_read(v);
+       ${int} c = arch_${atomic}_read(v);
 
        do {
                if (unlikely(c < 0))
                        return false;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, c + 1));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, c + 1));
 
        return true;
 }
index 341a88d..803ba75 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_read_acquire(const ${atomic}_t *v)
+arch_${atomic}_read_acquire(const ${atomic}_t *v)
 {
        return smp_load_acquire(&(v)->counter);
 }
index f8906d5..b46feb5 100755 (executable)
@@ -1,8 +1,8 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}_release(${params})
+arch_${atomic}_${pfx}${name}${sfx}_release(${params})
 {
        __atomic_release_fence();
-       ${retstmt}${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+       ${retstmt}arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
 }
 EOF
index 7606827..86ede75 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline void
-${arch}${atomic}_set_release(${atomic}_t *v, ${int} i)
+arch_${atomic}_set_release(${atomic}_t *v, ${int} i)
 {
        smp_store_release(&(v)->counter, i);
 }
index c580f4c..260f373 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_sub_and_test - subtract value from variable and test result
+ * arch_${atomic}_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
  * @v: pointer of type ${atomic}_t
  *
@@ -9,8 +9,8 @@ cat <<EOF
  * other cases.
  */
 static __always_inline bool
-${arch}${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
+arch_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
 {
-       return ${arch}${atomic}_sub_return(i, v) == 0;
+       return arch_${atomic}_sub_return(i, v) == 0;
 }
 EOF
index 06db0f7..890f850 100755 (executable)
@@ -1,9 +1,9 @@
 cat <<EOF
 static __always_inline bool
-${arch}${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
+arch_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
 {
        ${int} r, o = *old;
-       r = ${arch}${atomic}_cmpxchg${order}(v, o, new);
+       r = arch_${atomic}_cmpxchg${order}(v, o, new);
        if (unlikely(r != o))
                *old = r;
        return likely(r == o);
index 317a6ce..8e2da71 100755 (executable)
@@ -2,11 +2,10 @@
 # SPDX-License-Identifier: GPL-2.0
 
 ATOMICDIR=$(dirname $0)
-ARCH=$2
 
 . ${ATOMICDIR}/atomic-tbl.sh
 
-#gen_template_fallback(template, meta, pfx, name, sfx, order, arch, atomic, int, args...)
+#gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
 gen_template_fallback()
 {
        local template="$1"; shift
@@ -15,11 +14,10 @@ gen_template_fallback()
        local name="$1"; shift
        local sfx="$1"; shift
        local order="$1"; shift
-       local arch="$1"; shift
        local atomic="$1"; shift
        local int="$1"; shift
 
-       local atomicname="${arch}${atomic}_${pfx}${name}${sfx}${order}"
+       local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
 
        local ret="$(gen_ret_type "${meta}" "${int}")"
        local retstmt="$(gen_ret_stmt "${meta}")"
@@ -34,7 +32,7 @@ gen_template_fallback()
        fi
 }
 
-#gen_proto_fallback(meta, pfx, name, sfx, order, arch, atomic, int, args...)
+#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
 gen_proto_fallback()
 {
        local meta="$1"; shift
@@ -65,44 +63,26 @@ gen_proto_order_variant()
        local name="$1"; shift
        local sfx="$1"; shift
        local order="$1"; shift
-       local arch="$1"
-       local atomic="$2"
+       local atomic="$1"
 
-       local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+       local basename="arch_${atomic}_${pfx}${name}${sfx}"
 
-       printf "#define arch_${basename}${order} ${basename}${order}\n"
+       printf "#define ${basename}${order} ${basename}${order}\n"
 }
 
-#gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
+#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
 gen_proto_order_variants()
 {
        local meta="$1"; shift
        local pfx="$1"; shift
        local name="$1"; shift
        local sfx="$1"; shift
-       local arch="$1"
-       local atomic="$2"
+       local atomic="$1"
 
-       local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+       local basename="arch_${atomic}_${pfx}${name}${sfx}"
 
        local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
 
-       if [ -z "$arch" ]; then
-               gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
-
-               if meta_has_acquire "${meta}"; then
-                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
-               fi
-               if meta_has_release "${meta}"; then
-                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
-               fi
-               if meta_has_relaxed "${meta}"; then
-                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
-               fi
-
-               echo ""
-       fi
-
        # If we don't have relaxed atomics, then we don't bother with ordering fallbacks
        # read_acquire and set_release need to be templated, though
        if ! meta_has_relaxed "${meta}"; then
@@ -128,7 +108,7 @@ gen_proto_order_variants()
        gen_basic_fallbacks "${basename}"
 
        if [ ! -z "${template}" ]; then
-               printf "#endif /* ${arch}${atomic}_${pfx}${name}${sfx} */\n\n"
+               printf "#endif /* ${basename} */\n\n"
                gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
                gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
                gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
@@ -187,38 +167,38 @@ gen_try_cmpxchg_fallback()
        local order="$1"; shift;
 
 cat <<EOF
-#ifndef ${ARCH}try_cmpxchg${order}
-#define ${ARCH}try_cmpxchg${order}(_ptr, _oldp, _new) \\
+#ifndef arch_try_cmpxchg${order}
+#define arch_try_cmpxchg${order}(_ptr, _oldp, _new) \\
 ({ \\
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \\
-       ___r = ${ARCH}cmpxchg${order}((_ptr), ___o, (_new)); \\
+       ___r = arch_cmpxchg${order}((_ptr), ___o, (_new)); \\
        if (unlikely(___r != ___o)) \\
                *___op = ___r; \\
        likely(___r == ___o); \\
 })
-#endif /* ${ARCH}try_cmpxchg${order} */
+#endif /* arch_try_cmpxchg${order} */
 
 EOF
 }
 
 gen_try_cmpxchg_fallbacks()
 {
-       printf "#ifndef ${ARCH}try_cmpxchg_relaxed\n"
-       printf "#ifdef ${ARCH}try_cmpxchg\n"
+       printf "#ifndef arch_try_cmpxchg_relaxed\n"
+       printf "#ifdef arch_try_cmpxchg\n"
 
-       gen_basic_fallbacks "${ARCH}try_cmpxchg"
+       gen_basic_fallbacks "arch_try_cmpxchg"
 
-       printf "#endif /* ${ARCH}try_cmpxchg */\n\n"
+       printf "#endif /* arch_try_cmpxchg */\n\n"
 
        for order in "" "_acquire" "_release" "_relaxed"; do
                gen_try_cmpxchg_fallback "${order}"
        done
 
-       printf "#else /* ${ARCH}try_cmpxchg_relaxed */\n"
+       printf "#else /* arch_try_cmpxchg_relaxed */\n"
 
-       gen_order_fallbacks "${ARCH}try_cmpxchg"
+       gen_order_fallbacks "arch_try_cmpxchg"
 
-       printf "#endif /* ${ARCH}try_cmpxchg_relaxed */\n\n"
+       printf "#endif /* arch_try_cmpxchg_relaxed */\n\n"
 }
 
 cat << EOF
@@ -234,14 +214,14 @@ cat << EOF
 
 EOF
 
-for xchg in "${ARCH}xchg" "${ARCH}cmpxchg" "${ARCH}cmpxchg64"; do
+for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64"; do
        gen_xchg_fallbacks "${xchg}"
 done
 
 gen_try_cmpxchg_fallbacks
 
 grep '^[a-z]' "$1" | while read name meta args; do
-       gen_proto "${meta}" "${name}" "${ARCH}" "atomic" "int" ${args}
+       gen_proto "${meta}" "${name}" "atomic" "int" ${args}
 done
 
 cat <<EOF
@@ -252,7 +232,7 @@ cat <<EOF
 EOF
 
 grep '^[a-z]' "$1" | while read name meta args; do
-       gen_proto "${meta}" "${name}" "${ARCH}" "atomic64" "s64" ${args}
+       gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
 done
 
 cat <<EOF
index b0c45ae..035ceb4 100755 (executable)
@@ -121,8 +121,8 @@ cat << EOF
  * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
  * double instrumentation.
  */
-#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
 
 #include <linux/build_bug.h>
 #include <linux/compiler.h>
@@ -138,6 +138,11 @@ grep '^[a-z]' "$1" | while read name meta args; do
        gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
 done
 
+grep '^[a-z]' "$1" | while read name meta args; do
+       gen_proto "${meta}" "${name}" "atomic_long" "long" ${args}
+done
+
+
 for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg"; do
        for order in "" "_acquire" "_release" "_relaxed"; do
                gen_xchg "${xchg}${order}" ""
@@ -158,5 +163,5 @@ gen_xchg "cmpxchg_double_local" "2 * "
 
 cat <<EOF
 
-#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
 EOF
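
After the rename, the generated header lives at linux/atomic/atomic-instrumented.h but each wrapper keeps its previous shape: instrument the access, then forward to the arch_ primitive. A sketch of one generated wrapper (reconstructed, not shown in this diff):

static __always_inline int
atomic_read(const atomic_t *v)
{
        instrument_atomic_read(v, sizeof(*v));
        return arch_atomic_read(v);
}
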
index e318d3f..eda89ce 100755 (executable)
@@ -47,9 +47,9 @@ gen_proto_order_variant()
 
 cat <<EOF
 static __always_inline ${ret}
-atomic_long_${name}(${params})
+arch_atomic_long_${name}(${params})
 {
-       ${retstmt}${atomic}_${name}(${argscast});
+       ${retstmt}arch_${atomic}_${name}(${argscast});
 }
 
 EOF
@@ -61,8 +61,8 @@ cat << EOF
 // Generated by $0
 // DO NOT MODIFY THIS FILE DIRECTLY
 
-#ifndef _ASM_GENERIC_ATOMIC_LONG_H
-#define _ASM_GENERIC_ATOMIC_LONG_H
+#ifndef _LINUX_ATOMIC_LONG_H
+#define _LINUX_ATOMIC_LONG_H
 
 #include <linux/compiler.h>
 #include <asm/types.h>
@@ -98,5 +98,5 @@ done
 
 cat <<EOF
 #endif /* CONFIG_64BIT */
-#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+#endif /* _LINUX_ATOMIC_LONG_H */
 EOF
index f776a57..5b98a83 100755 (executable)
@@ -8,9 +8,9 @@ ATOMICTBL=${ATOMICDIR}/atomics.tbl
 LINUXDIR=${ATOMICDIR}/../..
 
 cat <<EOF |
-gen-atomic-instrumented.sh      asm-generic/atomic-instrumented.h
-gen-atomic-long.sh              asm-generic/atomic-long.h
-gen-atomic-fallback.sh          linux/atomic-arch-fallback.h           arch_
+gen-atomic-instrumented.sh      linux/atomic/atomic-instrumented.h
+gen-atomic-long.sh              linux/atomic/atomic-long.h
+gen-atomic-fallback.sh          linux/atomic/atomic-arch-fallback.h
 EOF
 while read script header args; do
        /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
index f67b125..94cd49e 100755 (executable)
@@ -1,10 +1,10 @@
 #! /usr/bin/env perl
 # SPDX-License-Identifier: GPL-2.0
 #
-# checkversion find uses of LINUX_VERSION_CODE or KERNEL_VERSION
-# without including <linux/version.h>, or cases of
-# including <linux/version.h> that don't need it.
-# Copyright (C) 2003, Randy Dunlap <rdunlap@xenotime.net>
+# checkversion finds uses of all macros in <linux/version.h>
+# where the source files do not #include <linux/version.h>; or cases
+# of including <linux/version.h> where it is not needed.
+# Copyright (C) 2003, Randy Dunlap <rdunlap@infradead.org>
 
 use strict;
 
@@ -13,7 +13,8 @@ $| = 1;
 my $debugging;
 
 foreach my $file (@ARGV) {
-    next if $file =~ "include/linux/version\.h";
+    next if $file =~ "include/generated/uapi/linux/version\.h";
+    next if $file =~ "usr/include/linux/version\.h";
     # Open this file.
     open( my $f, '<', $file )
       or die "Can't open $file: $!\n";
@@ -41,8 +42,11 @@ foreach my $file (@ARGV) {
            $iLinuxVersion      = $. if m/^\s*#\s*include\s*<linux\/version\.h>/o;
        }
 
-       # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, UTS_RELEASE
-       if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/)) {
+       # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION,
+       # LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL
+       if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/) ||
+           ($_ =~ /LINUX_VERSION_MAJOR/) || ($_ =~ /LINUX_VERSION_PATCHLEVEL/) ||
+           ($_ =~ /LINUX_VERSION_SUBLEVEL/)) {
            $fUseVersion = 1;
             last if $iLinuxVersion;
         }
index c17e480..8f6b13a 100755 (executable)
@@ -173,39 +173,6 @@ my $mcount_regex;  # Find the call site to mcount (return offset)
 my $mcount_adjust;     # Address adjustment to mcount offset
 my $alignment;         # The .align value to use for $mcount_section
 my $section_type;      # Section header plus possible alignment command
-my $can_use_local = 0;         # If we can use local function references
-
-# Shut up recordmcount if user has older objcopy
-my $quiet_recordmcount = ".tmp_quiet_recordmcount";
-my $print_warning = 1;
-$print_warning = 0 if ( -f $quiet_recordmcount);
-
-##
-# check_objcopy - whether objcopy supports --globalize-symbols
-#
-#  --globalize-symbols came out in 2.17, we must test the version
-#  of objcopy, and if it is less than 2.17, then we can not
-#  record local functions.
-sub check_objcopy
-{
-    open (IN, "$objcopy --version |") or die "error running $objcopy";
-    while (<IN>) {
-       if (/objcopy.*\s(\d+)\.(\d+)/) {
-           $can_use_local = 1 if ($1 > 2 || ($1 == 2 && $2 >= 17));
-           last;
-       }
-    }
-    close (IN);
-
-    if (!$can_use_local && $print_warning) {
-       print STDERR "WARNING: could not find objcopy version or version " .
-           "is less than 2.17.\n" .
-           "\tLocal function references are disabled.\n";
-       open (QUIET, ">$quiet_recordmcount");
-       printf QUIET "Disables the warning from recordmcount.pl\n";
-       close QUIET;
-    }
-}
 
 if ($arch =~ /(x86(_64)?)|(i386)/) {
     if ($bits == 64) {
@@ -434,8 +401,6 @@ if ($filename =~ m,^(.*)(\.\S),) {
 my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s";
 my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o";
 
-check_objcopy();
-
 #
 # Step 1: find all the local (static functions) and weak symbols.
 #         't' is local, 'w/W' is weak
@@ -473,11 +438,6 @@ sub update_funcs
 
     # is this function static? If so, note this fact.
     if (defined $locals{$ref_func}) {
-
-       # only use locals if objcopy supports globalize-symbols
-       if (!$can_use_local) {
-           return;
-       }
        $convert{$ref_func} = 1;
     }
 
index 74f8aad..7011fbe 100755 (executable)
@@ -17,7 +17,7 @@ Usage:
        $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
       Wait some time, but not too much; the script is a bit slow.
        Break the pipe (Ctrl + Z)
-       $ scripts/draw_functrace.py < raw_trace_func > draw_functrace
+       $ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace
        Then you have your drawn trace in draw_functrace
 """
 
@@ -103,10 +103,10 @@ def parseLine(line):
        line = line.strip()
        if line.startswith("#"):
                raise CommentLineException
-       m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
+       m = re.match("[^]]+?\\] +([a-z.]+) +([0-9.]+): (\\w+) <-(\\w+)", line)
        if m is None:
                raise BrokenLineException
-       return (m.group(1), m.group(2), m.group(3))
+       return (m.group(2), m.group(3), m.group(4))
 
 
 def main():
index 18f01f3..d98260f 100644 (file)
@@ -55,7 +55,7 @@ int __init parse_efi_signature_list(
                memcpy(&list, data, sizeof(list));
                pr_devel("LIST[%04x] guid=%pUl ls=%x hs=%x ss=%x\n",
                         offs,
-                        list.signature_type.b, list.signature_list_size,
+                        &list.signature_type, list.signature_list_size,
                         list.signature_header_size, list.signature_size);
 
                lsize = list.signature_list_size;
index 09533cb..9ffa9e9 100644 (file)
@@ -58,10 +58,11 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
        [LOCKDOWN_MMIOTRACE] = "unsafe mmio",
        [LOCKDOWN_DEBUGFS] = "debugfs access",
        [LOCKDOWN_XMON_WR] = "xmon write access",
+       [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
        [LOCKDOWN_INTEGRITY_MAX] = "integrity",
        [LOCKDOWN_KCORE] = "/proc/kcore access",
        [LOCKDOWN_KPROBES] = "use of kprobes",
-       [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
+       [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
        [LOCKDOWN_PERF] = "unsafe use of perf",
        [LOCKDOWN_TRACEFS] = "use of tracefs",
        [LOCKDOWN_XMON_RW] = "xmon read and write access",
index b0032c4..9e84e66 100644 (file)
@@ -3325,6 +3325,8 @@ static int selinux_inode_setxattr(struct user_namespace *mnt_userns,
                        }
                        ab = audit_log_start(audit_context(),
                                             GFP_ATOMIC, AUDIT_SELINUX_ERR);
+                       if (!ab)
+                               return rc;
                        audit_log_format(ab, "op=setxattr invalid_context=");
                        audit_log_n_untrustedstring(ab, value, audit_size);
                        audit_log_end(ab);
@@ -6552,6 +6554,8 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
                                ab = audit_log_start(audit_context(),
                                                     GFP_ATOMIC,
                                                     AUDIT_SELINUX_ERR);
+                               if (!ab)
+                                       return error;
                                audit_log_format(ab, "op=fscreate invalid_context=");
                                audit_log_n_untrustedstring(ab, value, audit_size);
                                audit_log_end(ab);
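
audit_log_start() may return NULL, for instance when auditing is disabled or when the GFP_ATOMIC
allocation of the audit buffer fails; in that case there is nothing to format, so the new checks
return the error that was about to be reported instead of continuing with the audit_log_* calls.
A minimal sketch of the pattern (the error variable is whatever the surrounding code was going to
return anyway):

	ab = audit_log_start(audit_context(), GFP_ATOMIC, AUDIT_SELINUX_ERR);
	if (!ab)
		return rc;	/* audit disabled or no memory: skip the record */
	audit_log_format(ab, "op=setxattr invalid_context=");
	audit_log_n_untrustedstring(ab, value, audit_size);
	audit_log_end(ab);
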
index defc5ef..0ae1b71 100644 (file)
@@ -874,7 +874,7 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
        rc = sidtab_init(s);
        if (rc) {
                pr_err("SELinux:  out of memory on SID table init\n");
-               goto out;
+               return rc;
        }
 
        head = p->ocontexts[OCON_ISID];
@@ -885,7 +885,7 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
                if (sid == SECSID_NULL) {
                        pr_err("SELinux:  SID 0 was assigned a context.\n");
                        sidtab_destroy(s);
-                       goto out;
+                       return -EINVAL;
                }
 
                /* Ignore initial SIDs unused by this kernel. */
@@ -897,12 +897,10 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
                        pr_err("SELinux:  unable to load initial SID %s.\n",
                               name);
                        sidtab_destroy(s);
-                       goto out;
+                       return rc;
                }
        }
-       rc = 0;
-out:
-       return rc;
+       return 0;
 }
 
 int policydb_class_isvalid(struct policydb *p, unsigned int class)
index d84c77f..e5f1b27 100644 (file)
@@ -1673,6 +1673,8 @@ static int compute_sid_handle_invalid_context(
        if (context_struct_to_string(policydb, newcontext, &n, &nlen))
                goto out;
        ab = audit_log_start(audit_context(), GFP_ATOMIC, AUDIT_SELINUX_ERR);
+       if (!ab)
+               goto out;
        audit_log_format(ab,
                         "op=security_compute_sid invalid_context=");
        /* no need to record the NUL with untrusted strings */
index c3cfbdf..99c3422 100644 (file)
@@ -302,7 +302,7 @@ int smack_populate_secattr(struct smack_known *skp);
 /*
  * Shared data.
  */
-extern int smack_enabled;
+extern int smack_enabled __initdata;
 extern int smack_cipso_direct;
 extern int smack_cipso_mapped;
 extern struct smack_known *smack_net_ambient;
index 1f391f6..d2186e2 100644 (file)
@@ -81,23 +81,22 @@ int log_policy = SMACK_AUDIT_DENIED;
 int smk_access_entry(char *subject_label, char *object_label,
                        struct list_head *rule_list)
 {
-       int may = -ENOENT;
        struct smack_rule *srp;
 
        list_for_each_entry_rcu(srp, rule_list, list) {
                if (srp->smk_object->smk_known == object_label &&
                    srp->smk_subject->smk_known == subject_label) {
-                       may = srp->smk_access;
-                       break;
+                       int may = srp->smk_access;
+                       /*
+                        * MAY_WRITE implies MAY_LOCK.
+                        */
+                       if ((may & MAY_WRITE) == MAY_WRITE)
+                               may |= MAY_LOCK;
+                       return may;
                }
        }
 
-       /*
-        * MAY_WRITE implies MAY_LOCK.
-        */
-       if ((may & MAY_WRITE) == MAY_WRITE)
-               may |= MAY_LOCK;
-       return may;
+       return -ENOENT;
 }
 
 /**
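
Moving the MAY_WRITE -> MAY_LOCK implication inside the loop avoids applying a bit test to the
-ENOENT error value. Worked through on a 32-bit int: -ENOENT is -2, i.e. 0xfffffffe, so the old
test (may & MAY_WRITE) == MAY_WRITE (MAY_WRITE is 0x2) was spuriously true in the not-found case,
and OR-ing in MAY_LOCK only left the value unchanged because every bit above bit 0 is already set
in -2. The old code therefore worked by accident; returning the composed access mode from within
the match, and a bare -ENOENT otherwise, makes the intended semantics explicit.
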
index 223a6da..cacbe75 100644 (file)
@@ -54,7 +54,7 @@
 static DEFINE_MUTEX(smack_ipv6_lock);
 static LIST_HEAD(smk_ipv6_port_list);
 struct kmem_cache *smack_rule_cache;
-int smack_enabled;
+int smack_enabled __initdata;
 
 #define A(s) {"smack"#s, sizeof("smack"#s) - 1, Opt_##s}
 static struct {
index 83b79ed..439a358 100644 (file)
@@ -215,7 +215,7 @@ static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
                                   struct vm_area_struct *area)
 {
        return remap_pfn_range(area, area->vm_start,
-                              dmab->addr >> PAGE_SHIFT,
+                              page_to_pfn(virt_to_page(dmab->area)),
                               area->vm_end - area->vm_start,
                               area->vm_page_prot);
 }
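
The reasoning behind this one-liner: dmab->addr is a dma_addr_t, the address as seen by the
device, and with an IOMMU or a DMA offset it need not equal the CPU physical address, so shifting
it by PAGE_SHIFT is not a reliable way to get the PFN for a user mapping. For the continuous
allocator the buffer is ordinary lowmem mapped at dmab->area, so the PFN is derived from the page
behind that kernel virtual address. A two-line sketch of the distinction (illustrative only):

	dma_addr_t dev_addr = dmab->addr;	/* for programming the device */
	unsigned long pfn = page_to_pfn(virt_to_page(dmab->area));	/* for remap_pfn_range() */
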
index 6a2971a..71323d8 100644 (file)
@@ -246,12 +246,15 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
        if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
                return false;
 
-       if (substream->ops->mmap)
+       if (substream->ops->mmap || substream->ops->page)
                return true;
 
        switch (substream->dma_buffer.dev.type) {
        case SNDRV_DMA_TYPE_UNKNOWN:
-               return false;
+               /* we can't know the device, so just assume that the driver does
+                * everything right
+                */
+               return true;
        case SNDRV_DMA_TYPE_CONTINUOUS:
        case SNDRV_DMA_TYPE_VMALLOC:
                return true;
index b9c2ce2..84d7863 100644 (file)
@@ -514,10 +514,11 @@ static int check_and_subscribe_port(struct snd_seq_client *client,
        return err;
 }
 
-static void delete_and_unsubscribe_port(struct snd_seq_client *client,
-                                       struct snd_seq_client_port *port,
-                                       struct snd_seq_subscribers *subs,
-                                       bool is_src, bool ack)
+/* called with grp->list_mutex held */
+static void __delete_and_unsubscribe_port(struct snd_seq_client *client,
+                                         struct snd_seq_client_port *port,
+                                         struct snd_seq_subscribers *subs,
+                                         bool is_src, bool ack)
 {
        struct snd_seq_port_subs_info *grp;
        struct list_head *list;
@@ -525,7 +526,6 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
 
        grp = is_src ? &port->c_src : &port->c_dest;
        list = is_src ? &subs->src_list : &subs->dest_list;
-       down_write(&grp->list_mutex);
        write_lock_irq(&grp->list_lock);
        empty = list_empty(list);
        if (!empty)
@@ -535,6 +535,18 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
 
        if (!empty)
                unsubscribe_port(client, port, grp, &subs->info, ack);
+}
+
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+                                       struct snd_seq_client_port *port,
+                                       struct snd_seq_subscribers *subs,
+                                       bool is_src, bool ack)
+{
+       struct snd_seq_port_subs_info *grp;
+
+       grp = is_src ? &port->c_src : &port->c_dest;
+       down_write(&grp->list_mutex);
+       __delete_and_unsubscribe_port(client, port, subs, is_src, ack);
        up_write(&grp->list_mutex);
 }
 
@@ -590,27 +602,30 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
                            struct snd_seq_client_port *dest_port,
                            struct snd_seq_port_subscribe *info)
 {
-       struct snd_seq_port_subs_info *src = &src_port->c_src;
+       struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
        struct snd_seq_subscribers *subs;
        int err = -ENOENT;
 
-       down_write(&src->list_mutex);
+       /* always start by deleting the dest port, to avoid concurrent
+        * deletions
+        */
+       down_write(&dest->list_mutex);
        /* look for the connection */
-       list_for_each_entry(subs, &src->list_head, src_list) {
+       list_for_each_entry(subs, &dest->list_head, dest_list) {
                if (match_subs_info(info, &subs->info)) {
-                       atomic_dec(&subs->ref_count); /* mark as not ready */
+                       __delete_and_unsubscribe_port(dest_client, dest_port,
+                                                     subs, false,
+                                                     connector->number != dest_client->number);
                        err = 0;
                        break;
                }
        }
-       up_write(&src->list_mutex);
+       up_write(&dest->list_mutex);
        if (err < 0)
                return err;
 
        delete_and_unsubscribe_port(src_client, src_port, subs, true,
                                    connector->number != src_client->number);
-       delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
-                                   connector->number != dest_client->number);
        kfree(subs);
        return 0;
 }
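
The disconnect path now looks the subscription up and unlinks its dest side under a single hold of
dest->list_mutex, so a racing deletion of the same subscription can no longer find the entry after
it has been unlinked and free it a second time; the src side is then unlinked by the existing
helper, which takes the src-side list_mutex itself. The old sequence (decrement ref_count under
the src lock, drop the lock, unlink both sides later) left exactly that window open.
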
index 0ef242f..fff18b5 100644 (file)
@@ -153,7 +153,7 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
        struct cmp_connection *conn;
        enum cmp_direction c_dir;
        enum amdtp_stream_direction s_dir;
-       unsigned int flags = CIP_UNAWARE_SYT;
+       unsigned int flags = 0;
        int err;
 
        if (!(oxfw->quirks & SND_OXFW_QUIRK_BLOCKING_TRANSMISSION))
@@ -161,6 +161,13 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
        else
                flags |= CIP_BLOCKING;
 
+       // OXFW 970/971 has no function to generate playback timing according to the sequence
+       // of values in the syt field, so the packet should carry the NO_INFO value in that field.
+       // However, some models simply ignore data blocks in packets with NO_INFO during audio
+       // data processing.
+       if (!(oxfw->quirks & SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET))
+               flags |= CIP_UNAWARE_SYT;
+
        if (stream == &oxfw->tx_stream) {
                conn = &oxfw->out_conn;
                c_dir = CMP_OUTPUT;
index 84971d7..cb5b5e3 100644 (file)
@@ -159,8 +159,10 @@ static int detect_quirks(struct snd_oxfw *oxfw, const struct ieee1394_device_id
                return snd_oxfw_scs1x_add(oxfw);
        }
 
-       if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW)
-               oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;
+       if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW) {
+               oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION |
+                               SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET;
+       }
 
        /*
         * TASCAM FireOne has physical control and requires a pair of additional
index ee47abc..c13034f 100644 (file)
@@ -42,6 +42,11 @@ enum snd_oxfw_quirk {
        SND_OXFW_QUIRK_BLOCKING_TRANSMISSION = 0x04,
        // Stanton SCS1.d and SCS1.m support unique transaction.
        SND_OXFW_QUIRK_SCS_TRANSACTION = 0x08,
+       // Apogee Duet FireWire ignores data blocks in packets with NO_INFO for audio data
+       // processing, although its output level meter still moves. Any value in the syt field of
+       // a packet makes the device process audio data, even if the value is invalid from the
+       // standpoint of IEC 61883-1/6.
+       SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET = 0x10,
 };
 
 /* This is an arbitrary number for convenience. */
index e97d005..481d8f8 100644 (file)
@@ -3460,7 +3460,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
        struct hda_gen_spec *spec = codec->spec;
        const struct hda_input_mux *imux;
        struct nid_path *path;
-       int i, adc_idx, err = 0;
+       int i, adc_idx, ret, err = 0;
 
        imux = &spec->input_mux;
        adc_idx = kcontrol->id.index;
@@ -3470,9 +3470,13 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
                if (!path || !path->ctls[type])
                        continue;
                kcontrol->private_value = path->ctls[type];
-               err = func(kcontrol, ucontrol);
-               if (err < 0)
+               ret = func(kcontrol, ucontrol);
+               if (ret < 0) {
+                       err = ret;
                        break;
+               }
+               if (ret > 0)
+                       err = 1;
        }
        mutex_unlock(&codec->control_mutex);
        if (err >= 0 && spec->cap_sync_hook)
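
The capture-put callback is applied to every input path, so the aggregated return value has to
distinguish three cases: negative (error, stop), positive (at least one underlying control value
changed), and zero (nothing changed). Previously err was overwritten on every iteration, so a
later path reporting "no change" could mask an earlier "changed" result and no control
notification would be sent. A reduced, self-contained sketch of the aggregation rule (names are
illustrative, not the driver's):

	/* op() follows the ALSA put convention: <0 error, 0 no change, >0 changed */
	static int aggregate(int (*op)(int idx), int n)
	{
		int i, ret, err = 0;

		for (i = 0; i < n; i++) {
			ret = op(i);
			if (ret < 0)
				return ret;	/* stop on the first error */
			if (ret > 0)
				err = 1;	/* remember that something changed */
		}
		return err;
	}
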
index 0322b28..0062c18 100644 (file)
@@ -883,10 +883,11 @@ static unsigned int azx_get_pos_skl(struct azx *chip, struct azx_dev *azx_dev)
        return azx_get_pos_posbuf(chip, azx_dev);
 }
 
-static void azx_shutdown_chip(struct azx *chip)
+static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset)
 {
        azx_stop_chip(chip);
-       azx_enter_link_reset(chip);
+       if (!skip_link_reset)
+               azx_enter_link_reset(chip);
        azx_clear_irq_pending(chip);
        display_power(chip, false);
 }
@@ -895,6 +896,11 @@ static void azx_shutdown_chip(struct azx *chip)
 static DEFINE_MUTEX(card_list_lock);
 static LIST_HEAD(card_list);
 
+static void azx_shutdown_chip(struct azx *chip)
+{
+       __azx_shutdown_chip(chip, false);
+}
+
 static void azx_add_card_list(struct azx *chip)
 {
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -2385,7 +2391,7 @@ static void azx_shutdown(struct pci_dev *pci)
                return;
        chip = card->private_data;
        if (chip && chip->running)
-               azx_shutdown_chip(chip);
+               __azx_shutdown_chip(chip, true);
 }
 
 /* PCI IDs */
index caaf0e8..7ad689f 100644 (file)
@@ -6658,6 +6658,7 @@ enum {
        ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
        ALC623_FIXUP_LENOVO_THINKSTATION_P340,
        ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
+       ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8242,6 +8243,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
        },
+       [ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_limit_int_mic_boost,
+               .chained = true,
+               .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8274,9 +8281,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1300, "Acer SWIFT SF314-56", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
@@ -8330,6 +8339,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -8429,13 +8439,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+       SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
-       SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
-       SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
@@ -8463,6 +8474,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
        SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
+       SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
        SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
index a5c1a2c..773a136 100644 (file)
@@ -1041,6 +1041,7 @@ static const struct hda_fixup via_fixups[] = {
 };
 
 static const struct snd_pci_quirk vt2002p_fixups[] = {
+       SND_PCI_QUIRK(0x1043, 0x13f7, "Asus B23E", VIA_FIXUP_POWER_SAVE),
        SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
        SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
index 8a13462..5dcf77a 100644 (file)
@@ -36,6 +36,7 @@ config SND_SOC_COMPRESS
 
 config SND_SOC_TOPOLOGY
        bool
+       select SND_DYNAMIC_MINORS
 
 config SND_SOC_TOPOLOGY_KUNIT_TEST
        tristate "KUnit tests for SoC topology"
index 9449fb4..3c60c5f 100644 (file)
@@ -525,6 +525,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                                | SND_SOC_DAIFMT_CBM_CFM,
                .init = cz_da7219_init,
                .dpcm_playback = 1,
+               .stop_dma_first = 1,
                .ops = &cz_da7219_play_ops,
                SND_SOC_DAILINK_REG(designware1, dlgs, platform),
        },
@@ -534,6 +535,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_da7219_cap_ops,
                SND_SOC_DAILINK_REG(designware2, dlgs, platform),
        },
@@ -543,6 +545,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_playback = 1,
+               .stop_dma_first = 1,
                .ops = &cz_max_play_ops,
                SND_SOC_DAILINK_REG(designware3, mx, platform),
        },
@@ -553,6 +556,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_dmic0_cap_ops,
                SND_SOC_DAILINK_REG(designware3, adau, platform),
        },
@@ -563,6 +567,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_dmic1_cap_ops,
                SND_SOC_DAILINK_REG(designware2, adau, platform),
        },
index 143155a..cc1ce6f 100644 (file)
@@ -969,7 +969,7 @@ static int acp_dma_hw_params(struct snd_soc_component *component,
 
        acp_set_sram_bank_state(rtd->acp_mmio, 0, true);
        /* Save for runtime private data */
-       rtd->dma_addr = substream->dma_buffer.addr;
+       rtd->dma_addr = runtime->dma_addr;
        rtd->order = get_order(size);
 
        /* Fill the page table entries in ACP SRAM */
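
This hunk and several of the driver hunks below (acp3x, acp-pdm, the Intel SST platform code,
kirkwood, uniphier, xlnx) make the same substitution: with the PCM core now allocating buffers
through the managed-buffer helpers, substream->dma_buffer is not guaranteed to describe the buffer
actually in use, whereas substream->runtime->dma_addr and dma_area are authoritative once
hw_params has run. That is also why the kirkwood change further down moves the mbus window setup
from open() into a new hw_params() callback, the earliest point at which runtime->dma_addr is
valid. A minimal sketch of where the address should be read from (illustrative):

	/* inside a hw_params (or later) callback */
	dma_addr_t addr = substream->runtime->dma_addr;	/* not substream->dma_buffer.addr */
	void *area = substream->runtime->dma_area;	/* not substream->dma_buffer.area */
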
index 8148b0d..597d7c4 100644 (file)
@@ -286,7 +286,7 @@ static int acp3x_dma_hw_params(struct snd_soc_component *component,
                pr_err("pinfo failed\n");
        }
        size = params_buffer_bytes(params);
-       rtd->dma_addr = substream->dma_buffer.addr;
+       rtd->dma_addr = substream->runtime->dma_addr;
        rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
        config_acp3x_dma(rtd, substream->stream);
        return 0;
index bd20622..0391c28 100644 (file)
@@ -242,7 +242,7 @@ static int acp_pdm_dma_hw_params(struct snd_soc_component *component,
                return -EINVAL;
        size = params_buffer_bytes(params);
        period_bytes = params_period_bytes(params);
-       rtd->dma_addr = substream->dma_buffer.addr;
+       rtd->dma_addr = substream->runtime->dma_addr;
        rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
        config_acp_dma(rtd, substream->stream);
        init_pdm_ring_buffer(MEM_WINDOW_START, size, period_bytes,
index 19438da..7b8040e 100644 (file)
@@ -382,6 +382,8 @@ static const struct dev_pm_ops rn_acp_pm = {
        .runtime_resume =  snd_rn_acp_resume,
        .suspend = snd_rn_acp_suspend,
        .resume =       snd_rn_acp_resume,
+       .restore =      snd_rn_acp_resume,
+       .poweroff =     snd_rn_acp_suspend,
 };
 
 static void snd_rn_acp_remove(struct pci_dev *pci)
index a3b784e..db16071 100644 (file)
@@ -1559,6 +1559,7 @@ config SND_SOC_WCD934X
 config SND_SOC_WCD938X
        depends on SND_SOC_WCD938X_SDW
        tristate
+       depends on SOUNDWIRE || !SOUNDWIRE
 
 config SND_SOC_WCD938X_SDW
        tristate "WCD9380/WCD9385 Codec - SDW"
index de8b83d..7bb38c3 100644 (file)
@@ -583,7 +583,10 @@ obj-$(CONFIG_SND_SOC_WCD_MBHC)     += snd-soc-wcd-mbhc.o
 obj-$(CONFIG_SND_SOC_WCD9335)  += snd-soc-wcd9335.o
 obj-$(CONFIG_SND_SOC_WCD934X)  += snd-soc-wcd934x.o
 obj-$(CONFIG_SND_SOC_WCD938X)  += snd-soc-wcd938x.o
-obj-$(CONFIG_SND_SOC_WCD938X_SDW) += snd-soc-wcd938x-sdw.o
+ifdef CONFIG_SND_SOC_WCD938X_SDW
+# avoid link failure by forcing sdw code built-in when needed
+obj-$(CONFIG_SND_SOC_WCD938X) += snd-soc-wcd938x-sdw.o
+endif
 obj-$(CONFIG_SND_SOC_WL1273)   += snd-soc-wl1273.o
 obj-$(CONFIG_SND_SOC_WM0010)   += snd-soc-wm0010.o
 obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o
index eff013f..99c022b 100644 (file)
@@ -405,7 +405,7 @@ static const struct regmap_config cs42l42_regmap = {
        .use_single_write = true,
 };
 
-static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
+static DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 100, true);
 static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);
 
 static const char * const cs42l42_hpf_freq_text[] = {
@@ -425,34 +425,23 @@ static SOC_ENUM_SINGLE_DECL(cs42l42_wnf3_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
                            CS42L42_ADC_WNF_CF_SHIFT,
                            cs42l42_wnf3_freq_text);
 
-static const char * const cs42l42_wnf05_freq_text[] = {
-       "280Hz", "315Hz", "350Hz", "385Hz",
-       "420Hz", "455Hz", "490Hz", "525Hz"
-};
-
-static SOC_ENUM_SINGLE_DECL(cs42l42_wnf05_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
-                           CS42L42_ADC_WNF_CF_SHIFT,
-                           cs42l42_wnf05_freq_text);
-
 static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
        /* ADC Volume and Filter Controls */
        SOC_SINGLE("ADC Notch Switch", CS42L42_ADC_CTL,
-                               CS42L42_ADC_NOTCH_DIS_SHIFT, true, false),
+                               CS42L42_ADC_NOTCH_DIS_SHIFT, true, true),
        SOC_SINGLE("ADC Weak Force Switch", CS42L42_ADC_CTL,
                                CS42L42_ADC_FORCE_WEAK_VCM_SHIFT, true, false),
        SOC_SINGLE("ADC Invert Switch", CS42L42_ADC_CTL,
                                CS42L42_ADC_INV_SHIFT, true, false),
        SOC_SINGLE("ADC Boost Switch", CS42L42_ADC_CTL,
                                CS42L42_ADC_DIG_BOOST_SHIFT, true, false),
-       SOC_SINGLE_SX_TLV("ADC Volume", CS42L42_ADC_VOLUME,
-                               CS42L42_ADC_VOL_SHIFT, 0xA0, 0x6C, adc_tlv),
+       SOC_SINGLE_S8_TLV("ADC Volume", CS42L42_ADC_VOLUME, -97, 12, adc_tlv),
        SOC_SINGLE("ADC WNF Switch", CS42L42_ADC_WNF_HPF_CTL,
                                CS42L42_ADC_WNF_EN_SHIFT, true, false),
        SOC_SINGLE("ADC HPF Switch", CS42L42_ADC_WNF_HPF_CTL,
                                CS42L42_ADC_HPF_EN_SHIFT, true, false),
        SOC_ENUM("HPF Corner Freq", cs42l42_hpf_freq_enum),
        SOC_ENUM("WNF 3dB Freq", cs42l42_wnf3_freq_enum),
-       SOC_ENUM("WNF 05dB Freq", cs42l42_wnf05_freq_enum),
 
        /* DAC Volume and Filter Controls */
        SOC_SINGLE("DACA Invert Switch", CS42L42_DAC_CTL1,
@@ -471,8 +460,8 @@ static const struct snd_soc_dapm_widget cs42l42_dapm_widgets[] = {
        SND_SOC_DAPM_OUTPUT("HP"),
        SND_SOC_DAPM_DAC("DAC", NULL, CS42L42_PWR_CTL1, CS42L42_HP_PDN_SHIFT, 1),
        SND_SOC_DAPM_MIXER("MIXER", CS42L42_PWR_CTL1, CS42L42_MIXER_PDN_SHIFT, 1, NULL, 0),
-       SND_SOC_DAPM_AIF_IN("SDIN1", NULL, 0, CS42L42_ASP_RX_DAI0_EN, CS42L42_ASP_RX0_CH1_SHIFT, 0),
-       SND_SOC_DAPM_AIF_IN("SDIN2", NULL, 1, CS42L42_ASP_RX_DAI0_EN, CS42L42_ASP_RX0_CH2_SHIFT, 0),
+       SND_SOC_DAPM_AIF_IN("SDIN1", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SDIN2", NULL, 1, SND_SOC_NOPM, 0, 0),
 
        /* Playback Requirements */
        SND_SOC_DAPM_SUPPLY("ASP DAI0", CS42L42_PWR_CTL1, CS42L42_ASP_DAI_PDN_SHIFT, 1, NULL, 0),
@@ -630,6 +619,8 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
 
        for (i = 0; i < ARRAY_SIZE(pll_ratio_table); i++) {
                if (pll_ratio_table[i].sclk == clk) {
+                       cs42l42->pll_config = i;
+
                        /* Configure the internal sample rate */
                        snd_soc_component_update_bits(component, CS42L42_MCLK_CTL,
                                        CS42L42_INTERNAL_FS_MASK,
@@ -638,14 +629,9 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
                                        (pll_ratio_table[i].mclk_int !=
                                        24000000)) <<
                                        CS42L42_INTERNAL_FS_SHIFT);
-                       /* Set the MCLK src (PLL or SCLK) and the divide
-                        * ratio
-                        */
+
                        snd_soc_component_update_bits(component, CS42L42_MCLK_SRC_SEL,
-                                       CS42L42_MCLK_SRC_SEL_MASK |
                                        CS42L42_MCLKDIV_MASK,
-                                       (pll_ratio_table[i].mclk_src_sel
-                                       << CS42L42_MCLK_SRC_SEL_SHIFT) |
                                        (pll_ratio_table[i].mclk_div <<
                                        CS42L42_MCLKDIV_SHIFT));
                        /* Set up the LRCLK */
@@ -681,15 +667,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
                                        CS42L42_FSYNC_PULSE_WIDTH_MASK,
                                        CS42L42_FRAC1_VAL(fsync - 1) <<
                                        CS42L42_FSYNC_PULSE_WIDTH_SHIFT);
-                       snd_soc_component_update_bits(component,
-                                       CS42L42_ASP_FRM_CFG,
-                                       CS42L42_ASP_5050_MASK,
-                                       CS42L42_ASP_5050_MASK);
-                       /* Set the frame delay to 1.0 SCLK clocks */
-                       snd_soc_component_update_bits(component, CS42L42_ASP_FRM_CFG,
-                                       CS42L42_ASP_FSD_MASK,
-                                       CS42L42_ASP_FSD_1_0 <<
-                                       CS42L42_ASP_FSD_SHIFT);
                        /* Set the sample rates (96k or lower) */
                        snd_soc_component_update_bits(component, CS42L42_FS_RATE_EN,
                                        CS42L42_FS_EN_MASK,
@@ -789,7 +766,18 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        /* interface format */
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
-       case SND_SOC_DAIFMT_LEFT_J:
+               /*
+                * 5050 mode, frame starts on falling edge of LRCLK,
+                * frame delayed by 1.0 SCLKs
+                */
+               snd_soc_component_update_bits(component,
+                                             CS42L42_ASP_FRM_CFG,
+                                             CS42L42_ASP_STP_MASK |
+                                             CS42L42_ASP_5050_MASK |
+                                             CS42L42_ASP_FSD_MASK,
+                                             CS42L42_ASP_5050_MASK |
+                                             (CS42L42_ASP_FSD_1_0 <<
+                                               CS42L42_ASP_FSD_SHIFT));
                break;
        default:
                return -EINVAL;
@@ -819,6 +807,25 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        return 0;
 }
 
+static int cs42l42_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
+
+       /*
+        * Sample rates < 44.1 kHz would produce an out-of-range SCLK with
+        * a standard I2S frame. If the machine driver sets SCLK it must be
+        * legal.
+        */
+       if (cs42l42->sclk)
+               return 0;
+
+       /* Machine driver has not set a SCLK, limit bottom end to 44.1 kHz */
+       return snd_pcm_hw_constraint_minmax(substream->runtime,
+                                           SNDRV_PCM_HW_PARAM_RATE,
+                                           44100, 192000);
+}
+
 static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
                                struct snd_pcm_hw_params *params,
                                struct snd_soc_dai *dai)
@@ -832,6 +839,10 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
        cs42l42->srate = params_rate(params);
        cs42l42->bclk = snd_soc_params_to_bclk(params);
 
+       /* I2S frame always has 2 channels even for mono audio */
+       if (channels == 1)
+               cs42l42->bclk *= 2;
+
        switch(substream->stream) {
        case SNDRV_PCM_STREAM_CAPTURE:
                if (channels == 2) {
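
Two of the cs42l42 changes are easier to follow with the numbers written out. With a standard I2S
frame of two 32-bit slots, SCLK = rate x 64, so 44.1 kHz gives about 2.822 MHz, while lower rates
would push SCLK below what the driver's pll_ratio_table is assumed to support; hence the
44100..192000 constraint applied in startup whenever the machine driver has not fixed SCLK itself.
And because an I2S frame always carries two channel slots, a mono stream still clocks two slots
per frame: snd_soc_params_to_bclk() reports rate x channels x width, e.g. 48 kHz x 1 x 32 =
1.536 MHz for mono S32, so the driver doubles it to the 3.072 MHz actually present on the wire.
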
@@ -855,6 +866,17 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
                snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES,
                                                         CS42L42_ASP_RX_CH_AP_MASK |
                                                         CS42L42_ASP_RX_CH_RES_MASK, val);
+
+               /* Channel B comes from the last active channel */
+               snd_soc_component_update_bits(component, CS42L42_SP_RX_CH_SEL,
+                                             CS42L42_SP_RX_CHB_SEL_MASK,
+                                             (channels - 1) << CS42L42_SP_RX_CHB_SEL_SHIFT);
+
+               /* Both LRCLK slots must be enabled */
+               snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_EN,
+                                             CS42L42_ASP_RX0_CH_EN_MASK,
+                                             BIT(CS42L42_ASP_RX0_CH1_SHIFT) |
+                                             BIT(CS42L42_ASP_RX0_CH2_SHIFT));
                break;
        default:
                break;
@@ -900,13 +922,21 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                         */
                        regmap_multi_reg_write(cs42l42->regmap, cs42l42_to_osc_seq,
                                               ARRAY_SIZE(cs42l42_to_osc_seq));
+
+                       /* Must disconnect PLL before stopping it */
+                       snd_soc_component_update_bits(component,
+                                                     CS42L42_MCLK_SRC_SEL,
+                                                     CS42L42_MCLK_SRC_SEL_MASK,
+                                                     0);
+                       usleep_range(100, 200);
+
                        snd_soc_component_update_bits(component, CS42L42_PLL_CTL1,
                                                      CS42L42_PLL_START_MASK, 0);
                }
        } else {
                if (!cs42l42->stream_use) {
                        /* SCLK must be running before codec unmute */
-                       if ((cs42l42->bclk < 11289600) && (cs42l42->sclk < 11289600)) {
+                       if (pll_ratio_table[cs42l42->pll_config].mclk_src_sel) {
                                snd_soc_component_update_bits(component, CS42L42_PLL_CTL1,
                                                              CS42L42_PLL_START_MASK, 1);
 
@@ -927,6 +957,12 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                                                               CS42L42_PLL_LOCK_TIMEOUT_US);
                                if (ret < 0)
                                        dev_warn(component->dev, "PLL failed to lock: %d\n", ret);
+
+                               /* PLL must be running to drive glitchless switch logic */
+                               snd_soc_component_update_bits(component,
+                                                             CS42L42_MCLK_SRC_SEL,
+                                                             CS42L42_MCLK_SRC_SEL_MASK,
+                                                             CS42L42_MCLK_SRC_SEL_MASK);
                        }
 
                        /* Mark SCLK as present, turn off internal oscillator */
@@ -960,8 +996,8 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                         SNDRV_PCM_FMTBIT_S24_LE |\
                         SNDRV_PCM_FMTBIT_S32_LE )
 
-
 static const struct snd_soc_dai_ops cs42l42_ops = {
+       .startup        = cs42l42_dai_startup,
        .hw_params      = cs42l42_pcm_hw_params,
        .set_fmt        = cs42l42_set_dai_fmt,
        .set_sysclk     = cs42l42_set_sysclk,
index 206b3c8..8734f68 100644 (file)
 
 /* Page 0x25 Audio Port Registers */
 #define CS42L42_SP_RX_CH_SEL           (CS42L42_PAGE_25 + 0x01)
+#define CS42L42_SP_RX_CHB_SEL_SHIFT    2
+#define CS42L42_SP_RX_CHB_SEL_MASK     (3 << CS42L42_SP_RX_CHB_SEL_SHIFT)
 
 #define CS42L42_SP_RX_ISOC_CTL         (CS42L42_PAGE_25 + 0x02)
 #define CS42L42_SP_RX_RSYNC_SHIFT      6
@@ -775,6 +777,7 @@ struct  cs42l42_private {
        struct gpio_desc *reset_gpio;
        struct completion pdn_done;
        struct snd_soc_jack *jack;
+       int pll_config;
        int bclk;
        u32 sclk;
        u32 srate;
index 15bd833..db88be4 100644 (file)
@@ -828,36 +828,6 @@ static void nau8824_int_status_clear_all(struct regmap *regmap)
        }
 }
 
-static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin)
-{
-       struct snd_soc_dapm_context *dapm = nau8824->dapm;
-       const char *prefix = dapm->component->name_prefix;
-       char prefixed_pin[80];
-
-       if (prefix) {
-               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
-                        prefix, pin);
-               snd_soc_dapm_disable_pin(dapm, prefixed_pin);
-       } else {
-               snd_soc_dapm_disable_pin(dapm, pin);
-       }
-}
-
-static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin)
-{
-       struct snd_soc_dapm_context *dapm = nau8824->dapm;
-       const char *prefix = dapm->component->name_prefix;
-       char prefixed_pin[80];
-
-       if (prefix) {
-               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
-                        prefix, pin);
-               snd_soc_dapm_force_enable_pin(dapm, prefixed_pin);
-       } else {
-               snd_soc_dapm_force_enable_pin(dapm, pin);
-       }
-}
-
 static void nau8824_eject_jack(struct nau8824 *nau8824)
 {
        struct snd_soc_dapm_context *dapm = nau8824->dapm;
@@ -866,8 +836,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
        /* Clear all interruption status */
        nau8824_int_status_clear_all(regmap);
 
-       nau8824_dapm_disable_pin(nau8824, "SAR");
-       nau8824_dapm_disable_pin(nau8824, "MICBIAS");
+       snd_soc_dapm_disable_pin(dapm, "SAR");
+       snd_soc_dapm_disable_pin(dapm, "MICBIAS");
        snd_soc_dapm_sync(dapm);
 
        /* Enable the insertion interruption, disable the ejection
@@ -897,8 +867,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        struct regmap *regmap = nau8824->regmap;
        int adc_value, event = 0, event_mask = 0;
 
-       nau8824_dapm_enable_pin(nau8824, "MICBIAS");
-       nau8824_dapm_enable_pin(nau8824, "SAR");
+       snd_soc_dapm_enable_pin(dapm, "MICBIAS");
+       snd_soc_dapm_enable_pin(dapm, "SAR");
        snd_soc_dapm_sync(dapm);
 
        msleep(100);
@@ -909,8 +879,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        if (adc_value < HEADSET_SARADC_THD) {
                event |= SND_JACK_HEADPHONE;
 
-               nau8824_dapm_disable_pin(nau8824, "SAR");
-               nau8824_dapm_disable_pin(nau8824, "MICBIAS");
+               snd_soc_dapm_disable_pin(dapm, "SAR");
+               snd_soc_dapm_disable_pin(dapm, "MICBIAS");
                snd_soc_dapm_sync(dapm);
        } else {
                event |= SND_JACK_HEADSET;
index abcd6f4..51ecaa2 100644 (file)
@@ -44,6 +44,7 @@ static const struct reg_sequence patch_list[] = {
        {RT5682_I2C_CTRL, 0x000f},
        {RT5682_PLL2_INTERNAL, 0x8266},
        {RT5682_SAR_IL_CMD_3, 0x8365},
+       {RT5682_SAR_IL_CMD_6, 0x0180},
 };
 
 void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev)
index b504d63..52d2c96 100644 (file)
@@ -35,6 +35,9 @@
 
 #include "tlv320aic31xx.h"
 
+static int aic31xx_set_jack(struct snd_soc_component *component,
+                            struct snd_soc_jack *jack, void *data);
+
 static const struct reg_default aic31xx_reg_defaults[] = {
        { AIC31XX_CLKMUX, 0x00 },
        { AIC31XX_PLLPR, 0x11 },
@@ -1256,6 +1259,13 @@ static int aic31xx_power_on(struct snd_soc_component *component)
                return ret;
        }
 
+       /*
+        * The jack detection configuration is in the same register
+        * that is used to report jack detect status, so it is volatile
+        * and not covered by the cache sync; restore it separately.
+        */
+       aic31xx_set_jack(component, aic31xx->jack, NULL);
+
        return 0;
 }
 
index dcd8aeb..2e9175b 100644 (file)
@@ -682,11 +682,20 @@ static int aic32x4_set_dosr(struct snd_soc_component *component, u16 dosr)
 static int aic32x4_set_processing_blocks(struct snd_soc_component *component,
                                                u8 r_block, u8 p_block)
 {
-       if (r_block > 18 || p_block > 25)
-               return -EINVAL;
+       struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
+
+       if (aic32x4->type == AIC32X4_TYPE_TAS2505) {
+               if (r_block || p_block > 3)
+                       return -EINVAL;
 
-       snd_soc_component_write(component, AIC32X4_ADCSPB, r_block);
-       snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+               snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+       } else { /* AIC32x4 */
+               if (r_block > 18 || p_block > 25)
+                       return -EINVAL;
+
+               snd_soc_component_write(component, AIC32X4_ADCSPB, r_block);
+               snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+       }
 
        return 0;
 }
@@ -695,6 +704,7 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
                                unsigned int sample_rate, unsigned int channels,
                                unsigned int bit_depth)
 {
+       struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
        u8 aosr;
        u16 dosr;
        u8 adc_resource_class, dac_resource_class;
@@ -721,19 +731,28 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
                adc_resource_class = 6;
                dac_resource_class = 8;
                dosr_increment = 8;
-               aic32x4_set_processing_blocks(component, 1, 1);
+               if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+                       aic32x4_set_processing_blocks(component, 0, 1);
+               else
+                       aic32x4_set_processing_blocks(component, 1, 1);
        } else if (sample_rate <= 96000) {
                aosr = 64;
                adc_resource_class = 6;
                dac_resource_class = 8;
                dosr_increment = 4;
-               aic32x4_set_processing_blocks(component, 1, 9);
+               if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+                       aic32x4_set_processing_blocks(component, 0, 1);
+               else
+                       aic32x4_set_processing_blocks(component, 1, 9);
        } else if (sample_rate == 192000) {
                aosr = 32;
                adc_resource_class = 3;
                dac_resource_class = 4;
                dosr_increment = 2;
-               aic32x4_set_processing_blocks(component, 13, 19);
+               if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+                       aic32x4_set_processing_blocks(component, 0, 1);
+               else
+                       aic32x4_set_processing_blocks(component, 13, 19);
        } else {
                dev_err(component->dev, "Sampling rate not supported\n");
                return -EINVAL;
index 549d982..fe15cbc 100644 (file)
@@ -747,7 +747,6 @@ static void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
 static void wm_adsp2_cleanup_debugfs(struct wm_adsp *dsp)
 {
        wm_adsp_debugfs_clear(dsp);
-       debugfs_remove_recursive(dsp->debugfs_root);
 }
 #else
 static inline void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
index 4124aa2..905c796 100644 (file)
@@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
        snd_pcm_uframes_t period_size;
        ssize_t periodbytes;
        ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
-       u32 buffer_addr = virt_to_phys(substream->dma_buffer.area);
+       u32 buffer_addr = virt_to_phys(substream->runtime->dma_area);
 
        channels = substream->runtime->channels;
        period_size = substream->runtime->period_size;
@@ -233,7 +233,6 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
        /* set codec params and inform SST driver the same */
        sst_fill_pcm_params(substream, &param);
        sst_fill_alloc_params(substream, &alloc_params);
-       substream->runtime->dma_area = substream->dma_buffer.area;
        str_params.sparams = param;
        str_params.aparams = alloc_params;
        str_params.codec = SST_CODEC_TYPE_PCM;
index 896251d..b7b3b0b 100644 (file)
@@ -404,7 +404,7 @@ static int audio_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        /* By default dais[0] is configured for max98373 */
-       if (!strcmp(pdev->name, "sof_da7219_max98360a")) {
+       if (!strcmp(pdev->name, "sof_da7219_mx98360a")) {
                dais[0] = (struct snd_soc_dai_link) {
                        .name = "SSP1-Codec",
                        .id = 0,
index c2a5933..700a185 100644 (file)
@@ -104,8 +104,6 @@ static int kirkwood_dma_open(struct snd_soc_component *component,
        int err;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct kirkwood_dma_data *priv = kirkwood_priv(substream);
-       const struct mbus_dram_target_info *dram;
-       unsigned long addr;
 
        snd_soc_set_runtime_hwparams(substream, &kirkwood_dma_snd_hw);
 
@@ -142,20 +140,14 @@ static int kirkwood_dma_open(struct snd_soc_component *component,
                writel((unsigned int)-1, priv->io + KIRKWOOD_ERR_MASK);
        }
 
-       dram = mv_mbus_dram_info();
-       addr = substream->dma_buffer.addr;
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                if (priv->substream_play)
                        return -EBUSY;
                priv->substream_play = substream;
-               kirkwood_dma_conf_mbus_windows(priv->io,
-                       KIRKWOOD_PLAYBACK_WIN, addr, dram);
        } else {
                if (priv->substream_rec)
                        return -EBUSY;
                priv->substream_rec = substream;
-               kirkwood_dma_conf_mbus_windows(priv->io,
-                       KIRKWOOD_RECORD_WIN, addr, dram);
        }
 
        return 0;
@@ -182,6 +174,23 @@ static int kirkwood_dma_close(struct snd_soc_component *component,
        return 0;
 }
 
+static int kirkwood_dma_hw_params(struct snd_soc_component *component,
+                                 struct snd_pcm_substream *substream,
+                                 struct snd_pcm_hw_params *params)
+{
+       struct kirkwood_dma_data *priv = kirkwood_priv(substream);
+       const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
+       unsigned long addr = substream->runtime->dma_addr;
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               kirkwood_dma_conf_mbus_windows(priv->io,
+                       KIRKWOOD_PLAYBACK_WIN, addr, dram);
+       else
+               kirkwood_dma_conf_mbus_windows(priv->io,
+                       KIRKWOOD_RECORD_WIN, addr, dram);
+       return 0;
+}
+
 static int kirkwood_dma_prepare(struct snd_soc_component *component,
                                struct snd_pcm_substream *substream)
 {
@@ -246,6 +255,7 @@ const struct snd_soc_component_driver kirkwood_soc_component = {
        .name           = DRV_NAME,
        .open           = kirkwood_dma_open,
        .close          = kirkwood_dma_close,
+       .hw_params      = kirkwood_dma_hw_params,
        .prepare        = kirkwood_dma_prepare,
        .pointer        = kirkwood_dma_pointer,
        .pcm_construct  = kirkwood_dma_new,
index 3a5e84e..c8dfd0d 100644 (file)
@@ -148,86 +148,75 @@ int snd_soc_component_set_bias_level(struct snd_soc_component *component,
        return soc_component_ret(component, ret);
 }
 
-static int soc_component_pin(struct snd_soc_component *component,
-                            const char *pin,
-                            int (*pin_func)(struct snd_soc_dapm_context *dapm,
-                                            const char *pin))
-{
-       struct snd_soc_dapm_context *dapm =
-               snd_soc_component_get_dapm(component);
-       char *full_name;
-       int ret;
-
-       if (!component->name_prefix) {
-               ret = pin_func(dapm, pin);
-               goto end;
-       }
-
-       full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
-       if (!full_name) {
-               ret = -ENOMEM;
-               goto end;
-       }
-
-       ret = pin_func(dapm, full_name);
-       kfree(full_name);
-end:
-       return soc_component_ret(component, ret);
-}
-
 int snd_soc_component_enable_pin(struct snd_soc_component *component,
                                 const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_enable_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_enable_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin);
 
 int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component,
                                          const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_enable_pin_unlocked);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_enable_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin_unlocked);
 
 int snd_soc_component_disable_pin(struct snd_soc_component *component,
                                  const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_disable_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_disable_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin);
 
 int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component,
                                           const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_disable_pin_unlocked);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_disable_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin_unlocked);
 
 int snd_soc_component_nc_pin(struct snd_soc_component *component,
                             const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_nc_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_nc_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin);
 
 int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component,
                                      const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_nc_pin_unlocked);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_nc_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin_unlocked);
 
 int snd_soc_component_get_pin_status(struct snd_soc_component *component,
                                     const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_get_pin_status);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_get_pin_status(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_get_pin_status);
 
 int snd_soc_component_force_enable_pin(struct snd_soc_component *component,
                                       const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_force_enable_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin);
 
@@ -235,7 +224,9 @@ int snd_soc_component_force_enable_pin_unlocked(
        struct snd_soc_component *component,
        const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin_unlocked);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_force_enable_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin_unlocked);
 
index 4bce89b..4447f51 100644 (file)
@@ -278,6 +278,8 @@ config SND_SOC_SOF_HDA
 
 config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
        tristate
+       select SOUNDWIRE_INTEL if SND_SOC_SOF_INTEL_SOUNDWIRE
+       select SND_INTEL_SOUNDWIRE_ACPI if SND_SOC_SOF_INTEL_SOUNDWIRE
 
 config SND_SOC_SOF_INTEL_SOUNDWIRE
        tristate "SOF support for SoundWire"
@@ -285,8 +287,6 @@ config SND_SOC_SOF_INTEL_SOUNDWIRE
        depends on SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
        depends on ACPI && SOUNDWIRE
        depends on !(SOUNDWIRE=m && SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=y)
-       select SOUNDWIRE_INTEL
-       select SND_INTEL_SOUNDWIRE_ACPI
        help
          This adds support for SoundWire with Sound Open Firmware
          for Intel(R) platforms.
index c91aa95..acfeca4 100644 (file)
@@ -107,8 +107,8 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
        } else {
                /* reply correct size ? */
                if (reply.hdr.size != msg->reply_size &&
-                       /* getter payload is never known upfront */
-                       !(reply.hdr.cmd & SOF_IPC_GLB_PROBE)) {
+                   /* getter payload is never known upfront */
+                   ((reply.hdr.cmd & SOF_GLB_TYPE_MASK) != SOF_IPC_GLB_PROBE)) {
                        dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
                                msg->reply_size, reply.hdr.size);
                        ret = -EINVAL;
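
SOF global IPC commands encode a command type in a dedicated field of hdr.cmd rather than as
independent flag bits, so the old test reply.hdr.cmd & SOF_IPC_GLB_PROBE also fires for any
command whose type encoding happens to share a bit with the probe type. Masking out the type
field and comparing for equality is the correct check. A worked illustration with hypothetical
encodings (the real shift and values live in the SOF IPC headers): if the type occupies the top
nibble and PROBE is encoded as 0x9 << 28, then a command of type 0xd << 28 still satisfies
(cmd & (0x9 << 28)) != 0 although it is not a probe, while
(cmd & SOF_GLB_TYPE_MASK) == SOF_IPC_GLB_PROBE matches only the probe type.
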
index e1e368f..891e6e1 100644 (file)
@@ -187,12 +187,16 @@ static int hda_sdw_probe(struct snd_sof_dev *sdev)
 int hda_sdw_startup(struct snd_sof_dev *sdev)
 {
        struct sof_intel_hda_dev *hdev;
+       struct snd_sof_pdata *pdata = sdev->pdata;
 
        hdev = sdev->pdata->hw_pdata;
 
        if (!hdev->sdw)
                return 0;
 
+       if (pdata->machine && !pdata->machine->mach_params.link_mask)
+               return 0;
+
        return sdw_intel_startup(hdev->sdw);
 }
 
@@ -1002,6 +1006,14 @@ static int hda_generic_machine_select(struct snd_sof_dev *sdev)
                        hda_mach->mach_params.dmic_num = dmic_num;
                        pdata->machine = hda_mach;
                        pdata->tplg_filename = tplg_filename;
+
+                       if (codec_num == 2) {
+                               /*
+                                * Prevent SoundWire links from starting when an external
+                                * HDaudio codec is used
+                                */
+                               hda_mach->mach_params.link_mask = 0;
+                       }
                }
        }
 
index 3c1628a..3d9736e 100644 (file)
@@ -198,7 +198,7 @@ static int uniphier_aiodma_mmap(struct snd_soc_component *component,
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
        return remap_pfn_range(vma, vma->vm_start,
-                              substream->dma_buffer.addr >> PAGE_SHIFT,
+                              substream->runtime->dma_addr >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
 }
 
index 1d59fb6..91afea9 100644 (file)
@@ -452,8 +452,8 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
 
        stream_data->buffer_size = size;
 
-       low = lower_32_bits(substream->dma_buffer.addr);
-       high = upper_32_bits(substream->dma_buffer.addr);
+       low = lower_32_bits(runtime->dma_addr);
+       high = upper_32_bits(runtime->dma_addr);
        writel(low, stream_data->mmio + XLNX_AUD_BUFF_ADDR_LSB);
        writel(high, stream_data->mmio + XLNX_AUD_BUFF_ADDR_MSB);
 
index 2f6a624..a1f8c3a 100644 (file)
@@ -907,7 +907,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
                }
        }
 
-       if (chip->quirk_type & QUIRK_SETUP_DISABLE_AUTOSUSPEND)
+       if (chip->quirk_type == QUIRK_SETUP_DISABLE_AUTOSUSPEND)
                usb_enable_autosuspend(interface_to_usbdev(intf));
 
        chip->num_interfaces--;
index 52de522..14456f6 100644 (file)
@@ -324,6 +324,12 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
                                              sources[ret - 1],
                                              visited, validate);
                if (ret > 0) {
+                       /*
+                        * For the Samsung USBC Headset (AKG), setting the clock selector again
+                        * results in an incorrect default clock setting
+                        */
+                       if (chip->usb_id == USB_ID(0x04e8, 0xa051))
+                               return ret;
                        err = uac_clock_selector_set_val(chip, entity_id, cur);
                        if (err < 0)
                                return err;
index f4cdaf1..9b713b4 100644 (file)
@@ -1816,6 +1816,15 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer,
                strlcat(name, " - Output Jack", name_size);
 }
 
+/* get connector value to "wake up" the USB audio */
+static int connector_mixer_resume(struct usb_mixer_elem_list *list)
+{
+       struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
+
+       get_connector_value(cval, NULL, NULL);
+       return 0;
+}
+
 /* Build a mixer control for a UAC connector control (jack-detect) */
 static void build_connector_control(struct usb_mixer_interface *mixer,
                                    const struct usbmix_name_map *imap,
@@ -1833,6 +1842,10 @@ static void build_connector_control(struct usb_mixer_interface *mixer,
        if (!cval)
                return;
        snd_usb_mixer_elem_init_std(&cval->head, mixer, term->id);
+
+       /* set up a specific resume callback */
+       cval->head.resume = connector_mixer_resume;
+
        /*
         * UAC2: The first byte from reading the UAC2_TE_CONNECTOR control returns the
         * number of channels connected.
@@ -3642,23 +3655,15 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
        return 0;
 }
 
-static int default_mixer_resume(struct usb_mixer_elem_list *list)
-{
-       struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
-
-       /* get connector value to "wake up" the USB audio */
-       if (cval->val_type == USB_MIXER_BOOLEAN && cval->channels == 1)
-               get_connector_value(cval, NULL, NULL);
-
-       return 0;
-}
-
 static int default_mixer_reset_resume(struct usb_mixer_elem_list *list)
 {
-       int err = default_mixer_resume(list);
+       int err;
 
-       if (err < 0)
-               return err;
+       if (list->resume) {
+               err = list->resume(list);
+               if (err < 0)
+                       return err;
+       }
        return restore_mixer_value(list);
 }
 
@@ -3697,7 +3702,7 @@ void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
        list->id = unitid;
        list->dump = snd_usb_mixer_dump_cval;
 #ifdef CONFIG_PM
-       list->resume = default_mixer_resume;
+       list->resume = NULL;
        list->reset_resume = default_mixer_reset_resume;
 #endif
 }
index f9d698a..3d5848d 100644 (file)
@@ -228,7 +228,7 @@ enum {
 };
 
 static const char *const scarlett2_dim_mute_names[SCARLETT2_DIM_MUTE_COUNT] = {
-       "Mute", "Dim"
+       "Mute Playback Switch", "Dim Playback Switch"
 };
 
 /* Description of each hardware port type:
@@ -1856,9 +1856,15 @@ static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl,
                                        struct snd_ctl_elem_value *ucontrol)
 {
        struct usb_mixer_elem_info *elem = kctl->private_data;
-       struct scarlett2_data *private = elem->head.mixer->private_data;
+       struct usb_mixer_interface *mixer = elem->head.mixer;
+       struct scarlett2_data *private = mixer->private_data;
        int index = line_out_remap(private, elem->control);
 
+       mutex_lock(&private->data_mutex);
+       if (private->vol_updated)
+               scarlett2_update_volumes(mixer);
+       mutex_unlock(&private->data_mutex);
+
        ucontrol->value.integer.value[0] = private->mute_switch[index];
        return 0;
 }
@@ -1955,10 +1961,12 @@ static void scarlett2_vol_ctl_set_writable(struct usb_mixer_interface *mixer,
                        ~SNDRV_CTL_ELEM_ACCESS_WRITE;
        }
 
-       /* Notify of write bit change */
-       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+       /* Notify of write bit and possible value change */
+       snd_ctl_notify(card,
+                      SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
                       &private->vol_ctls[index]->id);
-       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+       snd_ctl_notify(card,
+                      SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
                       &private->mute_ctls[index]->id);
 }
 
@@ -2530,14 +2538,18 @@ static int scarlett2_add_direct_monitor_ctl(struct usb_mixer_interface *mixer)
 {
        struct scarlett2_data *private = mixer->private_data;
        const struct scarlett2_device_info *info = private->info;
+       const char *s;
 
        if (!info->direct_monitor)
                return 0;
 
+       s = info->direct_monitor == 1
+             ? "Direct Monitor Playback Switch"
+             : "Direct Monitor Playback Enum";
+
        return scarlett2_add_new_ctl(
                mixer, &scarlett2_direct_monitor_ctl[info->direct_monitor - 1],
-               0, 1, "Direct Monitor Playback Switch",
-               &private->direct_monitor_ctl);
+               0, 1, s, &private->direct_monitor_ctl);
 }
 
 /*** Speaker Switching Control ***/
@@ -2589,7 +2601,9 @@ static int scarlett2_speaker_switch_enable(struct usb_mixer_interface *mixer)
 
                /* disable the line out SW/HW switch */
                scarlett2_sw_hw_ctl_ro(private, i);
-               snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+               snd_ctl_notify(card,
+                              SNDRV_CTL_EVENT_MASK_VALUE |
+                                SNDRV_CTL_EVENT_MASK_INFO,
                               &private->sw_hw_ctls[i]->id);
        }
 
@@ -2913,7 +2927,7 @@ static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl,
                        if (private->vol_sw_hw_switch[line_index]) {
                                private->mute_switch[line_index] = val;
                                snd_ctl_notify(mixer->chip->card,
-                                              SNDRV_CTL_EVENT_MASK_INFO,
+                                              SNDRV_CTL_EVENT_MASK_VALUE,
                                               &private->mute_ctls[i]->id);
                        }
                }
@@ -3455,7 +3469,7 @@ static int scarlett2_add_msd_ctl(struct usb_mixer_interface *mixer)
 
        /* Add MSD control */
        return scarlett2_add_new_ctl(mixer, &scarlett2_msd_ctl,
-                                    0, 1, "MSD Mode", NULL);
+                                    0, 1, "MSD Mode Switch", NULL);
 }
 
 /*** Cleanup/Suspend Callbacks ***/
index e7accd8..326d1b0 100644 (file)
@@ -1899,6 +1899,7 @@ static const struct registration_quirk registration_quirks[] = {
        REG_QUIRK_ENTRY(0x0951, 0x16ea, 2),     /* Kingston HyperX Cloud Flight S */
        REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2),     /* JBL Quantum 600 */
        REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2),     /* JBL Quantum 400 */
+       REG_QUIRK_ENTRY(0x0ecb, 0x203c, 2),     /* JBL Quantum 600 */
        REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2),     /* JBL Quantum 800 */
        { 0 }                                   /* terminator */
 };
index 8b7a983..3430667 100644 (file)
@@ -1031,7 +1031,7 @@ struct sys_stat_struct {
  *     scall32-o32.S in the kernel sources.
  *   - the system call is performed by calling "syscall"
  *   - syscall return comes in v0, and register a3 needs to be checked to know
- *     if an error occured, in which case errno is in v0.
+ *     if an error occurred, in which case errno is in v0.
  *   - the arguments are cast to long and assigned into the target registers
  *     which are then simply passed as registers to the asm code, so that we
  *     don't have to experience issues with register constraints.
@@ -2243,6 +2243,19 @@ unsigned int sleep(unsigned int seconds)
                return 0;
 }
 
+static __attribute__((unused))
+int msleep(unsigned int msecs)
+{
+       struct timeval my_timeval = { msecs / 1000, (msecs % 1000) * 1000 };
+
+       if (sys_select(0, 0, 0, 0, &my_timeval) < 0)
+               return (my_timeval.tv_sec * 1000) +
+                       (my_timeval.tv_usec / 1000) +
+                       !!(my_timeval.tv_usec % 1000);
+       else
+               return 0;
+}
+
 static __attribute__((unused))
 int stat(const char *path, struct stat *buf)
 {
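
For context, the msleep() helper added above follows the same convention as nolibc's sleep(): it returns 0 when the full delay elapsed, or the time still remaining (rounded up to whole milliseconds) when the underlying sys_select() was interrupted. A minimal, hypothetical caller (not part of the patch) that retries the leftover interval might look like:

	/* sleep ~500 ms even if signals keep interrupting the select() */
	unsigned int left = 500;

	while (left)
		left = msleep(left);
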
index 8146181..d9bd6f5 100644 (file)
@@ -131,8 +131,7 @@ static int copy_file(struct io_uring *ring, off_t insize)
        writes = reads = offset = 0;
 
        while (insize || write_left) {
-               unsigned long had_reads;
-               int got_comp;
+               int had_reads, got_comp;
 
                /*
                 * Queue up as many reads as we can
@@ -174,8 +173,13 @@ static int copy_file(struct io_uring *ring, off_t insize)
                        if (!got_comp) {
                                ret = io_uring_wait_cqe(ring, &cqe);
                                got_comp = 1;
-                       } else
+                       } else {
                                ret = io_uring_peek_cqe(ring, &cqe);
+                               if (ret == -EAGAIN) {
+                                       cqe = NULL;
+                                       ret = 0;
+                               }
+                       }
                        if (ret < 0) {
                                fprintf(stderr, "io_uring_peek_cqe: %s\n",
                                                        strerror(-ret));
@@ -194,7 +198,7 @@ static int copy_file(struct io_uring *ring, off_t insize)
                                fprintf(stderr, "cqe failed: %s\n",
                                                strerror(-cqe->res));
                                return 1;
-                       } else if ((size_t) cqe->res != data->iov.iov_len) {
+                       } else if (cqe->res != data->iov.iov_len) {
                                /* Short read/write, adjust and requeue */
                                data->iov.iov_base += cqe->res;
                                data->iov.iov_len -= cqe->res;
@@ -221,6 +225,25 @@ static int copy_file(struct io_uring *ring, off_t insize)
                }
        }
 
+       /* wait out pending writes */
+       while (writes) {
+               struct io_data *data;
+
+               ret = io_uring_wait_cqe(ring, &cqe);
+               if (ret) {
+                       fprintf(stderr, "wait_cqe=%d\n", ret);
+                       return 1;
+               }
+               if (cqe->res < 0) {
+                       fprintf(stderr, "write res=%d\n", cqe->res);
+                       return 1;
+               }
+               data = io_uring_cqe_get_data(cqe);
+               free(data);
+               writes--;
+               io_uring_cqe_seen(ring, cqe);
+       }
+
        return 0;
 }
 
index b46760b..7ff3d5c 100644 (file)
@@ -804,6 +804,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
        btf->nr_types = 0;
        btf->start_id = 1;
        btf->start_str_off = 0;
+       btf->fd = -1;
 
        if (base_btf) {
                btf->base_btf = base_btf;
@@ -832,8 +833,6 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
        if (err)
                goto done;
 
-       btf->fd = -1;
-
 done:
        if (err) {
                btf__free(btf);
index ecaae29..cd8c703 100644 (file)
@@ -75,6 +75,9 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
        case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
                xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
                break;
+       case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+               xattr.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
+               break;
        case BPF_PROG_TYPE_SK_LOOKUP:
                xattr.expected_attach_type = BPF_SK_LOOKUP;
                break;
@@ -104,7 +107,6 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
        case BPF_PROG_TYPE_SK_REUSEPORT:
        case BPF_PROG_TYPE_FLOW_DISSECTOR:
        case BPF_PROG_TYPE_CGROUP_SYSCTL:
-       case BPF_PROG_TYPE_CGROUP_SOCKOPT:
        case BPF_PROG_TYPE_TRACING:
        case BPF_PROG_TYPE_STRUCT_OPS:
        case BPF_PROG_TYPE_EXT:
index 54f367c..b1bff5f 100644 (file)
@@ -434,7 +434,7 @@ static int nd_intel_test_finish_query(struct nfit_test *t,
                dev_dbg(dev, "%s: transition out verify\n", __func__);
                fw->state = FW_STATE_UPDATED;
                fw->missed_activate = false;
-               /* fall through */
+               fallthrough;
        case FW_STATE_UPDATED:
                nd_cmd->status = 0;
                /* bogus test version */
index fb010a3..dd0388e 100644 (file)
@@ -35,6 +35,7 @@ TARGETS += memory-hotplug
 TARGETS += mincore
 TARGETS += mount
 TARGETS += mount_setattr
+TARGETS += move_mount_set_group
 TARGETS += mqueue
 TARGETS += nci
 TARGETS += net
index 2c8935b..ee45432 100644 (file)
        .result = ACCEPT,
        .retval = 2,
 },
+{
+       "dead code: zero extension",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
+       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
index 412eaee..b669107 100644 (file)
 #define HV_X64_GUEST_DEBUGGING_AVAILABLE               BIT(1)
 #define HV_X64_PERF_MONITOR_AVAILABLE                  BIT(2)
 #define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE      BIT(3)
-#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE          BIT(4)
+#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE           BIT(4)
 #define HV_X64_GUEST_IDLE_STATE_AVAILABLE              BIT(5)
 #define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE            BIT(8)
 #define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE           BIT(10)
 #define HV_STATUS_INVALID_CONNECTION_ID                18
 #define HV_STATUS_INSUFFICIENT_BUFFERS         19
 
+/* hypercall options */
+#define HV_HYPERCALL_FAST_BIT          BIT(16)
+
 #endif /* !SELFTEST_KVM_HYPERV_H */
index bab10ae..e0b2bb1 100644 (file)
@@ -215,7 +215,7 @@ int main(void)
        vcpu_set_hv_cpuid(vm, VCPU_ID);
 
        tsc_page_gva = vm_vaddr_alloc_page(vm);
-       memset(addr_gpa2hva(vm, tsc_page_gva), 0x0, getpagesize());
+       memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
        TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
                "TSC page has to be page aligned\n");
        vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
index af27c7e..91d88aa 100644 (file)
@@ -47,6 +47,7 @@ static void do_wrmsr(u32 idx, u64 val)
 }
 
 static int nr_gp;
+static int nr_ud;
 
 static inline u64 hypercall(u64 control, vm_vaddr_t input_address,
                            vm_vaddr_t output_address)
@@ -80,6 +81,12 @@ static void guest_gp_handler(struct ex_regs *regs)
                regs->rip = (uint64_t)&wrmsr_end;
 }
 
+static void guest_ud_handler(struct ex_regs *regs)
+{
+       nr_ud++;
+       regs->rip += 3;
+}
+
 struct msr_data {
        uint32_t idx;
        bool available;
@@ -90,6 +97,7 @@ struct msr_data {
 struct hcall_data {
        uint64_t control;
        uint64_t expect;
+       bool ud_expected;
 };
 
 static void guest_msr(struct msr_data *msr)
@@ -117,13 +125,26 @@ static void guest_msr(struct msr_data *msr)
 static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
 {
        int i = 0;
+       u64 res, input, output;
 
        wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID);
        wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
 
        while (hcall->control) {
-               GUEST_ASSERT(hypercall(hcall->control, pgs_gpa,
-                                      pgs_gpa + 4096) == hcall->expect);
+               nr_ud = 0;
+               if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
+                       input = pgs_gpa;
+                       output = pgs_gpa + 4096;
+               } else {
+                       input = output = 0;
+               }
+
+               res = hypercall(hcall->control, input, output);
+               if (hcall->ud_expected)
+                       GUEST_ASSERT(nr_ud == 1);
+               else
+                       GUEST_ASSERT(res == hcall->expect);
+
                GUEST_SYNC(i++);
        }
 
@@ -552,8 +573,18 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
                        recomm.ebx = 0xfff;
                        hcall->expect = HV_STATUS_SUCCESS;
                        break;
-
                case 17:
+                       /* XMM fast hypercall */
+                       hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
+                       hcall->ud_expected = true;
+                       break;
+               case 18:
+                       feat.edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
+                       hcall->ud_expected = false;
+                       hcall->expect = HV_STATUS_SUCCESS;
+                       break;
+
+               case 19:
                        /* END */
                        hcall->control = 0;
                        break;
@@ -625,6 +656,10 @@ int main(void)
        /* Test hypercalls */
        vm = vm_create_default(VCPU_ID, 0, guest_hcall);
 
+       vm_init_descriptor_tables(vm);
+       vcpu_init_descriptor_tables(vm, VCPU_ID);
+       vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+
        /* Hypercall input/output */
        hcall_page = vm_vaddr_alloc_pages(vm, 2);
        memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
diff --git a/tools/testing/selftests/move_mount_set_group/.gitignore b/tools/testing/selftests/move_mount_set_group/.gitignore
new file mode 100644 (file)
index 0000000..f5e3392
--- /dev/null
@@ -0,0 +1 @@
+move_mount_set_group_test
diff --git a/tools/testing/selftests/move_mount_set_group/Makefile b/tools/testing/selftests/move_mount_set_group/Makefile
new file mode 100644 (file)
index 0000000..80c2d86
--- /dev/null
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for mount selftests.
+CFLAGS = -g -I../../../../usr/include/ -Wall -O2
+
+TEST_GEN_FILES += move_mount_set_group_test
+
+include ../lib.mk
diff --git a/tools/testing/selftests/move_mount_set_group/config b/tools/testing/selftests/move_mount_set_group/config
new file mode 100644 (file)
index 0000000..416bd53
--- /dev/null
@@ -0,0 +1 @@
+CONFIG_USER_NS=y
diff --git a/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c b/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
new file mode 100644 (file)
index 0000000..860198f
--- /dev/null
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <sys/syscall.h>
+
+#include "../kselftest_harness.h"
+
+#ifndef CLONE_NEWNS
+#define CLONE_NEWNS 0x00020000
+#endif
+
+#ifndef CLONE_NEWUSER
+#define CLONE_NEWUSER 0x10000000
+#endif
+
+#ifndef MS_SHARED
+#define MS_SHARED (1 << 20)
+#endif
+
+#ifndef MS_PRIVATE
+#define MS_PRIVATE (1<<18)
+#endif
+
+#ifndef MOVE_MOUNT_SET_GROUP
+#define MOVE_MOUNT_SET_GROUP 0x00000100
+#endif
+
+#ifndef MOVE_MOUNT_F_EMPTY_PATH
+#define MOVE_MOUNT_F_EMPTY_PATH 0x00000004
+#endif
+
+#ifndef MOVE_MOUNT_T_EMPTY_PATH
+#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040
+#endif
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+       ssize_t ret;
+
+       do {
+               ret = write(fd, buf, count);
+       } while (ret < 0 && errno == EINTR);
+
+       return ret;
+}
+
+static int write_file(const char *path, const void *buf, size_t count)
+{
+       int fd;
+       ssize_t ret;
+
+       fd = open(path, O_WRONLY | O_CLOEXEC | O_NOCTTY | O_NOFOLLOW);
+       if (fd < 0)
+               return -1;
+
+       ret = write_nointr(fd, buf, count);
+       close(fd);
+       if (ret < 0 || (size_t)ret != count)
+               return -1;
+
+       return 0;
+}
+
+static int create_and_enter_userns(void)
+{
+       uid_t uid;
+       gid_t gid;
+       char map[100];
+
+       uid = getuid();
+       gid = getgid();
+
+       if (unshare(CLONE_NEWUSER))
+               return -1;
+
+       if (write_file("/proc/self/setgroups", "deny", sizeof("deny") - 1) &&
+           errno != ENOENT)
+               return -1;
+
+       snprintf(map, sizeof(map), "0 %d 1", uid);
+       if (write_file("/proc/self/uid_map", map, strlen(map)))
+               return -1;
+
+
+       snprintf(map, sizeof(map), "0 %d 1", gid);
+       if (write_file("/proc/self/gid_map", map, strlen(map)))
+               return -1;
+
+       if (setgid(0))
+               return -1;
+
+       if (setuid(0))
+               return -1;
+
+       return 0;
+}
+
+static int prepare_unpriv_mountns(void)
+{
+       if (create_and_enter_userns())
+               return -1;
+
+       if (unshare(CLONE_NEWNS))
+               return -1;
+
+       if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0))
+               return -1;
+
+       return 0;
+}
+
+static char *get_field(char *src, int nfields)
+{
+       int i;
+       char *p = src;
+
+       for (i = 0; i < nfields; i++) {
+               while (*p && *p != ' ' && *p != '\t')
+                       p++;
+
+               if (!*p)
+                       break;
+
+               p++;
+       }
+
+       return p;
+}
+
+static void null_endofword(char *word)
+{
+       while (*word && *word != ' ' && *word != '\t')
+               word++;
+       *word = '\0';
+}
+
+static bool is_shared_mount(const char *path)
+{
+       size_t len = 0;
+       char *line = NULL;
+       FILE *f = NULL;
+
+       f = fopen("/proc/self/mountinfo", "re");
+       if (!f)
+               return false;
+
+       while (getline(&line, &len, f) != -1) {
+               char *opts, *target;
+
+               target = get_field(line, 4);
+               if (!target)
+                       continue;
+
+               opts = get_field(target, 2);
+               if (!opts)
+                       continue;
+
+               null_endofword(target);
+
+               if (strcmp(target, path) != 0)
+                       continue;
+
+               null_endofword(opts);
+               if (strstr(opts, "shared:"))
+                       return true;
+       }
+
+       free(line);
+       fclose(f);
+
+       return false;
+}
+
+/* Attempt to de-conflict with the selftests tree. */
+#ifndef SKIP
+#define SKIP(s, ...)   XFAIL(s, ##__VA_ARGS__)
+#endif
+
+#define SET_GROUP_FROM "/tmp/move_mount_set_group_supported_from"
+#define SET_GROUP_TO   "/tmp/move_mount_set_group_supported_to"
+
+static int move_mount_set_group_supported(void)
+{
+       int ret;
+
+       if (mount("testing", "/tmp", "tmpfs", MS_NOATIME | MS_NODEV,
+                 "size=100000,mode=700"))
+               return -1;
+
+       if (mount(NULL, "/tmp", NULL, MS_PRIVATE, 0))
+               return -1;
+
+       if (mkdir(SET_GROUP_FROM, 0777))
+               return -1;
+
+       if (mkdir(SET_GROUP_TO, 0777))
+               return -1;
+
+       if (mount("testing", SET_GROUP_FROM, "tmpfs", MS_NOATIME | MS_NODEV,
+                 "size=100000,mode=700"))
+               return -1;
+
+       if (mount(SET_GROUP_FROM, SET_GROUP_TO, NULL, MS_BIND, NULL))
+               return -1;
+
+       if (mount(NULL, SET_GROUP_FROM, NULL, MS_SHARED, 0))
+               return -1;
+
+       ret = syscall(SYS_move_mount, AT_FDCWD, SET_GROUP_FROM,
+                     AT_FDCWD, SET_GROUP_TO, MOVE_MOUNT_SET_GROUP);
+       umount2("/tmp", MNT_DETACH);
+
+       return ret < 0 ? false : true;
+}
+
+FIXTURE(move_mount_set_group) {
+};
+
+#define SET_GROUP_A "/tmp/A"
+
+FIXTURE_SETUP(move_mount_set_group)
+{
+       int ret;
+
+       ASSERT_EQ(prepare_unpriv_mountns(), 0);
+
+       ret = move_mount_set_group_supported();
+       ASSERT_GE(ret, 0);
+       if (!ret)
+               SKIP(return, "move_mount(MOVE_MOUNT_SET_GROUP) is not supported");
+
+       umount2("/tmp", MNT_DETACH);
+
+       ASSERT_EQ(mount("testing", "/tmp", "tmpfs", MS_NOATIME | MS_NODEV,
+                       "size=100000,mode=700"), 0);
+
+       ASSERT_EQ(mkdir(SET_GROUP_A, 0777), 0);
+
+       ASSERT_EQ(mount("testing", SET_GROUP_A, "tmpfs", MS_NOATIME | MS_NODEV,
+                       "size=100000,mode=700"), 0);
+}
+
+FIXTURE_TEARDOWN(move_mount_set_group)
+{
+       int ret;
+
+       ret = move_mount_set_group_supported();
+       ASSERT_GE(ret, 0);
+       if (!ret)
+               SKIP(return, "move_mount(MOVE_MOUNT_SET_GROUP) is not supported");
+
+       umount2("/tmp", MNT_DETACH);
+}
+
+#define __STACK_SIZE (8 * 1024 * 1024)
+static pid_t do_clone(int (*fn)(void *), void *arg, int flags)
+{
+       void *stack;
+
+       stack = malloc(__STACK_SIZE);
+       if (!stack)
+               return -ENOMEM;
+
+#ifdef __ia64__
+       return __clone2(fn, stack, __STACK_SIZE, flags | SIGCHLD, arg, NULL);
+#else
+       return clone(fn, stack + __STACK_SIZE, flags | SIGCHLD, arg, NULL);
+#endif
+}
+
+static int wait_for_pid(pid_t pid)
+{
+       int status, ret;
+
+again:
+       ret = waitpid(pid, &status, 0);
+       if (ret == -1) {
+               if (errno == EINTR)
+                       goto again;
+
+               return -1;
+       }
+
+       if (!WIFEXITED(status))
+               return -1;
+
+       return WEXITSTATUS(status);
+}
+
+struct child_args {
+       int unsfd;
+       int mntnsfd;
+       bool shared;
+       int mntfd;
+};
+
+static int get_nestedns_mount_cb(void *data)
+{
+       struct child_args *ca = (struct child_args *)data;
+       int ret;
+
+       ret = prepare_unpriv_mountns();
+       if (ret)
+               return 1;
+
+       if (ca->shared) {
+               ret = mount(NULL, SET_GROUP_A, NULL, MS_SHARED, 0);
+               if (ret)
+                       return 1;
+       }
+
+       ret = open("/proc/self/ns/user", O_RDONLY);
+       if (ret < 0)
+               return 1;
+       ca->unsfd = ret;
+
+       ret = open("/proc/self/ns/mnt", O_RDONLY);
+       if (ret < 0)
+               return 1;
+       ca->mntnsfd = ret;
+
+       ret = open(SET_GROUP_A, O_RDONLY);
+       if (ret < 0)
+               return 1;
+       ca->mntfd = ret;
+
+       return 0;
+}
+
+TEST_F(move_mount_set_group, complex_sharing_copying)
+{
+       struct child_args ca_from = {
+               .shared = true,
+       };
+       struct child_args ca_to = {
+               .shared = false,
+       };
+       pid_t pid;
+       int ret;
+
+       ret = move_mount_set_group_supported();
+       ASSERT_GE(ret, 0);
+       if (!ret)
+               SKIP(return, "move_mount(MOVE_MOUNT_SET_GROUP) is not supported");
+
+       pid = do_clone(get_nestedns_mount_cb, (void *)&ca_from, CLONE_VFORK |
+                      CLONE_VM | CLONE_FILES); ASSERT_GT(pid, 0);
+       ASSERT_EQ(wait_for_pid(pid), 0);
+
+       pid = do_clone(get_nestedns_mount_cb, (void *)&ca_to, CLONE_VFORK |
+                      CLONE_VM | CLONE_FILES); ASSERT_GT(pid, 0);
+       ASSERT_EQ(wait_for_pid(pid), 0);
+
+       ASSERT_EQ(syscall(SYS_move_mount, ca_from.mntfd, "",
+                         ca_to.mntfd, "", MOVE_MOUNT_SET_GROUP
+                         | MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH),
+                 0);
+
+       ASSERT_EQ(setns(ca_to.mntnsfd, CLONE_NEWNS), 0);
+       ASSERT_EQ(is_shared_mount(SET_GROUP_A), 1);
+}
+
+TEST_HARNESS_MAIN
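
For reference, the operation under test boils down to a single move_mount(2) call with MOVE_MOUNT_SET_GROUP, which copies the sharing (peer) group of one mount onto another instead of moving it. A minimal sketch of the fd-based form used in complex_sharing_copying, where from_fd and to_fd are hypothetical O_RDONLY file descriptors opened on the source and destination mounts and the MOVE_MOUNT_* constants are the fallback defines from the top of the test:

	#include <sys/syscall.h>
	#include <unistd.h>

	/* Make the mount behind to_fd a member of the same peer group as the
	 * (shared) mount behind from_fd; neither mount is actually moved. */
	static int copy_mount_group(int from_fd, int to_fd)
	{
		return syscall(SYS_move_mount, from_fd, "", to_fd, "",
			       MOVE_MOUNT_SET_GROUP |
			       MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH);
	}
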
index f23438d..3d7dde2 100644 (file)
@@ -484,13 +484,16 @@ enum desc_type {
        MONITOR_ACQUIRE,
        EXPIRE_STATE,
        EXPIRE_POLICY,
+       SPDINFO_ATTRS,
 };
 const char *desc_name[] = {
        "create tunnel",
        "alloc spi",
        "monitor acquire",
        "expire state",
-       "expire policy"
+       "expire policy",
+       "spdinfo attributes",
+       ""
 };
 struct xfrm_desc {
        enum desc_type  type;
@@ -1593,6 +1596,155 @@ out_close:
        return ret;
 }
 
+static int xfrm_spdinfo_set_thresh(int xfrm_sock, uint32_t *seq,
+               unsigned thresh4_l, unsigned thresh4_r,
+               unsigned thresh6_l, unsigned thresh6_r,
+               bool add_bad_attr)
+
+{
+       struct {
+               struct nlmsghdr         nh;
+               union {
+                       uint32_t        unused;
+                       int             error;
+               };
+               char                    attrbuf[MAX_PAYLOAD];
+       } req;
+       struct xfrmu_spdhthresh thresh;
+
+       memset(&req, 0, sizeof(req));
+       req.nh.nlmsg_len        = NLMSG_LENGTH(sizeof(req.unused));
+       req.nh.nlmsg_type       = XFRM_MSG_NEWSPDINFO;
+       req.nh.nlmsg_flags      = NLM_F_REQUEST | NLM_F_ACK;
+       req.nh.nlmsg_seq        = (*seq)++;
+
+       thresh.lbits = thresh4_l;
+       thresh.rbits = thresh4_r;
+       if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV4_HTHRESH, &thresh, sizeof(thresh)))
+               return -1;
+
+       thresh.lbits = thresh6_l;
+       thresh.rbits = thresh6_r;
+       if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV6_HTHRESH, &thresh, sizeof(thresh)))
+               return -1;
+
+       if (add_bad_attr) {
+               BUILD_BUG_ON(XFRMA_IF_ID <= XFRMA_SPD_MAX + 1);
+               if (rtattr_pack(&req.nh, sizeof(req), XFRMA_IF_ID, NULL, 0)) {
+                       pr_err("adding attribute failed: no space");
+                       return -1;
+               }
+       }
+
+       if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+               pr_err("send()");
+               return -1;
+       }
+
+       if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) {
+               pr_err("recv()");
+               return -1;
+       } else if (req.nh.nlmsg_type != NLMSG_ERROR) {
+               printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type);
+               return -1;
+       }
+
+       if (req.error) {
+               printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error));
+               return -1;
+       }
+
+       return 0;
+}
+
+static int xfrm_spdinfo_attrs(int xfrm_sock, uint32_t *seq)
+{
+       struct {
+               struct nlmsghdr                 nh;
+               union {
+                       uint32_t        unused;
+                       int             error;
+               };
+               char                    attrbuf[MAX_PAYLOAD];
+       } req;
+
+       if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 31, 120, 16, false)) {
+               pr_err("Can't set SPD HTHRESH");
+               return KSFT_FAIL;
+       }
+
+       memset(&req, 0, sizeof(req));
+
+       req.nh.nlmsg_len        = NLMSG_LENGTH(sizeof(req.unused));
+       req.nh.nlmsg_type       = XFRM_MSG_GETSPDINFO;
+       req.nh.nlmsg_flags      = NLM_F_REQUEST;
+       req.nh.nlmsg_seq        = (*seq)++;
+       if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+               pr_err("send()");
+               return KSFT_FAIL;
+       }
+
+       if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) {
+               pr_err("recv()");
+               return KSFT_FAIL;
+       } else if (req.nh.nlmsg_type == XFRM_MSG_NEWSPDINFO) {
+               size_t len = NLMSG_PAYLOAD(&req.nh, sizeof(req.unused));
+               struct rtattr *attr = (void *)req.attrbuf;
+               int got_thresh = 0;
+
+               for (; RTA_OK(attr, len); attr = RTA_NEXT(attr, len)) {
+                       if (attr->rta_type == XFRMA_SPD_IPV4_HTHRESH) {
+                               struct xfrmu_spdhthresh *t = RTA_DATA(attr);
+
+                               got_thresh++;
+                               if (t->lbits != 32 || t->rbits != 31) {
+                                       pr_err("thresh differ: %u, %u",
+                                                       t->lbits, t->rbits);
+                                       return KSFT_FAIL;
+                               }
+                       }
+                       if (attr->rta_type == XFRMA_SPD_IPV6_HTHRESH) {
+                               struct xfrmu_spdhthresh *t = RTA_DATA(attr);
+
+                               got_thresh++;
+                               if (t->lbits != 120 || t->rbits != 16) {
+                                       pr_err("thresh differ: %u, %u",
+                                                       t->lbits, t->rbits);
+                                       return KSFT_FAIL;
+                               }
+                       }
+               }
+               if (got_thresh != 2) {
+                       pr_err("only %d thresh returned by XFRM_MSG_GETSPDINFO", got_thresh);
+                       return KSFT_FAIL;
+               }
+       } else if (req.nh.nlmsg_type != NLMSG_ERROR) {
+               printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type);
+               return KSFT_FAIL;
+       } else {
+               printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error));
+               return -1;
+       }
+
+       /* Restore the default */
+       if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, false)) {
+               pr_err("Can't restore SPD HTHRESH");
+               return KSFT_FAIL;
+       }
+
+       /*
+        * At the moment xfrm uses nlmsg_parse_deprecated(), which
+        * implies NL_VALIDATE_LIBERAL - attributes with
+        * (type > maxtype) are silently ignored. nla_parse_deprecated_strict()
+        * would enforce the limit, and nla_parse() is stricter still.
+        * So right now the bad attribute is expected to be ignored,
+        * not to make the request fail.
+        */
+       if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, true))
+               return KSFT_PASS;
+
+       return KSFT_PASS;
+}
+
 static int child_serv(int xfrm_sock, uint32_t *seq,
                unsigned int nr, int cmd_fd, void *buf, struct xfrm_desc *desc)
 {
@@ -1717,6 +1869,9 @@ static int child_f(unsigned int nr, int test_desc_fd, int cmd_fd, void *buf)
                case EXPIRE_POLICY:
                        ret = xfrm_expire_policy(xfrm_sock, &seq, nr, &desc);
                        break;
+               case SPDINFO_ATTRS:
+                       ret = xfrm_spdinfo_attrs(xfrm_sock, &seq);
+                       break;
                default:
                        printk("Unknown desc type %d", desc.type);
                        exit(KSFT_FAIL);
@@ -1994,8 +2149,10 @@ static int write_proto_plan(int fd, int proto)
  *   sizeof(xfrm_user_polexpire)  = 168  |  sizeof(xfrm_user_polexpire)  = 176
  *
  * Check the affected by the UABI difference structures.
+ * Also, check translation for xfrm_set_spdinfo: it has its own attributes
+ * which need to be correctly copied, but not translated.
  */
-const unsigned int compat_plan = 4;
+const unsigned int compat_plan = 5;
 static int write_compat_struct_tests(int test_desc_fd)
 {
        struct xfrm_desc desc = {};
@@ -2019,6 +2176,10 @@ static int write_compat_struct_tests(int test_desc_fd)
        if (__write_desc(test_desc_fd, &desc))
                return -1;
 
+       desc.type = SPDINFO_ATTRS;
+       if (__write_desc(test_desc_fd, &desc))
+               return -1;
+
        return 0;
 }
 
index 15d937b..fd1ffaa 100755 (executable)
@@ -68,16 +68,12 @@ do
        cpumask=`awk -v cpus="$cpus" -v me=$me -v n=$n 'BEGIN {
                srand(n + me + systime());
                ncpus = split(cpus, ca);
-               curcpu = ca[int(rand() * ncpus + 1)];
-               z = "";
-               for (i = 1; 4 * i <= curcpu; i++)
-                       z = z "0";
-               print "0x" 2 ^ (curcpu % 4) z;
+               print ca[int(rand() * ncpus + 1)];
        }' < /dev/null`
        n=$(($n+1))
-       if ! taskset -p $cpumask $$ > /dev/null 2>&1
+       if ! taskset -c -p $cpumask $$ > /dev/null 2>&1
        then
-               echo taskset failure: '"taskset -p ' $cpumask $$ '"'
+               echo taskset failure: '"taskset -c -p ' $cpumask $$ '"'
                exit 1
        fi
 
index e5cc6b2..1af5d6b 100755 (executable)
@@ -14,7 +14,7 @@ if test -z "$TORTURE_KCONFIG_KCSAN_ARG"
 then
        exit 0
 fi
-cat $1/*/console.log |
+find $1 -name console.log -exec cat {} \; |
        grep "BUG: KCSAN: " |
        sed -e 's/^\[[^]]*] //' |
        sort |
index d8c8483..5a0023d 100755 (executable)
@@ -142,7 +142,7 @@ then
        echo "Cannot copy from $oldrun to $rundir."
        usage
 fi
-rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
+rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
 touch "$rundir/log"
 echo $scriptname $args | tee -a "$rundir/log"
 echo $oldrun > "$rundir/re-run"
@@ -179,6 +179,6 @@ if test -n "$dryrun"
 then
        echo ---- Dryrun complete, directory: $rundir | tee -a "$rundir/log"
 else
-       ( cd "$rundir"; sh $T/runbatches.sh )
+       ( cd "$rundir"; sh $T/runbatches.sh ) | tee -a "$rundir/log"
        kvm-end-run-stats.sh "$rundir" "$starttime"
 fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-assign-cpus.sh b/tools/testing/selftests/rcutorture/bin/kvm-assign-cpus.sh
new file mode 100755 (executable)
index 0000000..f99b2c1
--- /dev/null
@@ -0,0 +1,106 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Produce awk statements roughly depicting the system's CPU and cache
+# layout.  If the required information is not available, produce
+# error messages as awk comments.  Successful exit regardless.
+#
+# Usage: kvm-assign-cpus.sh /path/to/sysfs
+
+T=/tmp/kvm-assign-cpus.sh.$$
+trap 'rm -rf $T' 0 2
+mkdir $T
+
+sysfsdir=${1-/sys/devices/system/node}
+if ! cd "$sysfsdir" > $T/msg 2>&1
+then
+       sed -e 's/^/# /' < $T/msg
+       exit 0
+fi
+nodelist="`ls -d node*`"
+for i in node*
+do
+       if ! test -d $i/
+       then
+               echo "# Not a directory: $sysfsdir/node*"
+               exit 0
+       fi
+       for j in $i/cpu*/cache/index*
+       do
+               if ! test -d $j/
+               then
+                       echo "# Not a directory: $sysfsdir/$j"
+                       exit 0
+               else
+                       break
+               fi
+       done
+       indexlist="`ls -d $i/cpu* | grep 'cpu[0-9][0-9]*' | head -1 | sed -e 's,^.*$,ls -d &/cache/index*,' | sh | sed -e 's,^.*/,,'`"
+       break
+done
+for i in node*/cpu*/cache/index*/shared_cpu_list
+do
+       if ! test -f $i
+       then
+               echo "# Not a file: $sysfsdir/$i"
+               exit 0
+       else
+               break
+       fi
+done
+firstshared=
+for i in $indexlist
+do
+       rm -f $T/cpulist
+       for n in node*
+       do
+               f="$n/cpu*/cache/$i/shared_cpu_list"
+               if ! cat $f > $T/msg 2>&1
+               then
+                       sed -e 's/^/# /' < $T/msg
+                       exit 0
+               fi
+               cat $f >> $T/cpulist
+       done
+       if grep -q '[-,]' $T/cpulist
+       then
+               if test -z "$firstshared"
+               then
+                       firstshared="$i"
+               fi
+       fi
+done
+if test -z "$firstshared"
+then
+       splitindex="`echo $indexlist | sed -e 's/ .*$//'`"
+else
+       splitindex="$firstshared"
+fi
+nodenum=0
+for n in node*
+do
+       cat $n/cpu*/cache/$splitindex/shared_cpu_list | sort -u -k1n |
+       awk -v nodenum="$nodenum" '
+       BEGIN {
+               idx = 0;
+       }
+
+       {
+               nlists = split($0, cpulists, ",");
+               for (i = 1; i <= nlists; i++) {
+                       listsize = split(cpulists[i], cpus, "-");
+                       if (listsize == 1)
+                               cpus[2] = cpus[1];
+                       for (j = cpus[1]; j <= cpus[2]; j++) {
+                               print "cpu[" nodenum "][" idx "] = " j ";";
+                               idx++;
+                       }
+               }
+       }
+
+       END {
+               print "nodecpus[" nodenum "] = " idx ";";
+       }'
+       nodenum=`expr $nodenum + 1`
+done
+echo "numnodes = $nodenum;"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-get-cpus-script.sh b/tools/testing/selftests/rcutorture/bin/kvm-get-cpus-script.sh
new file mode 100755 (executable)
index 0000000..20c7c53
--- /dev/null
@@ -0,0 +1,88 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Create an awk script that takes as input numbers of CPUs and outputs
+# lists of CPUs, one per line in both cases.
+#
+# Usage: kvm-get-cpus-script.sh /path/to/cpu/arrays /path/to/put/script [ /path/to/state ]
+#
+# The CPU arrays are output by kvm-assign-cpus.sh, and are valid awk
+# statements initializing the variables describing the system's topology.
+#
+# The optional state is input by this script (if the file exists and is
+# non-empty), and can also be output by this script.
+
+cpuarrays="${1-/sys/devices/system/node}"
+scriptfile="${2}"
+statefile="${3}"
+
+if ! test -f "$cpuarrays"
+then
+       echo "File not found: $cpuarrays" 1>&2
+       exit 1
+fi
+scriptdir="`dirname "$scriptfile"`"
+if ! test -d "$scriptdir" || ! test -x "$scriptdir" || ! test -w "$scriptdir"
+then
+       echo "Directory not usable for script output: $scriptdir"
+       exit 1
+fi
+
+cat << '___EOF___' > "$scriptfile"
+BEGIN {
+___EOF___
+cat "$cpuarrays" >> "$scriptfile"
+if test -r "$statefile"
+then
+       cat "$statefile" >> "$scriptfile"
+fi
+cat << '___EOF___' >> "$scriptfile"
+}
+
+# Do we have the system architecture to guide CPU affinity?
+function gotcpus()
+{
+       return numnodes != "";
+}
+
+# Return a comma-separated list of the next n CPUs.
+function nextcpus(n,  i, s)
+{
+       for (i = 0; i < n; i++) {
+               if (nodecpus[curnode] == "")
+                       curnode = 0;
+               if (cpu[curnode][curcpu[curnode]] == "")
+                       curcpu[curnode] = 0;
+               if (s != "")
+                       s = s ",";
+               s = s cpu[curnode][curcpu[curnode]];
+               curcpu[curnode]++;
+               curnode++
+       }
+       return s;
+}
+
+# Dump out the current node/CPU state so that a later invocation of this
+# script can continue where this one left off.  Of course, this only works
+# when a state file was specified and where there was valid sysfs state.
+# Returns 1 if the state was dumped, 0 otherwise.
+#
+# Dumping the state for one system configuration and loading it into
+# another isn't likely to do what you want, whatever that might be.
+function dumpcpustate(  i, fn)
+{
+___EOF___
+echo ' fn = "'"$statefile"'";' >> $scriptfile
+cat << '___EOF___' >> "$scriptfile"
+       if (fn != "" && gotcpus()) {
+               print "curnode = " curnode ";" > fn;
+               for (i = 0; i < numnodes; i++)
+                       if (curcpu[i] != "")
+                               print "curcpu[" i "] = " curcpu[i] ";" >> fn;
+               return 1;
+       }
+       if (fn != "")
+               print "# No CPU state to dump." > fn;
+       return 0;
+}
+___EOF___
index f3a7a5e..db2c0e2 100755 (executable)
@@ -25,7 +25,7 @@ then
        echo "$configfile -------"
 else
        title="$configfile ------- $ncs acquisitions/releases"
-       dur=`sed -e 's/^.* locktorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`
+       dur=`grep -v '^#' $i/qemu-cmd | sed -e 's/^.* locktorture.shutdown_secs=//' -e 's/ .*$//' 2> /dev/null`
        if test -z "$dur"
        then
                :
index 671bfee..3afa5c6 100755 (executable)
@@ -25,7 +25,7 @@ if test -z "$nscfs"
 then
        echo "$configfile ------- "
 else
-       dur="`sed -e 's/^.* scftorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`"
+       dur="`grep -v '^#' $i/qemu-cmd | sed -e 's/^.* scftorture.shutdown_secs=//' -e 's/ .*$//' 2> /dev/null`"
        if test -z "$dur"
        then
                rate=""
index e01b31b..0a54199 100755 (executable)
@@ -74,7 +74,10 @@ do
        done
        if test -f "$rd/kcsan.sum"
        then
-               if grep -q CONFIG_KCSAN=y $T
+               if ! test -f $T
+               then
+                       :
+               elif grep -q CONFIG_KCSAN=y $T
                then
                        echo "Compiler or architecture does not support KCSAN!"
                        echo Did you forget to switch your compiler with '--kmake-arg CC=<cc-that-supports-kcsan>'?
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-remote-noreap.sh b/tools/testing/selftests/rcutorture/bin/kvm-remote-noreap.sh
new file mode 100755 (executable)
index 0000000..014ce68
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Periodically scan a directory tree to prevent files from being reaped
+# by systemd and friends on long runs.
+#
+# Usage: kvm-remote-noreap.sh pathname
+#
+# Copyright (C) 2021 Facebook, Inc.
+#
+# Authors: Paul E. McKenney <paulmck@kernel.org>
+
+pathname="$1"
+if test "$pathname" = ""
+then
+       echo Usage: kvm-remote-noreap.sh pathname
+       exit 1
+fi
+if ! test -d "$pathname"
+then
+       echo  Usage: kvm-remote-noreap.sh pathname
+       echo "       pathname must be a directory."
+       exit 2
+fi
+
+while test -d "$pathname"
+do
+       find "$pathname" -type f -exec touch -c {} \; > /dev/null 2>&1
+       sleep 30
+done
index 79e680e..03126eb 100755 (executable)
@@ -124,10 +124,12 @@ awk < "$rundir"/scenarios -v dest="$T/bin" -v rundir="$rundir" '
        n = $1;
        sub(/\./, "", n);
        fn = dest "/kvm-remote-" n ".sh"
+       print "kvm-remote-noreap.sh " rundir " &" > fn;
        scenarios = "";
        for (i = 2; i <= NF; i++)
                scenarios = scenarios " " $i;
-       print "kvm-test-1-run-batch.sh" scenarios > fn;
+       print "kvm-test-1-run-batch.sh" scenarios >> fn;
+       print "sync" >> fn;
        print "rm " rundir "/remote.run" >> fn;
 }'
 chmod +x $T/bin/kvm-remote-*.sh
@@ -172,11 +174,20 @@ checkremotefile () {
        do
                ssh $1 "test -f \"$2\""
                ret=$?
-               if test "$ret" -ne 255
+               if test "$ret" -eq 255
                then
+                       echo " ---" ssh failure to $1 checking for file $2, retry after $sleeptime seconds. `date`
+               elif test "$ret" -eq 0
+               then
+                       return 0
+               elif test "$ret" -eq 1
+               then
+                       echo " ---" File \"$2\" not found: ssh $1 test -f \"$2\"
+                       return 1
+               else
+                       echo " ---" Exit code $ret: ssh $1 test -f \"$2\", retry after $sleeptime seconds. `date`
                        return $ret
                fi
-               echo " ---" ssh failure to $1 checking for file $2, retry after $sleeptime seconds. `date`
                sleep $sleeptime
        done
 }
@@ -242,7 +253,8 @@ do
        do
                sleep 30
        done
-       ( cd "$oldrun"; ssh $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu_pid */qemu-retval; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
+       echo " ---" Collecting results from $i `date`
+       ( cd "$oldrun"; ssh $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
 done
 
 ( kvm-end-run-stats.sh "$oldrun" "$starttime"; echo $? > $T/exitcode ) | tee -a "$oldrun/remote-log"
index 7ea0809..1e29d65 100755 (executable)
@@ -50,10 +50,34 @@ grep '^#' $1/qemu-cmd | sed -e 's/^# //' > $T/qemu-cmd-settings
 echo ---- System running test: `uname -a`
 echo ---- Starting kernels. `date` | tee -a log
 $TORTURE_JITTER_START
+kvm-assign-cpus.sh /sys/devices/system/node > $T/cpuarray.awk
 for i in "$@"
 do
        echo ---- System running test: `uname -a` > $i/kvm-test-1-run-qemu.sh.out
        echo > $i/kvm-test-1-run-qemu.sh.out
+       export TORTURE_AFFINITY=
+       kvm-get-cpus-script.sh $T/cpuarray.awk $T/cpubatches.awk $T/cpustate
+       cat << '        ___EOF___' >> $T/cpubatches.awk
+       END {
+               affinitylist = "";
+               if (!gotcpus()) {
+                       print "echo No CPU-affinity information, so no taskset command.";
+               } else if (cpu_count !~ /^[0-9][0-9]*$/) {
+                       print "echo " scenario ": Bogus number of CPUs (old qemu-cmd?), so no taskset command.";
+               } else {
+                       affinitylist = nextcpus(cpu_count);
+                       if (!(affinitylist ~ /^[0-9,-][0-9,-]*$/))
+                               print "echo " scenario ": Bogus CPU-affinity information, so no taskset command.";
+                       else if (!dumpcpustate())
+                               print "echo " scenario ": Could not dump state, so no taskset command.";
+                       else
+                               print "export TORTURE_AFFINITY=" affinitylist;
+               }
+       }
+       ___EOF___
+       cpu_count="`grep '# TORTURE_CPU_COUNT=' $i/qemu-cmd | sed -e 's/^.*=//'`"
+       affinity_export="`awk -f $T/cpubatches.awk -v cpu_count="$cpu_count" -v scenario=$i < /dev/null`"
+       $affinity_export
        kvm-test-1-run-qemu.sh $i >> $i/kvm-test-1-run-qemu.sh.out 2>&1 &
 done
 for i in $runfiles
index 5b1aa2a..4428058 100755 (executable)
@@ -39,27 +39,34 @@ echo ' ---' `date`: Starting kernel, PID $$
 grep '^#' $resdir/qemu-cmd | sed -e 's/^# //' > $T/qemu-cmd-settings
 . $T/qemu-cmd-settings
 
-# Decorate qemu-cmd with redirection, backgrounding, and PID capture
-sed -e 's/$/ 2>\&1 \&/' < $resdir/qemu-cmd > $T/qemu-cmd
-echo 'echo $! > $resdir/qemu_pid' >> $T/qemu-cmd
+# Decorate qemu-cmd with affinity, redirection, backgrounding, and PID capture
+taskset_command=
+if test -n "$TORTURE_AFFINITY"
+then
+       taskset_command="taskset -c $TORTURE_AFFINITY "
+fi
+sed -e 's/^[^#].*$/'"$taskset_command"'& 2>\&1 \&/' < $resdir/qemu-cmd > $T/qemu-cmd
+echo 'qemu_pid=$!' >> $T/qemu-cmd
+echo 'echo $qemu_pid > $resdir/qemu-pid' >> $T/qemu-cmd
+echo 'taskset -c -p $qemu_pid > $resdir/qemu-affinity' >> $T/qemu-cmd
 
 # In case qemu refuses to run...
 echo "NOTE: $QEMU either did not run or was interactive" > $resdir/console.log
 
 # Attempt to run qemu
 kstarttime=`gawk 'BEGIN { print systime() }' < /dev/null`
-( . $T/qemu-cmd; wait `cat  $resdir/qemu_pid`; echo $? > $resdir/qemu-retval ) &
+( . $T/qemu-cmd; wait `cat  $resdir/qemu-pid`; echo $? > $resdir/qemu-retval ) &
 commandcompleted=0
 if test -z "$TORTURE_KCONFIG_GDB_ARG"
 then
        sleep 10 # Give qemu's pid a chance to reach the file
-       if test -s "$resdir/qemu_pid"
+       if test -s "$resdir/qemu-pid"
        then
-               qemu_pid=`cat "$resdir/qemu_pid"`
-               echo Monitoring qemu job at pid $qemu_pid
+               qemu_pid=`cat "$resdir/qemu-pid"`
+               echo Monitoring qemu job at pid $qemu_pid `date`
        else
                qemu_pid=""
-               echo Monitoring qemu job at yet-as-unknown pid
+               echo Monitoring qemu job at yet-as-unknown pid `date`
        fi
 fi
 if test -n "$TORTURE_KCONFIG_GDB_ARG"
@@ -82,9 +89,9 @@ then
 fi
 while :
 do
-       if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+       if test -z "$qemu_pid" && test -s "$resdir/qemu-pid"
        then
-               qemu_pid=`cat "$resdir/qemu_pid"`
+               qemu_pid=`cat "$resdir/qemu-pid"`
        fi
        kruntime=`gawk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
        if test -z "$qemu_pid" || kill -0 "$qemu_pid" > /dev/null 2>&1
@@ -115,22 +122,22 @@ do
                break
        fi
 done
-if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+if test -z "$qemu_pid" && test -s "$resdir/qemu-pid"
 then
-       qemu_pid=`cat "$resdir/qemu_pid"`
+       qemu_pid=`cat "$resdir/qemu-pid"`
 fi
-if test $commandcompleted -eq 0 -a -n "$qemu_pid"
+if test $commandcompleted -eq 0 && test -n "$qemu_pid"
 then
        if ! test -f "$resdir/../STOP.1"
        then
-               echo Grace period for qemu job at pid $qemu_pid
+               echo Grace period for qemu job at pid $qemu_pid `date`
        fi
        oldline="`tail $resdir/console.log`"
        while :
        do
                if test -f "$resdir/../STOP.1"
                then
-                       echo "PID $qemu_pid killed due to run STOP.1 request" >> $resdir/Warnings 2>&1
+                       echo "PID $qemu_pid killed due to run STOP.1 request `date`" >> $resdir/Warnings 2>&1
                        kill -KILL $qemu_pid
                        break
                fi
@@ -152,13 +159,17 @@ then
                then
                        last_ts=0
                fi
-               if test "$newline" != "$oldline" -a "$last_ts" -lt $((seconds + $TORTURE_SHUTDOWN_GRACE))
+               if test "$newline" != "$oldline" && test "$last_ts" -lt $((seconds + $TORTURE_SHUTDOWN_GRACE)) && test "$last_ts" -gt "$TORTURE_SHUTDOWN_GRACE"
                then
                        must_continue=yes
+                       if test $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
+                       then
+                               echo Continuing at console.log time $last_ts \"`tail -n 1 $resdir/console.log`\" `date`
+                       fi
                fi
-               if test $must_continue = no -a $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
+               if test $must_continue = no && test $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
                then
-                       echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
+                       echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds `date`" >> $resdir/Warnings 2>&1
                        kill -KILL $qemu_pid
                        break
                fi
@@ -172,5 +183,3 @@ fi
 
 # Tell the script that this run is done.
 rm -f $resdir/build.run
-
-parse-console.sh $resdir/console.log $title
index 420ed5c..f4c8055 100755 (executable)
@@ -205,6 +205,7 @@ echo "# TORTURE_KCONFIG_GDB_ARG=\"$TORTURE_KCONFIG_GDB_ARG\"" >> $resdir/qemu-cm
 echo "# TORTURE_JITTER_START=\"$TORTURE_JITTER_START\"" >> $resdir/qemu-cmd
 echo "# TORTURE_JITTER_STOP=\"$TORTURE_JITTER_STOP\"" >> $resdir/qemu-cmd
 echo "# TORTURE_TRUST_MAKE=\"$TORTURE_TRUST_MAKE\"; export TORTURE_TRUST_MAKE" >> $resdir/qemu-cmd
+echo "# TORTURE_CPU_COUNT=$cpu_count" >> $resdir/qemu-cmd
 
 if test -n "$TORTURE_BUILDONLY"
 then
@@ -214,3 +215,4 @@ then
 fi
 
 kvm-test-1-run-qemu.sh $resdir
+parse-console.sh $resdir/console.log $title
index b4ac4ee..f442d84 100755 (executable)
@@ -430,17 +430,10 @@ then
        git diff HEAD >> $resdir/$ds/testid.txt
 fi
 ___EOF___
-awk < $T/cfgcpu.pack \
-       -v TORTURE_BUILDONLY="$TORTURE_BUILDONLY" \
-       -v CONFIGDIR="$CONFIGFRAG/" \
-       -v KVM="$KVM" \
-       -v ncpus=$cpus \
-       -v jitter="$jitter" \
-       -v rd=$resdir/$ds/ \
-       -v dur=$dur \
-       -v TORTURE_QEMU_ARG="$TORTURE_QEMU_ARG" \
-       -v TORTURE_BOOTARGS="$TORTURE_BOOTARGS" \
-'BEGIN {
+kvm-assign-cpus.sh /sys/devices/system/node > $T/cpuarray.awk
+kvm-get-cpus-script.sh $T/cpuarray.awk $T/dumpbatches.awk
+cat << '___EOF___' >> $T/dumpbatches.awk
+BEGIN {
        i = 0;
 }
 
@@ -451,7 +444,7 @@ awk < $T/cfgcpu.pack \
 }
 
 # Dump out the scripting required to run one test batch.
-function dump(first, pastlast, batchnum)
+function dump(first, pastlast, batchnum,  affinitylist)
 {
        print "echo ----Start batch " batchnum ": `date` | tee -a " rd "log";
        print "needqemurun="
@@ -483,6 +476,14 @@ function dump(first, pastlast, batchnum)
                print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` | tee -a " rd "log";
                print "mkdir " rd cfr[jn] " || :";
                print "touch " builddir ".wait";
+               affinitylist = "";
+               if (gotcpus()) {
+                       affinitylist = nextcpus(cpusr[jn]);
+               }
+               if (affinitylist ~ /^[0-9,-][0-9,-]*$/)
+                       print "export TORTURE_AFFINITY=" affinitylist;
+               else
+                       print "export TORTURE_AFFINITY=";
                print "kvm-test-1-run.sh " CONFIGDIR cf[j], rd cfr[jn], dur " \"" TORTURE_QEMU_ARG "\" \"" TORTURE_BOOTARGS "\" > " rd cfr[jn]  "/kvm-test-1-run.sh.out 2>&1 &"
                print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` | tee -a " rd "log";
                print "while test -f " builddir ".wait"
@@ -560,7 +561,19 @@ END {
        # Dump the last batch.
        if (ncpus != 0)
                dump(first, i, batchnum);
-}' >> $T/script
+}
+___EOF___
+awk < $T/cfgcpu.pack \
+       -v TORTURE_BUILDONLY="$TORTURE_BUILDONLY" \
+       -v CONFIGDIR="$CONFIGFRAG/" \
+       -v KVM="$KVM" \
+       -v ncpus=$cpus \
+       -v jitter="$jitter" \
+       -v rd=$resdir/$ds/ \
+       -v dur=$dur \
+       -v TORTURE_QEMU_ARG="$TORTURE_QEMU_ARG" \
+       -v TORTURE_BOOTARGS="$TORTURE_BOOTARGS" \
+       -f $T/dumpbatches.awk >> $T/script
 echo kvm-end-run-stats.sh "$resdir/$ds" "$starttime" >> $T/script
 
 # Extract the tests and their batches from the script.
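In the dump() function above, the CPU list produced by nextcpus() is exported as TORTURE_AFFINITY only when it looks like a plain CPU list (digits, commas, and hyphens, for example 0-3,8); anything else results in an empty export, which presumably disables affinity for that scenario. The same guard expressed directly in shell, as a sketch:

    affinitylist="0-3,8"     # illustrative value; nextcpus() supplies the real one
    if echo "$affinitylist" | grep -q '^[0-9,-][0-9,-]*$'
    then
            export TORTURE_AFFINITY="$affinitylist"
    else
            export TORTURE_AFFINITY=
    fi
    echo "TORTURE_AFFINITY=$TORTURE_AFFINITY"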
index 53ec7c0..363f560 100755 (executable)
@@ -53,6 +53,7 @@ do_refscale=yes
 do_kvfree=yes
 do_kasan=yes
 do_kcsan=no
+do_clocksourcewd=yes
 
 # doyesno - Helper function for yes/no arguments
 function doyesno () {
@@ -72,6 +73,7 @@ usage () {
        echo "       --configs-scftorture \"config-file list w/ repeat factor (2*CFLIST)\""
        echo "       --doall"
        echo "       --doallmodconfig / --do-no-allmodconfig"
+       echo "       --do-clocksourcewd / --do-no-clocksourcewd"
        echo "       --do-kasan / --do-no-kasan"
        echo "       --do-kcsan / --do-no-kcsan"
        echo "       --do-kvfree / --do-no-kvfree"
@@ -109,7 +111,7 @@ do
                configs_scftorture="$configs_scftorture $2"
                shift
                ;;
-       --doall)
+       --do-all|--doall)
                do_allmodconfig=yes
                do_rcutorture=yes
                do_locktorture=yes
@@ -119,10 +121,14 @@ do
                do_kvfree=yes
                do_kasan=yes
                do_kcsan=yes
+               do_clocksourcewd=yes
                ;;
        --do-allmodconfig|--do-no-allmodconfig)
                do_allmodconfig=`doyesno "$1" --do-allmodconfig`
                ;;
+       --do-clocksourcewd|--do-no-clocksourcewd)
+               do_clocksourcewd=`doyesno "$1" --do-clocksourcewd`
+               ;;
        --do-kasan|--do-no-kasan)
                do_kasan=`doyesno "$1" --do-kasan`
                ;;
@@ -135,7 +141,7 @@ do
        --do-locktorture|--do-no-locktorture)
                do_locktorture=`doyesno "$1" --do-locktorture`
                ;;
-       --do-none)
+       --do-none|--donone)
                do_allmodconfig=no
                do_rcutorture=no
                do_locktorture=no
@@ -145,6 +151,7 @@ do
                do_kvfree=no
                do_kasan=no
                do_kcsan=no
+               do_clocksourcewd=no
                ;;
        --do-rcuscale|--do-no-rcuscale)
                do_rcuscale=`doyesno "$1" --do-rcuscale`
@@ -279,9 +286,9 @@ function torture_one {
 #      torture_bootargs="[ kernel boot arguments ]"
 #      torture_set flavor [ kvm.sh arguments ]
 #
-# Note that "flavor" is an arbitrary string.  Supply --torture if needed.
-# Note that quoting is problematic.  So on the command line, pass multiple
-# values with multiple kvm.sh argument instances.
+# Note that "flavor" is an arbitrary string that does not affect kvm.sh
+# in any way.  So also supply --torture if you need something other than
+# the default.
 function torture_set {
        local cur_kcsan_kmake_args=
        local kcsan_kmake_tag=
@@ -377,6 +384,22 @@ then
        torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 1G --trust-make
 fi
 
+if test "$do_clocksourcewd" = "yes"
+then
+       torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000"
+       torture_set "clocksourcewd-1" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make
+
+       torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 clocksource.max_cswd_read_retries=1"
+       torture_set "clocksourcewd-2" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make
+
+       # In case our work is already done...
+       if test "$do_rcutorture" != "yes"
+       then
+               torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000"
+               torture_set "clocksourcewd-3" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --trust-make
+       fi
+fi
+
 echo " --- " $scriptname $args
 echo " --- " Done `date` | tee -a $T/log
 ret=0
@@ -395,6 +418,10 @@ then
        nfailures="`wc -l "$T/failures" | awk '{ print $1 }'`"
        ret=2
 fi
+if test "$do_kcsan" = "yes"
+then
+       TORTURE_KCONFIG_KCSAN_ARG=1 tools/testing/selftests/rcutorture/bin/kcsan-collapse.sh tools/testing/selftests/rcutorture/res/$ds > tools/testing/selftests/rcutorture/res/$ds/kcsan.sum
+fi
 echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log
 echo Summary: Successes: $nsuccesses Failures: $nfailures. | tee -a $T/log
 tdir="`cat $T/successes $T/failures | head -1 | awk '{ print $NF }' | sed -e 's,/[^/]\+/*$,,'`"
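The torture.sh changes above wire the new clocksource-watchdog test into the standard flag machinery: a yes/no default near the top, a --do-clocksourcewd/--do-no-clocksourcewd case routed through the doyesno helper, and membership in the --do-all and --do-none umbrella options. A standalone sketch of that convention (the doyesno body here is an assumption written to match the observed behavior, not copied from torture.sh):

    #!/bin/bash
    do_clocksourcewd=yes
    doyesno () {
            if test "$1" = "$2"
            then
                    echo yes
            else
                    echo no
            fi
    }
    case "$1" in
    --do-clocksourcewd|--do-no-clocksourcewd)
            do_clocksourcewd=`doyesno "$1" --do-clocksourcewd`
            ;;
    esac
    echo "do_clocksourcewd=$do_clocksourcewd"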
index bafe94c..3ca1124 100644 (file)
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=4
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
index bafe94c..3ca1124 100644 (file)
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=4
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
index ea43990..dc02083 100644 (file)
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=4
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
index dee7a3d..92bbc5a 100644 (file)
@@ -55,10 +55,27 @@ static bool alloc_q1q2_ctx(const uint8_t *s, const uint8_t *m,
        return true;
 }
 
+static void reverse_bytes(void *data, int length)
+{
+       int i = 0;
+       int j = length - 1;
+       uint8_t temp;
+       uint8_t *ptr = data;
+
+       while (i < j) {
+               temp = ptr[i];
+               ptr[i] = ptr[j];
+               ptr[j] = temp;
+               i++;
+               j--;
+       }
+}
+
 static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
                      uint8_t *q2)
 {
        struct q1q2_ctx ctx;
+       int len;
 
        if (!alloc_q1q2_ctx(s, m, &ctx)) {
                fprintf(stderr, "Not enough memory for Q1Q2 calculation\n");
@@ -89,8 +106,10 @@ static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
                goto out;
        }
 
-       BN_bn2bin(ctx.q1, q1);
-       BN_bn2bin(ctx.q2, q2);
+       len = BN_bn2bin(ctx.q1, q1);
+       reverse_bytes(q1, len);
+       len = BN_bn2bin(ctx.q2, q2);
+       reverse_bytes(q2, len);
 
        free_q1q2_ctx(&ctx);
        return true;
@@ -152,22 +171,6 @@ static RSA *gen_sign_key(void)
        return key;
 }
 
-static void reverse_bytes(void *data, int length)
-{
-       int i = 0;
-       int j = length - 1;
-       uint8_t temp;
-       uint8_t *ptr = data;
-
-       while (i < j) {
-               temp = ptr[i];
-               ptr[i] = ptr[j];
-               ptr[j] = temp;
-               i++;
-               j--;
-       }
-}
-
 enum mrtags {
        MRECREATE = 0x0045544145524345,
        MREADD = 0x0000000044444145,
@@ -367,8 +370,6 @@ bool encl_measure(struct encl *encl)
        /* BE -> LE */
        reverse_bytes(sigstruct->signature, SGX_MODULUS_SIZE);
        reverse_bytes(sigstruct->modulus, SGX_MODULUS_SIZE);
-       reverse_bytes(sigstruct->q1, SGX_MODULUS_SIZE);
-       reverse_bytes(sigstruct->q2, SGX_MODULUS_SIZE);
 
        EVP_MD_CTX_destroy(ctx);
        RSA_free(key);
index b587b9a..0d7bbe4 100644 (file)
@@ -4,7 +4,8 @@ test: virtio_test vringh_test
 virtio_test: virtio_ring.o virtio_test.o
 vringh_test: vringh_test.o vringh.o virtio_ring.o
 
-CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
+CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
+LDFLAGS += -lpthread
 vpath %.c ../../drivers/virtio ../../drivers/vhost
 mod:
        ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
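The -lpthread addition pairs with the pthread-based spinlock stub introduced in the new tools/virtio/linux/spinlock.h below: the userspace virtio tests now call pthread_spin_*() and must link against libpthread. A rough sketch of the resulting link step (object names come from the rules above; flags trimmed for brevity, and the exact invocation may differ by toolchain):

    cc -g -O2 -Wall virtio_ring.o virtio_test.o -o virtio_test -lpthread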
diff --git a/tools/virtio/linux/spinlock.h b/tools/virtio/linux/spinlock.h
new file mode 100644 (file)
index 0000000..028e3cd
--- /dev/null
+++ b/tools/virtio/linux/spinlock.h
@@ -0,0 +1,56 @@
+#ifndef SPINLOCK_H_STUB
+#define SPINLOCK_H_STUB
+
+#include <pthread.h>
+
+typedef pthread_spinlock_t  spinlock_t;
+
+static inline void spin_lock_init(spinlock_t *lock)
+{
+       int r = pthread_spin_init(lock, 0);
+       assert(!r);
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
+       int ret = pthread_spin_lock(lock);
+       assert(!ret);
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+       int ret = pthread_spin_unlock(lock);
+       assert(!ret);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+       spin_lock(lock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+       spin_unlock(lock);
+}
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+       spin_lock(lock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+       spin_unlock(lock);
+}
+
+static inline void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
+{
+       spin_lock(lock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
+{
+       spin_unlock(lock);
+}
+
+#endif
index 5d90254..363b982 100644 (file)
@@ -3,6 +3,7 @@
 #define LINUX_VIRTIO_H
 #include <linux/scatterlist.h>
 #include <linux/kernel.h>
+#include <linux/spinlock.h>
 
 struct device {
        void *parent;
@@ -12,6 +13,7 @@ struct virtio_device {
        struct device dev;
        u64 features;
        struct list_head vqs;
+       spinlock_t vqs_list_lock;
 };
 
 struct virtqueue {
index d20fba0..b50dbe2 100644 (file)
@@ -892,6 +892,8 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
 
 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 {
+       static DEFINE_MUTEX(kvm_debugfs_lock);
+       struct dentry *dent;
        char dir_name[ITOA_MAX_LEN * 2];
        struct kvm_stat_data *stat_data;
        const struct _kvm_stats_desc *pdesc;
@@ -903,8 +905,20 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
                return 0;
 
        snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
-       kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);
+       mutex_lock(&kvm_debugfs_lock);
+       dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
+       if (dent) {
+               pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
+               dput(dent);
+               mutex_unlock(&kvm_debugfs_lock);
+               return 0;
+       }
+       dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
+       mutex_unlock(&kvm_debugfs_lock);
+       if (IS_ERR(dent))
+               return 0;
 
+       kvm->debugfs_dentry = dent;
        kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
                                         sizeof(*kvm->debugfs_stat_data),
                                         GFP_KERNEL_ACCOUNT);
@@ -5201,7 +5215,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 
-       if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
+       if (kvm->debugfs_dentry) {
                char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
 
                if (p) {